| id (int64) | original (string) | modified (string) |
|---|---|---|
45,191 | def test_drop_duplicates():
frame_data = {
"A": list(range(3)) * 2,
"B": list(range(1, 4)) * 2,
"C": list(range(6)),
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data) # noqa F841
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], kee... | def test_drop_duplicates():
frame_data = {
"A": list(range(3)) * 2,
"B": list(range(1, 4)) * 2,
"C": list(range(6)),
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep="first", in... |
9,363 | def test_wrap_var_set():
assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe)
for item in wrap_var(set(['foo'])):
assert isinstance(item, AnsibleUnsafe)
| def test_wrap_var_set():
assert isinstance(wrap_var(set(['foo'])), set)
for item in wrap_var(set(['foo'])):
assert isinstance(item, AnsibleUnsafe)
|
24,704 | def _declare_qos_parameteres(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Pub... | def _declare_qos_parameters(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publ... |
3,123 | def test_win_type_freq_return_deprecation():
freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")
with tm.assert_produces_warning(FutureWarning):
freq_roll.win_type
| def test_win_type_freq_return_deprecation():
freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")
with tm.assert_produces_warning(FutureWarning):
assert freq_roll.win_type == "freq"
|
19,831 | def populate_counts(sf, schema, objs_cached, logger):
objects_to_count = [objname for objname in objs_cached]
counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count)
errors = transports_errors + salesforce_errors
for error in errors[0:10]:
logger.warning(f"Error count... | def populate_counts(sf, schema, objs_cached, logger):
objects_to_count = [objname for objname in objs_cached]
counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count)
errors = transports_errors + salesforce_errors
for error in errors[0:10]:
logger.warning(f"Error count... |
31,982 | def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.packs_artifacts_path
id_set_path = option.id_set_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_a... | def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.packs_artifacts_path
id_set_path = option.id_set_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_a... |
8,665 | def configure(config):
config.define_section('currency', CurrencySection, validate=False)
config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:')
config.currency.configure_setting('enable_regex', 'automatically respond to regex matches:')
| def configure(config):
config.define_section('currency', CurrencySection, validate=False)
config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:')
config.currency.configure_setting('enable_regex', 'Automatically respond to regex matches?')
|
20,224 | def process_missing(missing_ids):
"""Create missing school and alias objects and dump csv of additions. """
csv_out_data = []
csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory,
datetime.date.today())
missing_data = process_datafiles(add_s... | def process_missing(missing_ids):
"""Create missing school and alias objects and dump csv of additions."""
csv_out_data = []
csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory,
datetime.date.today())
missing_data = process_datafiles(add_scho... |
30,938 | def write_data(sheet, data_item, data_headers, workbook, bold, border):
if not isinstance(data_item, list):
data_item = [data_item]
if not data_headers:
data_headers = list(data_item[0].keys())
worksheet = workbook.add_worksheet(sheet)
row = 0
col = 0
for key in data_headers:
... | def write_data(sheet, data_item, data_headers, workbook, bold, border):
if not isinstance(data_item, list):
data_item = [data_item]
if not data_headers:
data_headers = list(data_item[0].keys())
worksheet = workbook.add_worksheet(sheet)
row = 0
col = 0
for key in data_headers:
... |
31,006 | def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]:
"""
Get a packs dir names from a contribution pull request changed files
Args:
branch: The contrib branch
pr_number: The contrib PR
repo: The contrib repo
Returns:
A list of packs dir names, if found.... | def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]:
"""
Get packs dir names from a contribution pull request changed files
Args:
branch: The contrib branch
pr_number: The contrib PR
repo: The contrib repo
Returns:
A list of packs dir names, if found.
... |
44,408 | def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array:
"""
Converts the configuration σ to a 64-bit integer labelling the Hilbert Space.
.. Note::
Requires jax >= 0.3.17 and will crash on older versions.
Args:
hilbert: The Hilbert space
σ: A single or a batch of ... | def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array:
"""
Converts the configuration σ to a 64-bit integer labelling the Hilbert Space.
.. Note::
Requires jax >= 0.3.17 and will raise an exception on older versions.
Args:
hilbert: The Hilbert space
σ: A single o... |
6,606 | def get_or_make_bin(item_code, warehouse):
bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse})
if not bin_record:
bin_obj = frappe.get_doc({
"doctype": "Bin",
"item_code": item_code,
"warehouse": warehouse,
})
bin_obj.flags.ignore_permissions = 1
bin_obj.insert(... | def get_or_make_bin(item_code, warehouse) -> str:
bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse})
if not bin_record:
bin_obj = frappe.get_doc({
"doctype": "Bin",
"item_code": item_code,
"warehouse": warehouse,
})
bin_obj.flags.ignore_permissions = 1
bin_obj.... |
13,911 | def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find li... | def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find li... |
5,578 | def parse_metar(metar_text, year, month, station_metadata=station_info):
"""Parse a METAR report in text form into a list of named tuples.
Parameters
----------
metar_text : str
The METAR report
station_metadata : dict
Mapping of station identifiers to station metadata
year : in... | def parse_metar(metar_text, year, month, station_metadata=station_info):
"""Parse a METAR report in text form into a list of named tuples.
Parameters
----------
metar_text : str
The METAR report
station_metadata : dict
Mapping of station identifiers to station metadata
year : in... |
42,005 | def _run_iteration(
zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0
) -> Tuple[Dict[complex, Union[int, float]], float]:
max_fractional_delta = 0.0
for coord in coordinates:
current_val = zmap.get(coord, None)
max_neighbor = -np.inf
min_ne... | def _run_iteration(
zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0
) -> Tuple[Dict[complex, Union[int, float]], float]:
max_fractional_delta = 0.0
for coord in coordinates:
current_val = zmap.get(coord, None)
max_neighbor = -np.inf
min_ne... |
38,902 | def field_singleton_schema( # noqa: C901 (ignore complexity)
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
... | def field_singleton_schema( # noqa: C901 (ignore complexity)
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
... |
44,177 | def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
"""Computes the ExpvalCost and catches the initial deprecation warning."""
with pytest.warns(UserWarning, match="will be deprecated,"):
res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
return res
| def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
"""Computes the ExpvalCost and catches the initial deprecation warning."""
with pytest.warns(UserWarning, match="is deprecated,"):
res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
return res
|
4,560 | def clean(signals, sessions=None, detrend=True, standardize='zscore',
confounds=None, standardize_confounds=True, filter="butterworth",
low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False):
"""Improve SNR on masked fMRI signals.
This function can do several things on the input signa... | def clean(signals, sessions=None, detrend=True, standardize='zscore',
confounds=None, standardize_confounds=True, filter='butterworth',
low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False):
"""Improve SNR on masked fMRI signals.
This function can do several things on the input signa... |
14,255 | def get_sim_steps(
time: Union[Real, Decimal],
units: str = "step",
round_mode: str = "error"
) -> int:
"""Calculates the number of simulation time steps for a given amount of *time*.
Args:
time: The value to convert to simulation time steps.
units: String specifying the units of th... | def get_sim_steps(
time: Union[Real, Decimal],
units: str = "step",
round_mode: str = "error"
) -> int:
"""Calculates the number of simulation time steps for a given amount of *time*.
Args:
time: The value to convert to simulation time steps.
units: String specifying the units of th... |
14,125 | def _continuous_to_discrete_coords(total_bounds, bounds, p):
"""
Calculates mid points & ranges of geoms and returns
as discrete coords
Parameters
----------
total_bounds : Total bounds of geometries - array
bounds : Bounds of each geometry - array
p : The number of iterations used ... | def _continuous_to_discrete_coords(total_bounds, bounds, p):
"""
Calculates mid points & ranges of geoms and returns
as discrete coords
Parameters
----------
total_bounds : Total bounds of geometries - array
bounds : Bounds of each geometry - array
p : The number of iterations used ... |
6,585 | def execute():
click.secho(
"E-Invoicing Integration is moved to a separate app and will be removed from ERPNext in version-14.\n"
"Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance",
fg="yellow",
)
| def execute():
click.secho(
"Indian E-Invoicing integration is moved to a separate app and will be removed from ERPNext in version-14.\n"
"Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance",
fg="yellow",
)
|
20,458 | def merge_stock_location_path_stock_rule(env):
openupgrade.logged_query(
env.cr, """
INSERT INTO stock_rule (name, active, action, sequence, company_id,
location_id, location_src_id, route_id, procure_method,
route_sequence, picking_type_id, delay, propagate, warehouse_id,
... | def merge_stock_location_path_stock_rule(env):
openupgrade.logged_query(
env.cr, """
INSERT INTO stock_rule (name, active, action, sequence, company_id,
location_id, location_src_id, route_id, procure_method,
route_sequence, picking_type_id, delay, propagate, warehouse_id,
... |
31,722 | def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse:
"""
get-remote-data command: Returns an updated incident and entries
If offense's events were updated in the long running container, update the demisto incident.
Args:
client (Client): QRad... | def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse:
"""
get-remote-data command: Returns an updated incident and entries
If offense's events were updated in the long running container, update the demisto incident.
Args:
client (Client): QRad... |
6,077 | def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str job: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK ... | def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str job: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK ... |
25,968 | def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None,
sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None,
location_mode=None):
logger.debug('Getting data service client service_type=... | def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None,
sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None,
location_mode=None):
logger.debug('Getting data service client service_type=... |
54,216 | def group_settings_greedy(settings: Iterable[InitObsSetting]) \
-> Dict[InitObsSetting, List[InitObsSetting]]:
"""
Group a list of settings which can be simultaneously measured via
a greedy algorithm.
We construct a dictionary keyed by `max_setting` (see docstrings
for `_max_weight_state` a... | def group_settings_greedy(settings: Iterable[InitObsSetting]) \
-> Dict[InitObsSetting, List[InitObsSetting]]:
"""
Group a list of settings which can be simultaneously measured via
a greedy algorithm.
We construct a dictionary keyed by `max_setting` (see docstrings
for `_max_weight_state` a... |
20,273 | def unholder(item):
"""Get the held itme of an object holder of list of object holers."""
if isinstance(item, list):
return [i.held_object if hasattr(i, 'held_object') else i for i in item]
if hasattr(item, 'held_object'):
return item.held_object
return item
| def unholder(item):
"""Get the held item of an object holder or list of object holders."""
if isinstance(item, list):
return [i.held_object if hasattr(i, 'held_object') else i for i in item]
if hasattr(item, 'held_object'):
return item.held_object
return item
|
40,426 | def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]... | def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]... |
58,329 | def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
schemefor SDEs (s > 0.0) with stron... | def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
schemefor SDEs (s > 0.0) with stron... |
53,266 | def boris_push_relativistic(x, v, B, E, q, m, dt):
r"""
The explicit Boris pusher, including realtivistic corrections.
Parameters
----------
x : np.ndarray
particle position at full timestep, in SI (meter) units.
v : np.ndarray
particle velocity at half timestep, in SI (meter/se... | def boris_push_relativistic(x, v, B, E, q, m, dt):
r"""
The explicit Boris pusher, including realtivistic corrections.
Parameters
----------
x : np.ndarray
particle position at full timestep, in SI (meter) units.
v : np.ndarray
particle velocity at half timestep, in SI (meter/se... |
1,217 | def needs_nibabel_data(subdir=None):
""" Decorator for tests needing nibabel-data
Parameters
----------
subdir : None or str
Subdirectory we need in nibabel-data directory. If None, only require
nibabel-data directory itself.
Returns
-------
skip_dec : decorator
De... | def needs_nibabel_data(subdir=None):
""" Decorator for tests needing nibabel-data
Parameters
----------
subdir : None or str
Subdirectory we need in nibabel-data directory. If None, only require
nibabel-data directory itself.
Returns
-------
skip_dec : decorator
De... |
57,843 | def main() -> None:
try:
arguments = demisto.args()
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '/api/')
verify_certificate = not demisto.params().get('insecure', False)
first_fetch_time = arg_to_timestamp(
arg=demisto.para... | def main() -> None:
try:
arguments = demisto.args()
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '/api/')
verify_certificate = not demisto.params().get('insecure', False)
first_fetch_time = arg_to_timestamp(
arg=demisto.para... |
57,765 | def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: GreatHor... | def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: GreatHor... |
31,228 | def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = demisto.getArg("connector_id")
... | def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = str(args.get("connector_id"))
... |
31,366 | def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal... | def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal... |
5,862 | def _dirstats_preprocessing(samples, normalize, axis):
"""
Preprocessing of input for directional stats functions. Performs
input validation and if necesssary normalization. Used by
directionalvar and directionalmean.
Parameters
----------
samples : array
Input array. Must be at lea... | def _dirstats_preprocessing(samples, normalize, axis):
"""
Preprocessing of input for directional stats functions. Performs
input validation and if necesssary normalization. Used by
directionalvar and directionalmean.
Parameters
----------
samples : array
Input array. Must be at lea... |
42,827 | def backup_packages(backup_path, dry_run: bool = False, skip=False):
"""
Creates `packages` directory and places install list text files there.
"""
def run_cmd_if_no_dry_run(command, dest, dry_run) -> int:
if dry_run:
print_dry_run_copy_info(f"$ {command}", dest)
# Return -1 for any processes depending on c... | def backup_packages(backup_path, dry_run: bool = False, skip=False):
"""
Creates `packages` directory and places install list text files there.
"""
def run_cmd_if_no_dry_run(command, dest, dry_run) -> int:
if dry_run:
print_dry_run_copy_info(f"$ {command}", dest)
# Return -1 for any processes depending on c... |
38,427 | def register_keys(web3: Web3, keys: Optional[list]):
def not_none(x):
return x if x is not None else []
for key in not_none(keys):
register_key(web3, key)
| def register_keys(web3: Web3, keys: Optional[list]):
def not_none(x):
return x if x is not None else []
for key in keys or []:
register_key(web3, key)
|
39,301 | def vtk_points(points, deep=True):
"""Convert numpy or list of points to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# che... | def vtk_points(points, deep=True):
"""Convert numpy array or array-like to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# c... |
2,233 | def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten=None,
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Compo... | def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten="warn",
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Com... |
27,900 | def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
This function supports copies from host to host, from host to device,
from d... | def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
This function supports copies from host to host, from host to device,
from d... |
35,864 | def fetch_nearest_dataset_names(X,y=None, **kwargs):
"""
X: numpy array
an n_samples x n_features array of independent variables
y: numpy array or None (default: None)
a n_samples array of dependent variables
"""
df = pd.DataFrame({**{'x_'+str(i):x for i,x in enumerate(X.transpose)}
... | def nearest_datasets(X, y=None, **kwargs):
"""
X: numpy array
an n_samples x n_features array of independent variables
y: numpy array or None (default: None)
a n_samples array of dependent variables
"""
df = pd.DataFrame({**{'x_'+str(i):x for i,x in enumerate(X.transpose)}
... |
57,213 | def regenerate_missing_stats_for_exploration(
exp_id: str
) -> Tuple[list[str], list[str], int, int]:
"""Regenerates missing ExplorationStats models and entries for all
corresponding states in an exploration.
Args:
exp_id: str. The ID of the exp.
Returns:
4-tuple(missing_exp_stats,... | def regenerate_missing_stats_for_exploration(
exp_id: str
) -> Tuple[list[str], list[str], int, int]:
"""Regenerates missing ExplorationStats models and entries for all
corresponding states in an exploration.
Args:
exp_id: str. The ID of the exp.
Returns:
4-tuple(missing_exp_stats,... |
44,200 | def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
... | def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
... |
13,161 | def wait_for_deleted(
*,
pg_manager: gitlab.base.RESTManager,
object_id: int,
description: str,
hard_delete: bool = False,
) -> None:
"""Ensure the object specified can not be retrieved. If object still exists after
timeout period, fail the test"""
max_iterations = int(TIMEOUT / SLEEP_IN... | def wait_for_deleted(
*,
manager: gitlab.base.RESTManager,
object_id: int,
description: str,
hard_delete: bool = False,
) -> None:
"""Ensure the object specified can not be retrieved. If object still exists after
timeout period, fail the test"""
max_iterations = int(TIMEOUT / SLEEP_INTER... |
41,702 | def test_generate_packages_json(tmp_path):
# Set up directory to store dummy package files for SHA-256 hash verification
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
pkg_map = buildall.generate_dependency_graph(PACKAGES_DIR, {"pkg_1", "pkg_2"})
for pkg in pkg_map.values():
pkg.... | def test_generate_packages_json(tmp_path):
# Set up directory to store dummy package files for SHA-256 hash verification
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
pkg_map = buildall.generate_dependency_graph(PACKAGES_DIR, {"pkg_1", "pkg_2"})
for pkg in pkg_map.values():
pkg.... |
40,165 | def compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key: str = None,
local_l_var_key: str = None,
X_layers_key=None,
copy: bool = False,
):
"""Computes the library size
Parameters
----------
adata
anndata object containing counts
batch_key
... | def compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key_added: str = None,
local_l_var_key: str = None,
X_layers_key=None,
copy: bool = False,
):
"""Computes the library size
Parameters
----------
adata
anndata object containing counts
batch_key
... |
23,742 | def _get_lvm_cmdline(cmd):
''' Build command line for :program:`lvm` call.
The purpose of this function is to keep all the detailed lvm options in
one place.
:param cmd: array of str, where cmd[0] is action and the rest are arguments
:return array of str appropriate for subprocess.Popen
'''
... | def _get_lvm_cmdline(cmd):
''' Build command line for :program:`lvm` call.
The purpose of this function is to keep all the detailed lvm options in
one place.
:param cmd: array of str, where cmd[0] is action and the rest are arguments
:return array of str appropriate for subprocess.Popen
'''
... |
1,795 | def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of t... | def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of t... |
32,146 | def get_vulnerability_command():
vuln_id = demisto.args()['vulnerability_id']
scan_results_id = demisto.args()['scan_results_id']
page = int(demisto.args().get('page'))
limit = int(demisto.args().get('limit'))
if limit > 200:
limit = 200
vuln_filter = [{
'filterName': 'pluginID'... | def get_vulnerability_command():
vuln_id = demisto.args()['vulnerability_id']
scan_results_id = demisto.args()['scan_results_id']
page = int(demisto.args().get('page'))
limit = int(demisto.args().get('limit'))
if limit > 200:
limit = 200
vuln_filter = [{
'filterName': 'pluginID'... |
23,830 | def _capture_scm_auto_fields(conanfile, conanfile_dir, recipe_layout, ignore_dirty):
"""Deduce the values for the scm auto fields or functions assigned to 'url' or 'revision'
and replace the conanfile.py contents.
Returns a tuple with (scm_data, path_to_scm_local_directory)"""
scm_data = get_scm_d... | def _capture_scm_auto_fields(conanfile, conanfile_dir, recipe_layout, ignore_dirty):
"""Deduce the values for the scm auto fields or functions assigned to 'url' or 'revision'
and replace the conanfile.py contents.
Returns a tuple with (scm_data, path_to_scm_local_directory)"""
scm_data = get_scm_d... |
56,277 | def export(
w: Union[Shape, Workplane],
fname: str,
exportType: Optional[ExportLiterals] = None,
tolerance: float = 0.1,
angularTolerance: float = 0.1,
opt=None,
):
"""
Export Workplane or Shape to file. Multiple entities are converted to compound.
:param w: Shape or Workplane to ... | def export(
w: Union[Shape, Workplane],
fname: str,
exportType: Optional[ExportLiterals] = None,
tolerance: float = 0.1,
angularTolerance: float = 0.1,
opt=None,
):
"""
Export Workplane or Shape to file. Multiple entities are converted to compound.
:param w: Shape or Workplane to ... |
24,851 | def my_func(self): # [missing-return-doc]
"""This is a docstring.
Returns:
bool:
"""
return False
| def my_func(self): # [missing-return-doc]
"""warn_partial_google_returns_type
Returns:
bool:
"""
return False
|
7,867 | def test_get_atoms(res):
"""Tests evaluating single nuclide concentration."""
t, n = res.get_atoms("1", "Xe135")
t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])
n_ref = np.array(
[6.67473282e+08, 3.76986925e+14, 3.68587383e+14, 3.91338675e+14])
np.testing.assert_allclose(t, t_ref)... | def test_get_atoms(res):
"""Tests evaluating single nuclide concentration."""
t, n = res.get_atoms("1", "Xe135")
t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])
n_ref = np.array(
[6.67473282e+08, 3.76986925e+14, 3.68587383e+14, 3.91338675e+14])
np.testing.assert_allclose(t, t_ref)... |
12,030 | def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a... | def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a... |
22,389 | def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
has_errors = False
if... | def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
has_errors = False
if... |
1,342 | def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as... | def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as... |
2,714 | def inplace_swap_column(X, m, n):
"""
To swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of... | def inplace_swap_column(X, m, n):
"""
Swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X ... |
20,523 | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a N... | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a N... |
51,564 | def merge(datasets, bounds=None, res=None, nodata=None, precision=7, indexes=None,
method='first'):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order ... | def merge(datasets, bounds=None, res=None, nodata=None, precision=7, indexes=None,
method='first'):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order ... |
478 | def form_session_handler(v, text, msg):
"""
The form session handler will use the inbound text to answer the next question
in the open SQLXformsSession for the associated contact. If no session is open,
the handler passes. If multiple sessions are open, they are all closed and an
error message is di... | def form_session_handler(v, text, msg):
"""
The form session handler will use the inbound text to answer the next question
in the open SQLXformsSession for the associated contact. If no session is open,
the handler passes. If multiple sessions are open, they are all closed and an
error message is di... |
30,344 | def make_edit_request_for_an_object(obj_id, obj_type, params):
# Remove items with empty values:
params = {k: v for k, v in params.items() if v is not None}
url_suffix = '/{0}/{1}?with=attributes,sources'.format(OBJ_DIRECTORY[obj_type], obj_id)
if obj_type == 'indicator':
url_suffix += ',score'... | def make_edit_request_for_an_object(obj_id, obj_type, params):
# Remove items with empty values.
params = {k: v for k, v in params.items() if v is not None}
url_suffix = '/{0}/{1}?with=attributes,sources'.format(OBJ_DIRECTORY[obj_type], obj_id)
if obj_type == 'indicator':
url_suffix += ',score'... |
31,865 | def get_assignee(client: Client, args) -> str:
return client.live_assign_get(args)
| def get_assignee(client: Client, args) -> CommandResults:
return client.live_assign_get(args)
|
30,639 | def get_report_command(client: Client, args: dict) -> CommandResults:
report_id = args.get('report_id')
result = client.get_report_request(report_id)
headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
ioc_contents = []
contents = {
'ID... | def get_report_command(client: Client, args: dict) -> CommandResults:
report_id = args.get('report_id')
result = client.get_report_request(report_id)
headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
ioc_contents = []
contents = {
'ID... |
32,403 | def main() -> None:
"""main function, parses params and runs command_func functions
:return:
:rtype:
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
verify_certificate = not params.get('insecure', False)
SERVER_URL = params.get('serverUrl', '') + '/a... | def main() -> None:
"""main function, parses params and runs command_func functions
:return:
:rtype:
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
verify_certificate = not params.get('insecure', False)
server_url = params.get('serverUrl', '') + '/a... |
20,522 | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a N... | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a N... |
3,534 | def prepare_build(
project,
version=None,
commit=None,
record=True,
force=False,
immutable=True,
):
"""
Prepare a build in a Celery task for project and version.
If project has a ``build_queue``, execute the task on this build queue. If
project has ``skip... | def prepare_build(
project,
version=None,
commit=None,
record=True,
force=False,
immutable=True,
):
"""
Prepare a build in a Celery task for project and version.
If project has a ``build_queue``, execute the task on this build queue. If
project has ``skip... |
29,818 | def get_flink_job_name(flink_job: FlinkJobDetails):
return flink_job["name"].split(".", 2)[-1]
| def get_flink_job_name(flink_job: FlinkJobDetails) -> str:
return flink_job["name"].split(".", 2)[-1]
|
4,558 | def test_check_second_level_input():
from nilearn.glm.second_level.second_level import _check_second_level_input
with pytest.raises(ValueError,
match="A second level model requires a list with at "
"least two first level models or niimgs"):
_check_seco... | def test_check_second_level_input():
from nilearn.glm.second_level.second_level import _check_second_level_input
with pytest.raises(ValueError,
match="A second level model requires a list with at "
"least two first level models or niimgs"):
_check_seco... |
47,986 | def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
... | def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
... |
55,638 | def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp ima... | def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp ima... |
54,064 | def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
T... | def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
T... |
46,590 | def test_supported_chars() -> None:
supported_chars = "abc123_/:-\\+.$%*@"
c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})
OmegaConf.register_new_resolver("copy", lambda x: x)
assert c.dir1 == supported_chars
| def test_custom_resolver_param_supported_chars() -> None:
supported_chars = "abc123_/:-\\+.$%*@"
c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})
OmegaConf.register_new_resolver("copy", lambda x: x)
assert c.dir1 == supported_chars
|
23,588 | def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
The DIRINDEX model [1] modifies the DIRINT model implemented in
``p... | def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
The DIRINDEX model [1] modifies the DIRINT model implemented in
``p... |
3,368 | def parse_sources(config, filter_appconnect=True):
"""
Parses the given sources in the config string (from JSON).
"""
if not config:
return []
try:
sources = json.loads(config)
except Exception as e:
raise InvalidSourcesError(f"{e}")
try:
jsonschema.validat... | def parse_sources(config, filter_appconnect=True):
"""
Parses the given sources in the config string (from JSON).
"""
if not config:
return []
try:
sources = json.loads(config)
except Exception as e:
raise InvalidSourcesError(f"{e}")
try:
jsonschema.validat... |
29,875 | def run_combiner(sample_paths: List[str],
out_file: str,
tmp_path: str,
intervals: Optional[List[hl.utils.Interval]] = None,
header: Optional[str] = None,
sample_names: Optional[List[str]] = None,
branch_factor: int = ... | def run_combiner(sample_paths: List[str],
out_file: str,
tmp_path: str,
intervals: Optional[List[hl.utils.Interval]] = None,
header: Optional[str] = None,
sample_names: Optional[List[str]] = None,
branch_factor: int = ... |
54,596 | def blackify(base_branch, black_command):
current_branch = git("branch", "--show-current")
if not current_branch or base_branch == current_branch:
print("You need to check out a feature brach to work on")
return 1
if not os.path.exists(".git"):
print("Run me in the root of your rep... | def blackify(base_branch, black_command):
current_branch = git("branch", "--show-current")
if not current_branch or base_branch == current_branch:
print("You need to check out a feature brach to work on")
return 1
if not os.path.exists(".git"):
print("Run me in the root of your rep... |
33,325 | def test_run_script(setup_test_data):
""" Test that the populate_reporting_agency_tas script acts as expected """
connection = get_connection(read_only=False)
sql_path = str(settings.APP_DIR / "reporting/management/sql/populate_reporting_agency_tas.sql")
with open(sql_path) as f:
test_sql = f.r... | def test_run_script(setup_test_data):
""" Test that the populate_reporting_agency_tas script acts as expected """
connection = get_connection(read_only=False)
sql_path = str(settings.APP_DIR / "reporting" / "management" / "sql" / "populate_reporting_agency_tas.sql")
with open(sql_path) as f:
te... |
14,701 | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
import pyatmo
conf = hass.data.ge... | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
import pyatmo
auth = hass.data[DA... |
31,630 | def is_valid_attack_pattern(items):
try:
results = demisto.executeCommand('mitre-get-attack-pattern-value', {'attack_ids': items})
list_contents = results[0]['Contents']
values = [content.get('value') for content in list_contents]
return values if values else False
except Value... | def is_valid_attack_pattern(items):
try:
results = demisto.executeCommand('mitre-get-attack-pattern-value', {'attack_ids': items})
list_contents = results[0]['Contents']
values = [content.get('value') for content in list_contents]
return values if values else False
except Value... |
58,818 | def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes least-square solution to equation ``ax = b` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or... | def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes least-square solution to equation ``ax = b` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or... |
48,868 | def __getattr__(name):
# PEP-562: Lazy loaded attributes on python modules
path = __lazy_imports.get(name)
if path:
import operator
# Strip of the "airflow." prefix because of how `__import__` works (it always returns the top level
# module)
without_prefix = path.split('.', ... | def __getattr__(name):
# PEP-562: Lazy loaded attributes on python modules
path = __lazy_imports.get(name)
if path:
import operator
# Strip of the "airflow." prefix because of how `__import__` works (it always returns the top level
# module)
without_prefix = path.split('.', ... |
55,636 | def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a raw bayer image to RGB version of image. We are assuming a CFA
with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/G and a fix convolution
for the green pixels. To simplify calculations we expect the Height Widht to... | def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a raw bayer image to RGB version of image.
We are assuming a CFA
with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/G and a fix convolution
for the green pixels. To simplify calculations we expect the Height... |
57,676 | def fetch_incidents(client, last_run: Dict[str, int], first_fetch_time: Optional[int]):
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCID... | def fetch_incidents(client, last_run: Dict[str, int], first_fetch_time: Optional[int]):
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCID... |
11,444 | def resolve_tenant(default_tenant, tenant_id=None, **_):
# type: (str, Optional[str], **Any) -> str
"""Returns the correct tenant for a token request given a credential's configuration"""
if tenant_id is None:
return default_tenant
if (default_tenant == "adfs"
or os.environ.get(Environme... | def resolve_tenant(default_tenant, tenant_id=None, **_):
# type: (str, Optional[str], **Any) -> str
"""Returns the correct tenant for a token request given a credential's configuration"""
if tenant_id is None:
return default_tenant
if (default_tenant == "adfs"
or os.environ.get(Environme... |
41,899 | def _get_edf_plot(studies: List[Study]) -> Axes:
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Empirical Distribution Function Plot")
ax.set_xlabel("Objective Value")
ax.set_ylabel("Cumulative Pr... | def _get_edf_plot(studies: List[Study]) -> Axes:
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
ax = plt.subplot()
ax.set_title("Empirical Distribution Function Plot")
ax.set_xlabel("Objective Value")
ax.set_ylabel("Cumulative Probabil... |
52,291 | def get_parser():
parser = SCTArgumentParser(
description=(
"This function takes an anatomical image and its cord segmentation (binary file), and outputs the "
"cord segmentation labeled with vertebral level. The algorithm requires an initialization (first disc) and "
"th... | def get_parser():
parser = SCTArgumentParser(
description=(
"This function takes an anatomical image and its cord segmentation (binary file), and outputs the "
"cord segmentation labeled with vertebral level. The algorithm requires an initialization (first disc) and "
"th... |
47,101 | def create_rename_keys(config, base_model=False):
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
("blocks." + str(i) + ".norm1.weight", "vit.encoder.layer." + str(i) + ... | def create_rename_keys(config, base_model=False):
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.w... |
57,775 | def arduino_set_pin_command(server: Server, args: any) -> str:
pin_type: str = args.get('pin_type')
prefix: str = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number: int = int(args.get('pin_number'))
value: int = int(args.get('value'))
result: int = int(server.send_d... | def arduino_set_pin_command(server: Server, args: any) -> str:
pin_type: str = args.get('pin_type')
prefix: str = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number = int(args.get('pin_number'))
value = int(args.get('value'))
result = int(server.send_data(f"set:{pin_... |
30,483 | def add(num1, num2):
"""
Adds two integers.
Known limitations:
- The sum of both numbers cannot exceed 300.
- May take some time to count.
:param num1: First number.
:param num2: Second number.
:return: result of two numbers
"""
start = time()
sleep(num1)
sleep(num2)
... | def add(num1, num2):
"""
Adds two integers.
Known limitations:
- The sum of both numbers cannot exceed 300.
- May take some time to count.
:param num1: First number.
:param num2: Second number.
:return: result of two numbers
"""
start = time()
sleep(num1*num2)
sleep(nu... |
31,909 | def url_reputation_command():
"""
Execute SlashNext's url/reputation API against the requested url reputation command with the given parameters
@:return: None
"""
# 1. Get input url from Demisto
url = demisto.args().get('url')
# 2. Get the url reputation from SlashNext API
response = url... | def url_reputation_command():
"""
Execute SlashNext's url/reputation API against the requested url reputation command with the given parameters
@:return: None
"""
# 1. Get input url from Demisto
url = demisto.args().get('url')
# 2. Get the url reputation from SlashNext API
response = url... |
23,020 | def test_pathlib_path(tmpdir):
import pathlib
path = pathlib.Path(tmpdir)
ddf.to_parquet(path)
ddf2 = dd.read_parquet(path)
assert_eq(ddf, ddf2, check_divisions=False, check_index=False)
| def test_pathlib_path(tmpdir):
import pathlib
path = pathlib.Path(tmpdir)
ddf.to_parquet(path, engine=engine)
ddf2 = dd.read_parquet(path)
assert_eq(ddf, ddf2, check_divisions=False, check_index=False)
|
52,939 | def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
:param str cert_path: the path to the certificate
:return: the TEXT version of the certificate, as it would be displayed by openssl binary
"""
with open(cert_p... | def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
:param str cert_path: the path to the certificate
:returns: the TEXT version of the certificate, as it would be displayed by openssl binary
:rtype: str
"""
... |
30,704 | def get_conversation_by_name(conversation_name: str) -> dict:
"""
Get a slack conversation by its name
:param conversation_name: The conversation name
:return: The slack conversation
"""
integration_context = demisto.getIntegrationContext()
conversation_to_search = conversation_name.lower()... | def get_conversation_by_name(conversation_name: str) -> dict:
"""
Get a slack conversation by its name
:param conversation_name: The conversation name
:return: The slack conversation
"""
integration_context = demisto.getIntegrationContext()
conversation_to_search = conversation_name.lower()... |
28,980 | def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = b''
previous_child_character = None
for child in node['children'... | def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = b''
previous_child_character = None
for child in node['children'... |
44,484 | def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
"""An operator that configures the container to use GCP service account.
The user-gcp-sa secret is created as part of the kubeflow deployment that
st... | def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
"""An operator that configures the container to use GCP service account.
The user-gcp-sa secret is created as part of the kubeflow deployment that
st... |
25,079 | def _colorize(agg, color_key, how, span, min_alpha, name):
if cupy and isinstance(agg.data, cupy.ndarray):
from ._cuda_utils import interp
array = cupy.array
else:
interp = np.interp
array = np.array
if not agg.ndim == 3:
raise ValueError("agg must be 3D")
cats =... | def _colorize(agg, color_key, how, span, min_alpha, name):
if cupy and isinstance(agg.data, cupy.ndarray):
from ._cuda_utils import interp
array = cupy.array
else:
interp = np.interp
array = np.array
if not agg.ndim == 3:
raise ValueError("agg must be 3D")
cats =... |
6,793 | def get_contacts(email_strings):
email_addrs = []
for email_string in email_strings:
if email_string:
for email in email_string.split(","):
parsed_email = parseaddr(email)[1]
if parsed_email:
email_addrs.append(parsed_email)
contacts = []
for email in email_addrs:
email = get_email_without_lin... | def get_contacts(email_strings):
email_addrs = []
for email_string in email_strings:
if email_string:
for email in email_string.split(","):
parsed_email = parseaddr(email)[1]
if parsed_email:
email_addrs.append(parsed_email)
contacts = []
for email in email_addrs:
email = get_email_without_lin... |
22,282 | def purge_datasets(app, cutoff_time, remove_from_disk, info_only=False, force_retry=False):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
start = time.time()
if force_retry:
datasets =... | def purge_datasets(app, cutoff_time, remove_from_disk, info_only=False, force_retry=False):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
start = time.time()
if force_retry:
datasets =... |
47,936 | def render_routine(line):
"""Function for rendering single formula
Args:
line (tuple): formula idx, formula string, path to store rendered image
"""
formula, file_idx, folder_path = line
output_path = Path(folder_path, file_idx)
pre_name = os.path.normcase(output_path).replace('/', '_')... | def render_routine(line):
"""Function for rendering single formula
Args:
line (tuple): formula idx, formula string, path to store rendered image
"""
formula, file_idx, folder_path = line
output_path = Path(folder_path, file_idx)
pre_name = os.path.normcase(output_path).replace('/', '_')... |