positive stringlengths 100 30.3k | anchor stringlengths 1 15k |
|---|---|
def yieldable(self):
"""Return True if there is a line that the buffer can return, False otherwise."""
if self.read_buffer is None:
return False
t = _remove_trailing_new_line(self.read_buffer)
n = _find_furthest_new_line(t)
if n >= 0:
return True
... | Return True if there is a line that the buffer can return, False otherwise. |
def _is_ignorable_404(uri):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
urls = getattr(django_settings, "IGNORABLE_404_URLS", ())
return any(pattern.search(uri) for pattern in urls) | Returns True if the given request *shouldn't* notify the site managers. |
def reply(self):
"""Reply to the selected status"""
status = self.get_selected_status()
app, user = self.app, self.user
if not app or not user:
self.footer.draw_message("You must be logged in to reply", Color.RED)
return
compose_modal = ComposeModal(self.... | Reply to the selected status |
def get_mapping(session, table, candidates, generator, key_map):
"""Generate map of keys and values for the candidate from the generator.
:param session: The database session.
:param table: The table we will be inserting into (i.e. Feature or Label).
:param candidates: The candidates to get mappings fo... | Generate map of keys and values for the candidate from the generator.
:param session: The database session.
:param table: The table we will be inserting into (i.e. Feature or Label).
:param candidates: The candidates to get mappings for.
:param generator: A generator yielding (candidate_id, key, value)... |
def output(self, msg, indent, status=None):
""" Alias for print_indent_msg with color determined by status."""
color = None
if self.use_color:
color = get_color_from_status(status)
print_indent_msg(msg, indent, color) | Alias for print_indent_msg with color determined by status. |
def from_config(cls, cp, model, nprocesses=1, use_mpi=False):
"""Loads the sampler from the given config file."""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of walkers to... | Loads the sampler from the given config file. |
def create_node(kwargs):
'''
Convenience function to make the rest api call for node creation.
'''
if not isinstance(kwargs, dict):
kwargs = {}
# Required parameters
params = {
'Action': 'CreateInstance',
'InstanceType': kwargs.get('size_id', ''),
'RegionId': kwa... | Convenience function to make the rest api call for node creation. |
def get_history_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the history of a particular aircraft by its tail number.
This method can be used to get the history of a particular aircraft by its tail number.
It checks the user authentication and returns the data accordingly.
... | Fetch the history of a particular aircraft by its tail number.
This method can be used to get the history of a particular aircraft by its tail number.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
... |
def calculate(cls, byte_arr, crc=0):
"""Compute CRC for input bytes."""
for byte in byte_iter(byte_arr):
# Taken verbatim from FIT SDK docs
tmp = cls.CRC_TABLE[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ cls.CRC_TABLE[byte & 0xF]
tm... | Compute CRC for input bytes. |
def exists(name, attributes):
'''
Make sure the given attributes exist on the file/directory
name
The path to the file/directory
attributes
The attributes that should exist on the file/directory, this is accepted as
an array, with key and value split with an equals sign, if you... | Make sure the given attributes exist on the file/directory
name
The path to the file/directory
attributes
The attributes that should exist on the file/directory, this is accepted as
an array, with key and value split with an equals sign, if you want to specify
a hex value then ... |
def factory(**default_opts):
"""
Factory function to create decorators for tasks' run methods. Default options for the decorator
function can be given in *default_opts*. The returned decorator can be used with or without
function invocation. Example:
.. code-block:: python
@factory(digits=... | Factory function to create decorators for tasks' run methods. Default options for the decorator
function can be given in *default_opts*. The returned decorator can be used with or without
function invocation. Example:
.. code-block:: python
@factory(digits=2)
def runtime(fn, opts, task, *a... |
async def get_stats(self, battletag: str, regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
platform=None, _session=None, handle_ratelimit=None, max_tries=None, request_timeout=None):
"""Returns the stats for the profiles on the specified regions and platform. The format for regions ... | Returns the stats for the profiles on the specified regions and platform. The format for regions without a matching user, the format is the same as get_profile.
The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats spec... |
def is_restricted(self, assets, dt):
"""
Returns whether or not an asset or iterable of assets is restricted
on a dt.
"""
if isinstance(assets, Asset):
return self._is_restricted_for_asset(assets, dt)
is_restricted = partial(self._is_restricted_for_asset, dt=... | Returns whether or not an asset or iterable of assets is restricted
on a dt. |
def groups_moderators(self, room_id=None, group=None, **kwargs):
"""Lists all moderators of a group."""
if room_id:
return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs)
elif group:
return self.__call_api_get('groups.moderators', roomName=group, k... | Lists all moderators of a group. |
def export(self, node):
"""Export tree starting at `node`."""
attriter = self.attriter or (lambda attr_values: attr_values)
return self.__export(node, self.dictcls, attriter, self.childiter) | Export tree starting at `node`. |
def __get_config(self):
""" Really connect """
if not self.name:
room_resp = self.conn.get(BASE_URL + "/new")
room_resp.raise_for_status()
url = room_resp.url
try:
self.name = re.search(r"r/(.+?)$", url).group(1)
except Excepti... | Really connect |
def update_balances(self, recursive=True):
"""
Calculate tree balance factor
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_balances()
if self.node.right:
self.node.right.update... | Calculate tree balance factor |
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key) | return the metadata pathname for this key |
async def withdraw_bulk(self, *args, **kwargs):
"""
Withdraw funds requests to user wallet
Accepts:
- coinid [string] (blockchain id (example: BTCTEST, LTCTEST))
- address [string] withdrawal address (in hex for tokens)
- amount [int] withdrawal amount multipl... | Withdraw funds requests to user wallet
Accepts:
- coinid [string] (blockchain id (example: BTCTEST, LTCTEST))
- address [string] withdrawal address (in hex for tokens)
- amount [int] withdrawal amount multiplied by decimals_k (10**8)
Returns dictionary with following ... |
def update(self, enabled=values.unset, webhook_url=values.unset,
webhook_method=values.unset):
"""
Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method... | Update the ExportConfigurationInstance
:param bool enabled: The enabled
:param unicode webhook_url: The webhook_url
:param unicode webhook_method: The webhook_method
:returns: Updated ExportConfigurationInstance
:rtype: twilio.rest.preview.bulk_exports.export_configuration.Expo... |
def poll(self):
""" Poll the job status.
Returns the changes in this iteration."""
self.runner.module_name = 'async_status'
self.runner.module_args = "jid=%s" % self.jid
self.runner.pattern = "*"
self.runner.background = 0
self.runner.inventory.restrict_to(s... | Poll the job status.
Returns the changes in this iteration. |
def modify(name, **kwargs):
'''
Modify an existing job in the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600
'''
ret = {'comment': '',
'changes': {},
'result': True}
time_conflict = False
for i... | Modify an existing job in the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600 |
def close(self):
""" Close all connections and empty the pool.
This method is thread safe.
"""
if self._closed:
return
try:
with self.lock:
if not self._closed:
self._closed = True
for address in list... | Close all connections and empty the pool.
This method is thread safe. |
def setup_scrollarea(self):
"""Setup the scrollarea that will contain the FigureThumbnails."""
self.view = QWidget()
self.scene = QGridLayout(self.view)
self.scene.setColumnStretch(0, 100)
self.scene.setColumnStretch(2, 100)
self.scrollarea = QScrollArea()
self.... | Setup the scrollarea that will contain the FigureThumbnails. |
def glob(self, pattern):
""" Return a list of Path objects that match the pattern.
`pattern` - a path relative to this directory, with wildcards.
For example, ``Path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their :file:`bin` directories.
.. see... | Return a list of Path objects that match the pattern.
`pattern` - a path relative to this directory, with wildcards.
For example, ``Path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their :file:`bin` directories.
.. seealso:: :func:`glob.glob`
.. ... |
def fcoe_get_login_output_fcoe_login_list_fcoe_login_fcoe_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
... | Auto Generated Code |
def fetch_and_transform(
transformed_filename,
transformer,
loader,
source_filename,
source_url,
subdir=None):
"""
Fetch a remote file from `source_url`, save it locally as `source_filename` and then use
the `loader` and `transformer` function arguments to tur... | Fetch a remote file from `source_url`, save it locally as `source_filename` and then use
the `loader` and `transformer` function arguments to turn this saved data into an in-memory
object. |
def get_logger(name=None, format_string=None):
"""
:type name: str
:param name: used for declaring log channels.
:type format_string: str
:param format_string: for custom formatting
"""
logging.captureWarnings(True)
log = logging.getLogger(name)
# Bind custom method to instance.
... | :type name: str
:param name: used for declaring log channels.
:type format_string: str
:param format_string: for custom formatting |
def utcoffset(self):
"""
:return:
None or a datetime.timedelta() of the offset from UTC
"""
if self.tzinfo is None:
return None
return self.tzinfo.utcoffset(self.replace(year=2000)) | :return:
None or a datetime.timedelta() of the offset from UTC |
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
split_dn = dn.split(r'.')
leftmost, remainder = split_dn[0], split_dn[1:]
wildcards = left... | Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3 |
def captureQuery(self, query, params=(), engine=None, **kwargs):
"""
Creates an event for a SQL query.
>>> client.captureQuery('SELECT * FROM foo')
"""
return self.capture(
'raven.events.Query', query=query, params=params, engine=engine,
**kwargs) | Creates an event for a SQL query.
>>> client.captureQuery('SELECT * FROM foo') |
def create_segments(self, segments):
"""Enqueue segment creates"""
for segment in segments:
s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
a_const.CREATE)
self.provision_queue.put(s_res) | Enqueue segment creates |
def get(self, arg):
"""
Return instance object with given EC2 ID or nametag.
"""
try:
reservations = self.get_all_instances(filters={'tag:Name': [arg]})
instance = reservations[0].instances[0]
except IndexError:
try:
instance = ... | Return instance object with given EC2 ID or nametag. |
def indices_to_labels(self, indices: Sequence[int]) -> List[str]:
""" Converts a sequence of indices into their corresponding labels."""
return [(self.INDEX_TO_LABEL[index]) for index in indices] | Converts a sequence of indices into their corresponding labels. |
def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
"""
corruption = self.builder_... | Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label. |
def par_relax_AX(i):
"""Parallel implementation of relaxation if option ``RelaxParam`` !=
1.0.
"""
global mp_X
global mp_Xnr
global mp_DX
global mp_DXnr
mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]]
mp_DXnr[i] = mp_DX[i]
if mp_rlx != 1.0:
grpind = slice(mp_... | Parallel implementation of relaxation if option ``RelaxParam`` !=
1.0. |
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream):
"""Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry ... | Extracts metadata from a file entry.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
file_entry (dfvfs.FileEntry): file entry to extract metadata from.
data_stream (dfvfs.DataStream): data stream or None... |
def strip_prompt(self, a_string):
""" Strip 'Done' from command output """
output = super(NetscalerSSH, self).strip_prompt(a_string)
lines = output.split(self.RESPONSE_RETURN)
if "Done" in lines[-1]:
return self.RESPONSE_RETURN.join(lines[:-1])
else:
retur... | Strip 'Done' from command output |
def _create_significance_table(self,data):
"""
Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Re... | Create a table containing p-values for significance tests. Add features of
the distributions and the p-values to the dataframe.
Parameters
----------
data : pandas DataFrame
The input dataset.
Returns
----------
df : pandas DataFrame
... |
def rlmb_tiny_stochastic():
"""Tiny setting with a stochastic next-frame model."""
hparams = rlmb_ppo_tiny()
hparams.epochs = 1 # Too slow with 2 for regular runs.
hparams.generative_model = "next_frame_basic_stochastic"
hparams.generative_model_params = "next_frame_basic_stochastic"
return hparams | Tiny setting with a stochastic next-frame model. |
def cli(**args):
""" Shakedown is a DC/OS test-harness wrapper for the pytest tool.
"""
import shakedown
# Read configuration options from ~/.shakedown (if exists)
args = read_config(args)
# Set configuration defaults
args = set_config_defaults(args)
if args['quiet']:
shakedow... | Shakedown is a DC/OS test-harness wrapper for the pytest tool. |
def _parse_network_settings(opts, current):
'''
Filters given options and outputs valid settings for
the global network settings file.
'''
# Normalize keys
opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
current = dict((k.lower(), v) for (k, v) in six.iteritems(current))
#... | Filters given options and outputs valid settings for
the global network settings file. |
def delete_connector_c_pool(name, target='server', cascade=True, server=None):
'''
Delete a connection pool
'''
data = {'target': target, 'cascade': cascade}
return _delete_element(name, 'resources/connector-connection-pool', data, server) | Delete a connection pool |
def _check_lookup_prop(self, result_data):
"""Checks that selected lookup property can be used for this testcase."""
if not self._lookup_prop:
return False
if not result_data.get("id") and self._lookup_prop != "name":
return False
if not result_data.get("title") ... | Checks that selected lookup property can be used for this testcase. |
def list_path_traversal(path):
'''
Returns a full list of directories leading up to, and including, a path.
So list_path_traversal('/path/to/salt') would return:
['/', '/path', '/path/to', '/path/to/salt']
in that order.
This routine has been tested on Windows systems as well.
list_pat... | Returns a full list of directories leading up to, and including, a path.
So list_path_traversal('/path/to/salt') would return:
['/', '/path', '/path/to', '/path/to/salt']
in that order.
This routine has been tested on Windows systems as well.
list_path_traversal('c:\\path\\to\\salt') on Window... |
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the... | Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call... |
def path(self):
"""Timestamp for placing into filepaths."""
out = self.datetime.strftime("%Y-%m-%d")
out += " "
ssm = (
self.datetime - self.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
).total_seconds()
out += str(int(ssm)).zfill(5)
ret... | Timestamp for placing into filepaths. |
def register_event(self, direction, verb, child_fn, priority=10):
"""Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int):... | Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first).
Note: `all` will ... |
def _refine_v(seq, species):
'''
Completes the 5' end of a a truncated sequence with germline nucleotides.
Input is a MongoDB dict (seq) and the species.
'''
vgerm = germlines.get_germline(seq['v_gene']['full'], species)
aln = global_alignment(seq['vdj_nt'], vgerm)
... | Completes the 5' end of a a truncated sequence with germline nucleotides.
Input is a MongoDB dict (seq) and the species. |
def format_datetime(d: PotentialDatetimeType,
fmt: str,
default: str = None) -> Optional[str]:
"""
Format a datetime with a ``strftime`` format specification string, or
return ``default`` if the input is ``None``.
"""
d = coerce_to_pendulum(d)
if d is None... | Format a datetime with a ``strftime`` format specification string, or
return ``default`` if the input is ``None``. |
def index():
"""
This is not served anywhere in the web application.
It is used explicitly in the context of generating static files since
flask-frozen requires url_for's to crawl content.
url_for's are not used with file.show_file directly and are instead
dynamically generated through javascrip... | This is not served anywhere in the web application.
It is used explicitly in the context of generating static files since
flask-frozen requires url_for's to crawl content.
url_for's are not used with file.show_file directly and are instead
dynamically generated through javascript for performance purpose... |
def load(parser, serializer):
"""Returns a dictionary of builtin functions for Fortran. Checks the
cache first to see if we have a serialized version. If we don't, it
loads it from the XML file.
:arg parser: the DocParser instance for parsing the XML tags.
:arg serializer: a Serializer instance fro... | Returns a dictionary of builtin functions for Fortran. Checks the
cache first to see if we have a serialized version. If we don't, it
loads it from the XML file.
:arg parser: the DocParser instance for parsing the XML tags.
:arg serializer: a Serializer instance from the CodeParser to cache
the l... |
def get_instance(self, payload):
"""
Build an instance of MessageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.session.message.MessageInstance
:rtype: twilio.rest.messaging.v1.session.message.MessageInstance
"""
r... | Build an instance of MessageInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.session.message.MessageInstance
:rtype: twilio.rest.messaging.v1.session.message.MessageInstance |
def _is_version_duplicate(self):
""" Define should new version be created for object or no.
Reasons to provide custom check instead of default `ignore_revision_duplicates`:
- no need to compare all revisions - it is OK if right object version exists in any revision;
- need... | Define should new version be created for object or no.
Reasons to provide custom check instead of default `ignore_revision_duplicates`:
- no need to compare all revisions - it is OK if right object version exists in any revision;
- need to compare object attributes (not serialized... |
def getModulePath(project_path,module_name,verbose):
'''Searches for module_name in searchpath and returns the filepath.
If no filepath was found, returns None.'''
if not module_name:
return None
sys.path.append(project_path)
try:
package = pkgutil.get_loader(module_name)
except ... | Searches for module_name in searchpath and returns the filepath.
If no filepath was found, returns None. |
def _read_protocol_line(self):
"""Reads the next line of instrumentation output relevant to snippets.
This method will skip over lines that don't start with 'SNIPPET' or
'INSTRUMENTATION_RESULT'.
Returns:
(str) Next line of snippet-related instrumentation output, stripped.
... | Reads the next line of instrumentation output relevant to snippets.
This method will skip over lines that don't start with 'SNIPPET' or
'INSTRUMENTATION_RESULT'.
Returns:
(str) Next line of snippet-related instrumentation output, stripped.
Raises:
jsonrpc_clien... |
def generate_signature(method, version, endpoint,
date, rel_url, content_type, content,
access_key, secret_key, hash_type):
'''
Generates the API request signature from the given parameters.
'''
hash_type = hash_type
hostname = endpoint._val.netloc # FI... | Generates the API request signature from the given parameters. |
def list_to_serialized(ref, the_list):
"""Serialize the list of elements
Used for the retention store
:param ref: Not used
:type ref:
:param the_list: dictionary to convert
:type the_list: dict
:return: dict of serialized
:rtype: dict
"""
result = []
for elt in the_list:
... | Serialize the list of elements
Used for the retention store
:param ref: Not used
:type ref:
:param the_list: dictionary to convert
:type the_list: dict
:return: dict of serialized
:rtype: dict |
def _compute_distance(self, dists, C):
"""
Compute the second term of the equation described on p. 1144:
`` c4 * np.log(sqrt(R ** 2. + h ** 2.)
"""
return C["c4"] * np.log(np.sqrt(dists.rrup ** 2. + C["h"] ** 2.)) | Compute the second term of the equation described on p. 1144:
`` c4 * np.log(sqrt(R ** 2. + h ** 2.) |
def proj_l2(v, gamma, axis=None):
r"""Compute the projection operator of the :math:`\ell_2` norm.
The projection operator of the uncentered :math:`\ell_2` norm,
.. math::
\mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \;
\text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \l... | r"""Compute the projection operator of the :math:`\ell_2` norm.
The projection operator of the uncentered :math:`\ell_2` norm,
.. math::
\mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \;
\text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma
can be computed as :mat... |
def find_max_and_min_frequencies(name, mass_range_params, freqs):
"""
ADD DOCS
"""
cutoff_fns = pnutils.named_frequency_cutoffs
if name not in cutoff_fns.keys():
err_msg = "%s not recognized as a valid cutoff frequency choice." %name
err_msg += "Recognized choices: " + " ".join(cuto... | ADD DOCS |
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None):
"""
Return the list of matching elementary predications in *xmrs*.
:class:`~delphin.mrs.components.ElementaryPredication` objects for
*xmrs* match if their `nodeid` matches *nodeid*,
`intrinsic_variable` matches *iv*, `label` matche... | Return the list of matching elementary predications in *xmrs*.
:class:`~delphin.mrs.components.ElementaryPredication` objects for
*xmrs* match if their `nodeid` matches *nodeid*,
`intrinsic_variable` matches *iv*, `label` matches *label*, and
`pred` to *pred*. The *nodeid*, *iv*, *label*, and *pred* fi... |
def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's rpm database (not generally
recommended).
root
use root as top level directory (default: "/")
CLI Examples:
.. code-block... | List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's rpm database (not generally
recommended).
root
use root as top level directory (default: "/")
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd... |
def do_stop_role(self, role):
"""
Stop a role
Usage:
> stop_role <role> Stops this role
"""
if not role:
return None
if not self.has_cluster():
return None
if '-' not in role:
print("Please enter a valid role name... | Stop a role
Usage:
> stop_role <role> Stops this role |
def tx2genefile(gtf, out_file=None, data=None, tsv=True, keep_version=False):
"""
write out a file of transcript->gene mappings.
"""
if tsv:
extension = ".tsv"
sep = "\t"
else:
extension = ".csv"
sep = ","
if file_exists(out_file):
return out_file
with... | write out a file of transcript->gene mappings. |
def close(self, filehandle):
"""Close openend file if no longer used."""
with self.lock:
if filehandle in self.files:
self.files[filehandle] -= 1
# trim the file cache
index = 0
size = len(self.past)
while size >... | Close openend file if no longer used. |
def zone_data(self):
"""Get zone data"""
if self._zone_data is None:
self._zone_data = self._get('/zones/' + self.domain).json()
return self._zone_data | Get zone data |
def load_module(self, name):
"""Load and return a module"""
if name in sys.modules:
return sys.modules[name]
# load the actual import hook module
module_name = self.mount2name(name)
__import__(module_name)
# alias the import hook module to the mount, so both c... | Load and return a module |
def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state):
"""
Returns a list of approved attributes
:type idp: saml.server.Server
:type idp_policy: saml2.assertion.Policy
:type sp_entity_id: str
:type state: satosa.state.State
:rtype: list[str]
... | Returns a list of approved attributes
:type idp: saml.server.Server
:type idp_policy: saml2.assertion.Policy
:type sp_entity_id: str
:type state: satosa.state.State
:rtype: list[str]
:param idp: The saml frontend idp server
:param idp_policy: The idp policy
... |
def from_path(kls, vertices):
"""
Given an Nx3 array of vertices that constitute a single path,
generate a skeleton with appropriate edges.
"""
if vertices.shape[0] == 0:
return PrecomputedSkeleton()
skel = PrecomputedSkeleton(vertices)
edges = np.zeros(shape=(skel.vertices.shape[0] ... | Given an Nx3 array of vertices that constitute a single path,
generate a skeleton with appropriate edges. |
def replicate(source, model, cache=None):
'''Replicates the `source` object to `model` class and returns its
reflection.'''
target = replicate_no_merge(source, model, cache=cache)
if target is not None:
db = object_session(source)
target = db.merge(target)
return target | Replicates the `source` object to `model` class and returns its
reflection. |
def _get_sorted_section(self, nts_section):
"""Sort GO IDs in each section, if requested by user."""
#pylint: disable=unnecessary-lambda
if self.section_sortby is True:
return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt))
if self.section_sortby is False or... | Sort GO IDs in each section, if requested by user. |
def add_data(self, metric, value, ts=None):
"""
Add data to queue
:param metric: the metric name
:type metric: str
:param value: the value of data
:type value: int
:param ts: the timestamp
:type ts: int | None
:return: True if added successfully, ... | Add data to queue
:param metric: the metric name
:type metric: str
:param value: the value of data
:type value: int
:param ts: the timestamp
:type ts: int | None
:return: True if added successfully, otherwise False
:rtype: bool |
def load_zip_data(zipname, f_sino_real, f_sino_imag,
f_angles=None, f_phantom=None, f_info=None):
"""Load example sinogram data from a .zip file"""
ret = []
with zipfile.ZipFile(str(zipname)) as arc:
sino_real = np.loadtxt(arc.open(f_sino_real))
sino_imag = np.loadtxt(arc.o... | Load example sinogram data from a .zip file |
def value_counts(expr, sort=True, ascending=False, dropna=False):
"""
Return object containing counts of unique values.
The resulting object will be in descending order so that the first element is the most frequently-occuring
element. Exclude NA values by default
:param expr: sequence
:param ... | Return object containing counts of unique values.
The resulting object will be in descending order so that the first element is the most frequently-occuring
element. Exclude NA values by default
:param expr: sequence
:param sort: if sort
:type sort: bool
:param dropna: Don’t include counts of ... |
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step):
'''
This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wav_file: the path of the audio filename
- gt_file: the path of the ground truth filename
... | This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wav_file: the path of the audio filename
- gt_file: the path of the ground truth filename
(a csv file of the form <segment start in seconds>,<segment end ... |
def reset_small(self, eq):
"""Reset numbers smaller than 1e-12 in f and g equations"""
assert eq in ('f', 'g')
for idx, var in enumerate(self.__dict__[eq]):
if abs(var) <= 1e-12:
self.__dict__[eq][idx] = 0 | Reset numbers smaller than 1e-12 in f and g equations |
def _get_tag_match(self, ele, tree):
"""
Match tag
:param ele:
:type ele:
:param tree:
:type tree: None, list
:return:
:rtype: None | list
"""
if tree in [None, []]:
return [ele]
res = []
t = tree[0]
br... | Match tag
:param ele:
:type ele:
:param tree:
:type tree: None, list
:return:
:rtype: None | list |
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
if nidm_version['major'] < 1 or \
(nidm_version['major'] == 1 and nidm_version['minor'] < 3):
self.type = NLX_OLD_FSL
atts = (
(PROV['type'], self.typ... | Create prov entities and activities. |
def _multitaper_cross_spectrum(self, clm, slm, k, convention='power',
unit='per_l', clat=None, clon=None,
coord_degrees=True, lmax=None,
taper_wt=None):
"""
Return the multitaper cross-spectrum estim... | Return the multitaper cross-spectrum estimate and standard error for
two input SHCoeffs class instances. |
def post_periodic_filtered(values, repeat_after, block):
"""
After every *repeat_after* items, blocks the next *block* items from
*values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after*
can't be 0. For example, to block every tenth item read from an ADC::
from gpiozero import M... | After every *repeat_after* items, blocks the next *block* items from
*values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after*
can't be 0. For example, to block every tenth item read from an ADC::
from gpiozero import MCP3008
from gpiozero.tools import post_periodic_filtered
... |
def has_attr(self, table_name, attr_name):
"""
:param str table_name: Table name that the attribute exists.
:param str attr_name: Attribute name to be tested.
:return: |True| if the table has the attribute.
:rtype: bool
:raises simplesqlite.TableNotFoundError:
... | :param str table_name: Table name that the attribute exists.
:param str attr_name: Attribute name to be tested.
:return: |True| if the table has the attribute.
:rtype: bool
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:Sample Code:
... |
def render_povray(scene, filename='ipython', width=600, height=600,
antialiasing=0.01, extra_opts={}):
'''Render the scene with povray for publication.
:param dict scene: The scene to render
:param string filename: Output filename or 'ipython' to render in the notebook.
:param int wid... | Render the scene with povray for publication.
:param dict scene: The scene to render
:param string filename: Output filename or 'ipython' to render in the notebook.
:param int width: Width in pixels.
:param int height: Height in pixels.
:param dict extra_opts: Dictionary to merge/override with the ... |
def subnetpool_create(request, name, prefixes, **kwargs):
"""Create a subnetpool.
ip_version is auto-detected in back-end.
Parameters:
request -- Request context
name -- Name for subnetpool
prefixes -- List of prefixes for pool
Keyword Arguments (optional):... | Create a subnetpool.
ip_version is auto-detected in back-end.
Parameters:
request -- Request context
name -- Name for subnetpool
prefixes -- List of prefixes for pool
Keyword Arguments (optional):
min_prefixlen -- Minimum prefix length for allocations f... |
def populate_user(self):
"""
Populates the Django user object using the default bind credentials.
"""
user = None
try:
# self.attrs will only be non-None if we were able to load this user
# from the LDAP directory, so this filters out nonexistent users.
... | Populates the Django user object using the default bind credentials. |
def create(cls, statement_format, date_start, date_end,
monetary_account_id=None, regional_format=None,
custom_headers=None):
"""
:type user_id: int
:type monetary_account_id: int
:param statement_format: The format type of statement. Allowed values:
... | :type user_id: int
:type monetary_account_id: int
:param statement_format: The format type of statement. Allowed values:
MT940, CSV, PDF.
:type statement_format: str
:param date_start: The start date for making statements.
:type date_start: str
:param date_end: Th... |
def reaction_signature(eq, direction=False, stoichiometry=False):
"""Return unique signature object for :class:`Reaction`.
Signature objects are hashable, and compare equal only if the reactions
are considered the same according to the specified rules.
Args:
direction: Include reaction directi... | Return unique signature object for :class:`Reaction`.
Signature objects are hashable, and compare equal only if the reactions
are considered the same according to the specified rules.
Args:
direction: Include reaction directionality when considering equality.
stoichiometry: Include stoichi... |
def crop_to_fit(self, image_size, view_size):
"""
Set cropping values in `p:blipFill/a:srcRect` such that an image of
*image_size* will stretch to exactly fit *view_size* when its aspect
ratio is preserved.
"""
self.blipFill.crop(self._fill_cropping(image_size, view_size)... | Set cropping values in `p:blipFill/a:srcRect` such that an image of
*image_size* will stretch to exactly fit *view_size* when its aspect
ratio is preserved. |
def ecs_idsKEGG(organism):
"""
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
    :param organism: an organism as listed in organismsKEGG()
    :returns: a Pandas dataframe with 'ec' and 'KEGGid'.
"""
kegg_ec=urlopen("http://rest.kegg.jp/link/"+organism+"/enzyme").read... | Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism
:param organism: an organism as listed in organismsKEGG()
    :returns: a Pandas dataframe with 'ec' and 'KEGGid'.
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context[... | Get the content as if it had been rendered by the default
non-documenting renderer. |
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can ... | Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. Value can have None. Wh... |
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False):
'''
Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- fs: sampling freq
- st_win, st_step: window size and step in seconds
- smoo... | Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- fs: sampling freq
- st_win, st_step: window size and step in seconds
- smoothWindow: (optional) smooth window (in seconds)
        - weight: (optional) weight f...
def receive_empty(self, message):
"""
Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to
"""
logger.debug("receive_empty - " + str(message))
... | Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to |
def post(self, endpoint, **kwargs):
    """Create a resource with an HTTP POST.
    Args:
        endpoint: resource endpoint.
    """
    # Delegate to the shared request helper, binding the HTTP verb to POST.
    verb = requests.post
    return self._request(verb, endpoint, **kwargs)
Args:
endpoint: resource endpoint. |
def get_hotkey_name(names=None):
"""
Returns a string representation of hotkey from the given key names, or
the currently pressed keys if not given. This function:
- normalizes names;
- removes "left" and "right" prefixes;
- replaces the "+" key name with "plus" to avoid ambiguity;
- puts ... | Returns a string representation of hotkey from the given key names, or
the currently pressed keys if not given. This function:
- normalizes names;
- removes "left" and "right" prefixes;
- replaces the "+" key name with "plus" to avoid ambiguity;
- puts modifier keys first, in a standardized order;... |
def run(self):
"""
the main loop
"""
try:
master_process = BackgroundProcess.objects.filter(pk=self.process_id).first()
if master_process:
master_process.last_update = now()
master_process.message = 'init child processes'
... | the main loop |
def write_batch_json(self, content):
"""Write batch json data to a file."""
timestamp = str(time.time()).replace('.', '')
batch_json_file = os.path.join(
self.tcex.args.tc_temp_path, 'batch-{}.json'.format(timestamp)
)
with open(batch_json_file, 'w') as fh:
... | Write batch json data to a file. |
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read(pattern=r"[>#]")
self.ansi_escape_codes = True
self.set_base_prompt()
self.disable_paging()
# Clear the read buffer
time.sleep(0.3 * self.gl... | Prepare the session after the connection has been established. |
def _load_yaml_config(cls, config_data, filename="(unknown)"):
"""Load a yaml config file."""
try:
config = yaml.safe_load(config_data)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = ("Inval... | Load a yaml config file. |
def filter(self, obj, *args, **kwargs):
"""
Filter the given object through the filter chain.
:param obj: The object to filter
:param args: Additional arguments to pass to each filter function.
:param kwargs: Additional keyword arguments to pass to each filter
... | Filter the given object through the filter chain.
:param obj: The object to filter
:param args: Additional arguments to pass to each filter function.
:param kwargs: Additional keyword arguments to pass to each filter
function.
:return: The filtered object or :data... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.