positive stringlengths 100 30.3k | anchor stringlengths 1 15k |
|---|---|
def get_pod_by_uid(uid, podlist):
"""
Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found
"""
for pod in podlist.get("items", []):
try:
if pod["meta... | Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found |
def bind_topic_exchange(self, exchange_name, routing_key, queue_name):
"""
绑定主题交换机和队列
:param exchange_name: 需要绑定的交换机名
:param routing_key:
:param queue_name: 需要绑定的交换机队列名
:return:
"""
self._channel.queue_declare(
queue=queue_name,
aut... | 绑定主题交换机和队列
:param exchange_name: 需要绑定的交换机名
:param routing_key:
:param queue_name: 需要绑定的交换机队列名
:return: |
async def stop(self):
"""
Stops playback from lavalink.
.. important::
This method will clear the queue.
"""
await self.node.stop(self.channel.guild.id)
self.queue = []
self.current = None
self.position = 0
self._paused = False | Stops playback from lavalink.
.. important::
This method will clear the queue. |
def split_comments(comments):
"""Split COMMENTS into flag comments and other comments. Flag
comments are those that begin with '#,', e.g. '#,fuzzy'."""
flags = []
other = []
for c in comments:
if len(c) > 1 and c[1] == ',':
flags.append(c)
else:
othe... | Split COMMENTS into flag comments and other comments. Flag
comments are those that begin with '#,', e.g. '#,fuzzy'. |
def normalizeSequence(sequence):
"""
normalize sequence by subtracting the mean and
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence
"""
seq = np.array(sequence).astype('float64')
meanSeq = np.mean(seq)
stdSeq = np.std(se... | normalize sequence by subtracting the mean and
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence |
def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
LOG.... | A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined. |
def start(self, timeout=None):
"""Install the server on its IOLoop, optionally starting the IOLoop.
Parameters
----------
timeout : float or None, optional
Time in seconds to wait for server thread to start.
"""
if self._running.isSet():
raise Ru... | Install the server on its IOLoop, optionally starting the IOLoop.
Parameters
----------
timeout : float or None, optional
Time in seconds to wait for server thread to start. |
def choose_ancestral_states_mppa(tree, feature, states, force_joint=True):
"""
Chooses node ancestral states based on their marginal probabilities using MPPA method.
:param force_joint: make sure that Joint state is chosen even if it has a low probability.
:type force_joint: bool
:param tree: tree ... | Chooses node ancestral states based on their marginal probabilities using MPPA method.
:param force_joint: make sure that Joint state is chosen even if it has a low probability.
:type force_joint: bool
:param tree: tree of interest
:type tree: ete3.Tree
:param feature: character for which the ances... |
def sniff(count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None, stop_callback=None, *arg, **karg):
"""Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
Select interface to sniff by setting conf.iface. Use ... | Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
Select interface to sniff by setting conf.iface. Use show_interfaces() to see interface names.
count: number of packets to capture. 0 means infinity
store: wether to store sniffed packets ... |
def norm(x, mu, sigma=1.0):
""" Scipy norm function """
return stats.norm(loc=mu, scale=sigma).pdf(x) | Scipy norm function |
def request(self, method, url, params=None, **aio_kwargs):
"""Make a request to provider."""
oparams = {
'oauth_consumer_key': self.consumer_key,
'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(),
'oauth_signature_method': self.signature.name,
... | Make a request to provider. |
def encode(self):
"""
Just iterate over the child elements and append them to the current element
:return: the encoded element
:rtype: xml.etree.ElementTree.Element
"""
element = ElementTree.Element(
self.name,
attrib={'type': FieldConstants.ARRAY... | Just iterate over the child elements and append them to the current element
:return: the encoded element
:rtype: xml.etree.ElementTree.Element |
def updatewhere(clas,pool_or_cursor,where_keys,**update_keys):
"this doesn't allow raw_keys for now"
# if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField?
if not where_keys or not update_keys: raise ValueError
setclause=','.join(k+'=%s' ... | this doesn't allow raw_keys for now |
def set_idlesleep(self, idlesleep):
"""
Sets CPU idle sleep time value.
:param idlesleep: idle sleep value (integer)
"""
is_running = yield from self.is_running()
if is_running: # router is running
yield from self._hypervisor.send('vm set_idle_sleep_time "{... | Sets CPU idle sleep time value.
:param idlesleep: idle sleep value (integer) |
def _check_above_value_float(string, minimum):
"""
Checks that the number parsed from the string is above a minimum.
This is used on compulsory numeric fields.
If the value is not above the minimum an exception is thrown.
:param string: the field value
:param minimum: minimum value
"""
... | Checks that the number parsed from the string is above a minimum.
This is used on compulsory numeric fields.
If the value is not above the minimum an exception is thrown.
:param string: the field value
:param minimum: minimum value |
def generic(self, input_string, **kwargs):
""" return a generic filename for a given dataset and component
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] ... | return a generic filename for a given dataset and component |
def visualize_conv_activations(activation, name):
"""Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost al... | Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations |
def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple(tuple(map(flo... | Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self |
def lfprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden,... | Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn |
def _compute_surface_areas(self, cell_ids):
"""For each edge, one half of the the edge goes to each of the end
points. Used for Neumann boundary conditions if on the boundary of the
mesh and transition conditions if in the interior.
"""
# Each of the three edges may contribute to... | For each edge, one half of the the edge goes to each of the end
points. Used for Neumann boundary conditions if on the boundary of the
mesh and transition conditions if in the interior. |
def _readintle(self, length, start):
"""Read bits and interpret as a little-endian signed int."""
ui = self._readuintle(length, start)
if not ui >> (length - 1):
# Top bit not set, number is positive
return ui
# Top bit is set, so number is negative
tmp = ... | Read bits and interpret as a little-endian signed int. |
def start(self):
"""
Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`.
"""
def handle(signum, frame):
if signum == signal.SIGWINCH:
self.pty.resize()
se... | Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`. |
def reassemble(self, blueprint, fields, documents):
"""
Reassemble the given set of fields for a list of pre-assembed documents.
NOTE: Reassembly is done in place, since the data you send the method
should be JSON type safe, if you need to retain the existing document
it is reco... | Reassemble the given set of fields for a list of pre-assembed documents.
NOTE: Reassembly is done in place, since the data you send the method
should be JSON type safe, if you need to retain the existing document
it is recommended that you copy them using `copy.deepcopy`. |
def as_variable(obj, name=None) -> 'Union[Variable, IndexVariable]':
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' a... | Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If... |
def _run_amber(paired, work_dir, lenient=False):
"""AMBER: calculate allele frequencies at likely heterozygous sites.
lenient flag allows amber runs on small test sets.
"""
amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sam... | AMBER: calculate allele frequencies at likely heterozygous sites.
lenient flag allows amber runs on small test sets. |
def bfd(self, **kwargs):
"""Configure BFD for Interface.
Args:
name (str): name of the interface to configure (230/0/1 etc)
int_type (str): interface type (gigabitethernet etc)
tx (str): BFD transmit interval in milliseconds (300, 500, etc)
rx (str): BFD ... | Configure BFD for Interface.
Args:
name (str): name of the interface to configure (230/0/1 etc)
int_type (str): interface type (gigabitethernet etc)
tx (str): BFD transmit interval in milliseconds (300, 500, etc)
rx (str): BFD receive interval in milliseconds (30... |
def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
"""Updates the hunt object by applying the update function."""
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
ke... | Updates the hunt object by applying the update function. |
def _fix_logging_shortcuts(cls):
"""
Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd l... | Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
... |
def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_da... | Save the data to netcdf files in direc_out. |
def calculate_v(nfs):
"""Calculates V(n+1/n) values. Useful for establishing the quality of
your normalization regime. See Vandesompele 2002 for advice on
interpretation.
:param DataFrame nfs: A matrix of all normalization factors, produced by
`calculate_all_nfs`.
:return: a Series of values... | Calculates V(n+1/n) values. Useful for establishing the quality of
your normalization regime. See Vandesompele 2002 for advice on
interpretation.
:param DataFrame nfs: A matrix of all normalization factors, produced by
`calculate_all_nfs`.
:return: a Series of values [V(2/1), V(3/2), V(4/3), ...... |
def from_config(config):
"""
Creates new Parameters from ConfigMap object.
:param config: a ConfigParams that contain parameters.
:return: a new Parameters object.
"""
result = Parameters()
if config == None or len(config) == 0:
return resul... | Creates new Parameters from ConfigMap object.
:param config: a ConfigParams that contain parameters.
:return: a new Parameters object. |
def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (fun... | Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the the fu... |
def script_file(self):
"""
Returns the startup script file for this VPCS VM.
:returns: path to startup script file
"""
# use the default VPCS file if it exists
path = os.path.join(self.working_dir, 'startup.vpc')
if os.path.exists(path):
return path
... | Returns the startup script file for this VPCS VM.
:returns: path to startup script file |
def delete_items(self, url, container, container_object=None):
"""Deletes an objects in a container.
:param url:
:param container:
"""
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_o... | Deletes an objects in a container.
:param url:
:param container: |
def _add_conflicting_arguments(self):
"""It's too dangerous to use `-y` and `-r` together."""
group = self._parser.add_mutually_exclusive_group()
group.add_argument(
'-y', '--yes', '--yeah',
action='store_true',
help='execute fixed command without confirmation... | It's too dangerous to use `-y` and `-r` together. |
def offering(self):
"""
Deprecated. Use course and run independently.
"""
warnings.warn(
"Offering is no longer a supported property of Locator. Please use the course and run properties.",
DeprecationWarning,
stacklevel=2
)
if not self.... | Deprecated. Use course and run independently. |
def num_nodes(tree):
"""Determine the number of nodes in a tree"""
if tree.is_leaf:
return 1
else:
return 1 + num_nodes(tree.left_child) + num_nodes(tree.right_child) | Determine the number of nodes in a tree |
def reply(self, user, msg, errors_as_replies=True):
"""Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variabl... | Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variables (including topic and history), so if your
bo... |
def get_file_from_s3(job, s3_url, encryption_key=None, write_to_jobstore=True):
"""
Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and a subsequently written to the jobstore and the return value is a the path to
the file in the jobstore.
... | Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and a subsequently written to the jobstore and the return value is a the path to
the file in the jobstore. |
def write_out(self, output):
"""Banana banana
"""
for page in self.walk():
ext = self.project.extensions[page.extension_name]
ext.write_out_page(output, page) | Banana banana |
def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,... | NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for... |
def _cache_translation(translation, timeout=cache.default_timeout):
"""
Store a new translation in the cache.
"""
if not appsettings.PARLER_ENABLE_CACHING:
return
if translation.master_id is None:
raise ValueError("Can't cache unsaved translation")
# Cache a translation object.... | Store a new translation in the cache. |
def print_results(cls, stdout, stderr):
"""Print linter results and exits with an error if there's any."""
for line in stderr:
print(line, file=sys.stderr)
if stdout:
if stderr: # blank line to separate stdout from stderr
print(file=sys.stderr)
... | Print linter results and exits with an error if there's any. |
def fuzz_string(seed_str, runs=100, fuzz_factor=50):
"""A random fuzzer for a simulated text viewer application.
It takes a string as seed and generates <runs> variant of it.
:param seed_str: the string to use as seed for fuzzing.
:param runs: number of fuzzed variants to supply.
:param fuzz_facto... | A random fuzzer for a simulated text viewer application.
It takes a string as seed and generates <runs> variant of it.
:param seed_str: the string to use as seed for fuzzing.
:param runs: number of fuzzed variants to supply.
:param fuzz_factor: degree of fuzzing = 1 / fuzz_factor.
:return: list of... |
def trim(self):
"""Clear not used counters"""
for key, value in list(iteritems(self.counters)):
if value.empty():
del self.counters[key] | Clear not used counters |
def command(state, args):
"""Reset anime watched episodes."""
args = parser.parse_args(args[1:])
aid = state.results.parse_aid(args.aid, default_key='db')
query.update.reset(state.db, aid, args.episode) | Reset anime watched episodes. |
def without(self, *values):
"""
Return a version of the array that does not
contain the specified value(s).
"""
if self._clean.isDict():
newlist = {}
for i, k in enumerate(self.obj):
# if k not in values: # use indexof to check identity
... | Return a version of the array that does not
contain the specified value(s). |
def get_changes(self, fixer=str.lower,
task_handle=taskhandle.NullTaskHandle()):
"""Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name.
"""
stack = changestack.ChangeStack(self.... | Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name. |
def get_header_dict(response, header):
""" returns a dictionary of the cache control headers
the same as is used by django.utils.cache.patch_cache_control
if there are no Cache-Control headers returns and empty dict
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
... | returns a dictionary of the cache control headers
the same as is used by django.utils.cache.patch_cache_control
if there are no Cache-Control headers returns and empty dict |
def _backlog(self, data):
"""Find all the datagrepper messages between 'then' and 'now'.
Put those on our work queue.
Should be called in a thread so as not to block the hub at startup.
"""
try:
data = json.loads(data)
except ValueError as e:
se... | Find all the datagrepper messages between 'then' and 'now'.
Put those on our work queue.
Should be called in a thread so as not to block the hub at startup. |
def colorize(bg, base, fg, *text):
""" colorize(bg, base, fg, *text)
"""
# All argument types must be str.
rtext = [str(f) for f in text]
return COLORIZE_FORMAT.format(
_to_int(bg), _to_int(base), _to_int(fg), ''.join(rtext)
) | colorize(bg, base, fg, *text) |
def get_arp_output_arp_entry_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
... | Auto Generated Code |
def verify_hash_type(self):
'''
Verify and display a nag-messsage to the log if vulnerable hash-type is used.
:return:
'''
if self.config['hash_type'].lower() in ['md5', 'sha1']:
log.warning(
'IMPORTANT: Do not use %s hashing algorithm! Please set '
... | Verify and display a nag-messsage to the log if vulnerable hash-type is used.
:return: |
def set_session(self, autocommit=None, readonly=None):
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not imple... | Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connectio... |
def peer_list(self):
""" GET /network/peers
Use the Network APIs to retrieve information about the network of peer
nodes comprising the blockchain network.
```golang
message PeersMessage {
repeated PeerEndpoint peers = 1;
}
message PeerEndpoint {
... | GET /network/peers
Use the Network APIs to retrieve information about the network of peer
nodes comprising the blockchain network.
```golang
message PeersMessage {
repeated PeerEndpoint peers = 1;
}
message PeerEndpoint {
PeerID ID = 1;
... |
def run(pipeline, input_gen, options={}):
""" Run a pipeline over a input generator
>>> # if we have a simple component
>>> from reliure.pipeline import Composable
>>> @Composable
... def print_each(letters):
... for letter in letters:
... print(letter)
... yield let... | Run a pipeline over a input generator
>>> # if we have a simple component
>>> from reliure.pipeline import Composable
>>> @Composable
... def print_each(letters):
... for letter in letters:
... print(letter)
... yield letter
>>> # that we want to run over a given inp... |
def compile(self, compass):
"""
Calls the compass script specified in the compass extension
with the paths provided by the config.rb.
"""
try:
output = subprocess.check_output(
[compass.compass_path, 'compile', '-q'],
cwd=self.b... | Calls the compass script specified in the compass extension
with the paths provided by the config.rb. |
def match(self, path, **kw):
'''
path - str (urlencoded)
'''
m = self._pattern.match(path)
if m:
kwargs = m.groupdict()
# convert params
for url_arg_name, value_urlencoded in kwargs.items():
conv_obj = self._url_params[url_arg_n... | path - str (urlencoded) |
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
... | Serialize an element and its child nodes to a string |
def UserAcceptance(
matchList,
recursiveLookup = True,
promptComment = None,
promptOnly = False,
xStrOverride = "to skip this selection"
):
"""
Prompt user to select a entry from a given match list or to enter a new
string to look up. If the match list is empty user must enter a new string
or exit.
... | Prompt user to select a entry from a given match list or to enter a new
string to look up. If the match list is empty user must enter a new string
or exit.
Parameters
----------
matchList : list
A list of entries which the user can select a valid match from.
recursiveLookup : boolean [optional: ... |
def moresane_by_scale(self, start_scale=1, stop_scale=20, subregion=None, sigma_level=4, loop_gain=0.1,
tolerance=0.75, accuracy=1e-6, major_loop_miter=100, minor_loop_miter=30, all_on_gpu=False,
decom_mode="ser", core_count=1, conv_device='cpu', conv_mode='linear', e... | Extension of the MORESANE algorithm. This takes a scale-by-scale approach, attempting to remove all sources
at the lower scales before moving onto the higher ones. At each step the algorithm may return to previous
scales to remove the sources uncovered by the deconvolution.
INPUTS:
star... |
def validate(self):
"""Could this config be used to send a real email?"""
missing = []
for k, v in self._map.items():
attr = getattr(self, k, False)
if not attr or attr == CONFIG_PLACEHOLDER:
missing.append(v)
if missing:
return "Missin... | Could this config be used to send a real email? |
def update_account_api_key(self, account_id, api_key, body, **kwargs): # noqa: E501
"""Update API key details. # noqa: E501
An endpoint for updating API key details. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -d '{\"name\": \"TestAp... | Update API key details. # noqa: E501
An endpoint for updating API key details. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -d '{\"name\": \"TestApiKey25\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501... |
def add_transformers(line):
'''Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
'''
assert FROM_EXPERIMENTAL.match(line)
line = FROM_EXPERIMENTAL.sub(' ', line)
# we now have: " tra... | Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict |
def create(self, name, sources=None, destinations=None,
services=None, action='allow', log_options=None,
authentication_options=None, connection_tracking=None,
is_disabled=False, vpn_policy=None, mobile_vpn=False,
add_pos=None, after=None, before=None,
... | Create a layer 3 firewall rule
:param str name: name of rule
:param sources: source/s for rule
:type sources: list[str, Element]
:param destinations: destination/s for rule
:type destinations: list[str, Element]
:param services: service/s for rule
:type services:... |
def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
"""
smooth = self.... | Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points. |
def outcomes(self, outcomes):
""" Setter for _outcomes field
See property.
:param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type
:class:`rafcon.core.state_elements.logical_port.Outcome`
:raises excepti... | Setter for _outcomes field
See property.
:param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type
:class:`rafcon.core.state_elements.logical_port.Outcome`
:raises exceptions.TypeError: if outcomes parameter has t... |
def get_key(self, section, key):
"""
Gets key value from settings file.
:param section: Current section to retrieve key from.
:type section: unicode
:param key: Current key to retrieve.
:type key: unicode
:return: Current key value.
:rtype: object
... | Gets key value from settings file.
:param section: Current section to retrieve key from.
:type section: unicode
:param key: Current key to retrieve.
:type key: unicode
:return: Current key value.
:rtype: object |
def check_proxy_setting():
"""
If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
urlllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xml... | If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
urlllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc. |
def set(self, key, value):
"""
Saves the input with the given key in the section that was
passed to the constructor. If either the section or the key
are not found, they are created.
Does nothing if the given value is None.
:type key: str
:param key: The key fo... | Saves the input with the given key in the section that was
passed to the constructor. If either the section or the key
are not found, they are created.
Does nothing if the given value is None.
:type key: str
:param key: The key for which to define a value.
:type value... |
def subspace_index(self, little_endian_bits_int: int
) -> Tuple[Union[slice, int, 'ellipsis'], ...]:
"""An index for the subspace where the target axes equal a value.
Args:
little_endian_bits_int: The desired value of the qubits at the
targeted `axes`,... | An index for the subspace where the target axes equal a value.
Args:
little_endian_bits_int: The desired value of the qubits at the
targeted `axes`, packed into an integer. The least significant
bit of the integer is the desired bit for the first axis, and
... |
def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True):
'''A slow multiply method. This method gives the same results as the
other __mul__ method but without needing precomputed tables,
thus it can be used to generate those tables.
If prim is set to 0 and carryless=False,... | A slow multiply method. This method gives the same results as the
other __mul__ method but without needing precomputed tables,
thus it can be used to generate those tables.
If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outsid... |
def get_step_f(step_f, lR2, lS2):
"""Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1
"""
mu, tau = 10, 2
if lR2 > mu*lS2:
return step_f * tau
elif lS2 > mu*lR2:
return step_f / tau
return step_f | Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1 |
def elbv2_load_balancer_arn_suffix(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*`
"""
try:
elb = self._elbv2_load_ba... | Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*` |
def missing_optional_tagfiles(self):
"""
From v0.97 we need to validate any tagfiles listed
in the optional tagmanifest(s). As there is no mandatory
directory structure for additional tagfiles we can
only check for entries with missing files (not missing
entries for exist... | From v0.97 we need to validate any tagfiles listed
in the optional tagmanifest(s). As there is no mandatory
directory structure for additional tagfiles we can
only check for entries with missing files (not missing
entries for existing files). |
def schemaless_reader(fo, writer_schema, reader_schema=None):
"""Reads a single record writen using the
:meth:`~fastavro._write_py.schemaless_writer`
Parameters
----------
fo: file-like
Input stream
writer_schema: dict
Schema used when calling schemaless_writer
reader_schema... | Reads a single record writen using the
:meth:`~fastavro._write_py.schemaless_writer`
Parameters
----------
fo: file-like
Input stream
writer_schema: dict
Schema used when calling schemaless_writer
reader_schema: dict, optional
If the schema has changed since being writte... |
def add_binding(self, *keys, **kwargs):
"""
Decorator for annotating key bindings.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
when this key binding is active.
:param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
When Tru... | Decorator for annotating key bindings.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
when this key binding is active.
:param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
When True, ignore potential longer matches when this key binding is
... |
def exists(name, **kwargs):
'''
Check if a ZFS filesystem or volume or snapshot exists.
name : string
name of dataset
type : string
also check if dataset is of a certain type, valid choices are:
filesystem, snapshot, volume, bookmark, or all.
.. versionadded:: 2015.5.0
... | Check if a ZFS filesystem or volume or snapshot exists.
name : string
name of dataset
type : string
also check if dataset is of a certain type, valid choices are:
filesystem, snapshot, volume, bookmark, or all.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
... |
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inert... | Compute inertia with cosine distance using known labels. |
def progress_str(max_val, lbl='Progress: ', repl=False, approx=False,
backspace=PROGGRESS_BACKSPACE):
r""" makes format string that prints progress: %Xd/MAX_VAL with backspaces
NOTE: \r can be used instead of backspaces. This function is not very
relevant because of that.
"""
# st... | r""" makes format string that prints progress: %Xd/MAX_VAL with backspaces
NOTE: \r can be used instead of backspaces. This function is not very
relevant because of that. |
def run_query(db, query):
'''
Run SQL query and return result
CLI Example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table"
'''
if db in [x.keys()[0] for x in show_dbs()]:
conn = _connect(show_dbs(db)[db]['uri'])
else:
log.debug('No uri... | Run SQL query and return result
CLI Example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table" |
def encode_record_with_schema_id(self, schema_id, record, is_key=False):
"""
Encode a record with a given schema id. The record must
be a python dictionary.
:param int schema_id: integer ID
:param dict record: An object to serialize
:param bool is_key: If the record is a... | Encode a record with a given schema id. The record must
be a python dictionary.
:param int schema_id: integer ID
:param dict record: An object to serialize
:param bool is_key: If the record is a key
:returns: decoder function
:rtype: func |
def find_close_value(self, LIST, value):
    """Return the element of ``LIST`` nearest to ``value``.

    Replaces the hand-rolled argmin loop with the ``min`` builtin; on
    ties the first minimizing element is returned, matching the
    original strict-``<`` comparison.

    :param LIST: iterable of numbers to search
    :param value: target number
    :return: element of ``LIST`` minimizing ``abs(value - element)``
    :raises ValueError: if ``LIST`` is empty (the original raised
        ``UnboundLocalError`` in that case; also, the original relied
        on a star-imported ``inf`` that is no longer needed)
    """
    return min(LIST, key=lambda a: abs(value - a))
def create_job(cpu_width, time_height):
"""
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
"""
shell_command = stress_string.format(cpu_width, time_height)
job = JobBlock(cpu_width, time_height)
job.set_job(subprocess.call, ... | :param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object |
def wait_locally(self):
"""If you have run the query in a non-blocking way, call this method to pause
until the query is finished."""
try: self.thread.join(sys.maxint) # maxint timeout so that we can Ctrl-C them
except KeyboardInterrupt: print "Stopped waiting on job '%s'" % self.kwargs[... | If you have run the query in a non-blocking way, call this method to pause
until the query is finished. |
def putData(self,data=None,exten=None):
""" Now that we are removing the data from the object to save memory,
we need something that cleanly puts the data array back into
the object so that we can write out everything together using
something like fits.writeto....this method... | Now that we are removing the data from the object to save memory,
we need something that cleanly puts the data array back into
the object so that we can write out everything together using
something like fits.writeto....this method is an attempt to
make sure that when yo... |
def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):
"""
Calculate Linke turbidity factor using Kasten pyrheliometric formula.
Note that broadband aerosol optical depth (AOD) can be approximated by AOD
measured at 700 nm according to Molineaux [4] . Bird and Hulstrom offer an
alternate ap... | Calculate Linke turbidity factor using Kasten pyrheliometric formula.
Note that broadband aerosol optical depth (AOD) can be approximated by AOD
measured at 700 nm according to Molineaux [4] . Bird and Hulstrom offer an
alternate approximation using AOD measured at 380 nm and 500 nm.
Based on original... |
def Rz_to_lambdanu_jac(R,z,Delta=1.):
"""
NAME:
Rz_to_lambdanu_jac
PURPOSE:
calculate the Jacobian of the cylindrical (R,z) to prolate spheroidal
(lambda,nu) conversion
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
Delta - foca... | NAME:
Rz_to_lambdanu_jac
PURPOSE:
calculate the Jacobian of the cylindrical (R,z) to prolate spheroidal
(lambda,nu) conversion
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
Delta - focal distance that defines the spheroidal coordinate ... |
def map_trips(
feed: "Feed",
trip_ids: List[str],
color_palette: List[str] = cs.COLORS_SET2,
*,
include_stops: bool = True,
):
"""
Return a Folium map showing the given trips and (optionally)
their stops.
Parameters
----------
feed : Feed
trip_ids : list
IDs of t... | Return a Folium map showing the given trips and (optionally)
their stops.
Parameters
----------
feed : Feed
trip_ids : list
IDs of trips in ``feed.trips``
color_palette : list
Palette to use to color the routes. If more routes than colors,
then colors will be recycled.
... |
def image_get(fingerprint,
remote_addr=None,
cert=None,
key=None,
verify_cert=True,
_raw=False):
''' Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An... | Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserv... |
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
if hasattr(self, "dtype"):
return pandas.Series({str(self.dtype): 1})
result = self.dtypes.value_counts()
result.ind... | Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object. |
def track_to_ref(track, with_track_no=False):
"""Convert a mopidy track to a mopidy ref."""
if with_track_no and track.track_no > 0:
name = '%d - ' % track.track_no
else:
name = ''
for artist in track.artists:
if len(name) > 0:
name += ', '
name += artist.name... | Convert a mopidy track to a mopidy ref. |
def getvector(d, s):
    '''
    Get a vector from flds data.
    Parameters:
    -----------
    d -- flds data (mapping keyed by ``s + "x"``, ``s + "y"``, ``s + "z"``).
    s -- base key for the vector components.
    '''
    components = [d[s + axis] for axis in ("x", "y", "z")]
    return np.array(components)
Parameters:
-----------
d -- flds data.
s -- key for the data. |
def _EncodeString(self, string):
"""Encodes a string in the preferred encoding.
Returns:
bytes: encoded string.
"""
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = string.encode(
self._preferred_encoding, error... | Encodes a string in the preferred encoding.
Returns:
bytes: encoded string. |
def to_flags(value):
"""Return a (flags, ednsflags) tuple which encodes the rcode.
@param value: the rcode
@type value: int
@raises ValueError: rcode is < 0 or > 4095
@rtype: (int, int) tuple
"""
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
v... | Return a (flags, ednsflags) tuple which encodes the rcode.
@param value: the rcode
@type value: int
@raises ValueError: rcode is < 0 or > 4095
@rtype: (int, int) tuple |
def _add_line_segment(self, x, y):
    """Append a |_LineSegment| operation for (*x*, *y*) to the drawing sequence."""
    segment = _LineSegment.new(self, x, y)
    self._drawing_operations.append(segment)
def mk_auth_token(self, account, admin=False, duration=0):
""" Builds an authentification token, using preauth mechanism.
See http://wiki.zimbra.com/wiki/Preauth
:param duration: in seconds defaults to 0, which means "use account
default"
:param account: an account obje... | Builds an authentification token, using preauth mechanism.
See http://wiki.zimbra.com/wiki/Preauth
:param duration: in seconds defaults to 0, which means "use account
default"
:param account: an account object to be used as a selector
:returns: the auth string |
def past(self, rev=None):
    """Return a Mapping of items at or before the given revision.

    When *rev* is None, the revision from the most recent lookup is
    used; otherwise the window is first moved there via ``seek``.
    """
    if rev is None:
        return WindowDictPastView(self._past)
    self.seek(rev)
    return WindowDictPastView(self._past)
Default revision is the last one looked up. |
def _get_version():
'''
Get the xbps version
'''
version_string = __salt__['cmd.run'](
[_check_xbps(), '--version'],
output_loglevel='trace')
if version_string is None:
# Dunno why it would, but...
return False
VERSION_MATCH = re.compile(r'(?:XBPS:[\s]+)([\d.]+)(... | Get the xbps version |
def verify_oauth_token_and_set_current_user():
"""Verify OAuth token and set current user on request stack.
This function should be used **only** on REST application.
.. code-block:: python
app.before_request(verify_oauth_token_and_set_current_user)
"""
for func in oauth2._before_request_... | Verify OAuth token and set current user on request stack.
This function should be used **only** on REST application.
.. code-block:: python
app.before_request(verify_oauth_token_and_set_current_user) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.