code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def serial(self):
    """Serial number of the certificate as an integer.

    Renders the ASN.1 serial through a memory BIO as hex text and
    parses that hex into a Python int.
    """
    serial_asn1 = libcrypto.X509_get_serialNumber(self.cert)
    hex_bio = Membio()
    libcrypto.i2a_ASN1_INTEGER(hex_bio.bio, serial_asn1)
    return int(str(hex_bio), 16)
def list_opts():
    """List all conf modules' opts.

    Goes through all conf modules and yields their (group, opts) pairs.
    A module's ``list_opts()`` may return either a single pair or a list
    of pairs; both shapes are normalized here.
    """
    for mod in load_conf_modules():
        mod_opts = mod.list_opts()
        # isinstance (not `type(...) is list`) so list subclasses work too.
        if isinstance(mod_opts, list):
            for single_mod_opts in mod_opts:
                yield single_mod_opts[0], single_mod_opts[1]
        else:
            yield mod_opts[0], mod_opts[1]
def open(self, path, delimiter=None, mode='r', buffering=-1, encoding=None, errors=None,
newline=None):
if not re.match('^[rbt]{1,3}$', mode):
raise ValueError('mode argument must be only have r, b, and t')
file_open = get_read_function(path, self.disable_compression)
fi... | Reads and parses input files as defined.
If delimiter is not None, then the file is read in bulk then split on it. If it is None
(the default), then the file is parsed as sequence of lines. The rest of the options are
passed directly to builtins.open with the exception that write/append file mo... |
def source(self):
    """Return the single source name for a variant collection.

    Raises ValueError when the collection has zero sources or more
    than one.
    """
    n_sources = len(self.sources)
    if n_sources == 0:
        raise ValueError("No source associated with %s" % self.__class__.__name__)
    if n_sources > 1:
        raise ValueError("Multiple sources for %s" % self.__class__.__name__)
    return list(self.sources)[0]
def json_options_to_metadata(options, add_brackets=True):
    """Read metadata from its JSON representation.

    Returns the parsed dict, or {} when the text is not valid JSON.
    """
    text = '{' + options + '}' if add_brackets else options
    try:
        return loads(text)
    except ValueError:
        return {}
def mixerfields(data, commdct):
    """Get the Connector:Mixer fields needed to diagram it."""
    mixer_key = "Connector:Mixer".upper()
    field_lists = splittermixerfieldlists(data, commdct, mixer_key)
    return extractfields(data, commdct, mixer_key, field_lists)
def _preprocess(project_dict):
handlers = {
('archive',): _list_if_none,
('on-run-start',): _list_if_none_or_string,
('on-run-end',): _list_if_none_or_string,
}
for k in ('models', 'seeds'):
handlers[(k,)] = _dict_if_none
handlers[(k, '... | Pre-process certain special keys to convert them from None values
into empty containers, and to turn strings into arrays of strings. |
def default_depart(self, mdnode):
    """Default node depart handler.

    For a container node that has a matching ``visit_<type>`` method,
    step ``current_node`` back up to its parent as the node is exited;
    otherwise warn that the container was skipped.
    """
    if not mdnode.is_container():
        return
    if hasattr(self, 'visit_{0}'.format(mdnode.t)):
        self.current_node = self.current_node.parent
    else:
        warn("Container node skipped: type={0}".format(mdnode.t))
def delete_key(self, key_to_delete):
log = logging.getLogger(self.cls_logger + '.delete_key')
log.info('Attempting to delete key: {k}'.format(k=key_to_delete))
try:
self.s3client.delete_object(Bucket=self.bucket_name, Key=key_to_delete)
except ClientError:
_, ex, ... | Deletes the specified key
:param key_to_delete:
:return: |
def _cursor_forward(self, count=1):
self.x = min(self.size[1] - 1, self.x + count) | Moves cursor right count columns. Cursor stops at right margin. |
def _byte_pad(data, bound=4):
bound = int(bound)
if len(data) % bound != 0:
pad = bytes(bound - (len(data) % bound))
result = bytes().join([data, pad])
assert (len(result) % bound) == 0
return result
return data | GLTF wants chunks aligned with 4- byte boundaries
so this function will add padding to the end of a
chunk of bytes so that it aligns with a specified
boundary size
Parameters
--------------
data : bytes
Data to be padded
bound : int
Length of desired boundary
Returns
--... |
def select_from_drop_down_by_text(self, drop_down_locator, option_locator, option_text, params=None):
self.click(drop_down_locator, params['drop_down'] if params else None)
for option in self.get_present_elements(option_locator, params['option'] if params else None):
if self.get_text(option)... | Select option from drop down widget using text.
:param drop_down_locator: locator tuple (if any, params needs to be in place) or WebElement instance
:param option_locator: locator tuple (if any, params needs to be in place)
:param option_text: text to base option selection on
:param par... |
def deploy_clone_from_vm(self, si, logger, data_holder, vcenter_data_model, reservation_id, cancellation_context):
template_resource_model = data_holder.template_resource_model
return self._deploy_a_clone(si,
logger,
data_holder.app... | deploy Cloned VM From VM Command, will deploy vm from another vm
:param cancellation_context:
:param reservation_id:
:param si:
:param logger:
:type data_holder:
:type vcenter_data_model:
:rtype DeployAppResult:
:return: |
def subscriber_choice_control(self):
self.current.task_data['option'] = None
self.current.task_data['chosen_subscribers'], names = self.return_selected_form_items(
self.input['form']['SubscriberList'])
self.current.task_data[
'msg'] = "You should choose at least one subsc... | It controls subscribers choice and generates
error message if there is a non-choice. |
def create_conversion_event(self, event_key, user_id, attributes, event_tags):
params = self._get_common_params(user_id, attributes)
conversion_params = self._get_required_params_for_conversion(event_key, event_tags)
params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(conversion_params)
... | Create conversion Event to be sent to the logging endpoint.
Args:
event_key: Key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing user attributes and values.
event_tags: Dict representing metadata associated with the event.
Returns:... |
def show(cobertura_file, format, output, source, source_prefix):
cobertura = Cobertura(cobertura_file, source=source)
Reporter = reporters[format]
reporter = Reporter(cobertura)
report = reporter.generate()
if not isinstance(report, bytes):
report = report.encode('utf-8')
isatty = True i... | show coverage summary of a Cobertura report |
def ping(self):
    """Ping the remote server.

    Returns
    -------
    bool
        True when the server answers with a ping response code.
    """
    msg_code = riak.pb.messages.MSG_CODE_PING_REQ
    codec = self._get_codec(msg_code)
    msg = codec.encode_ping()
    resp_code, _ = self._request(msg, codec)
    # Direct comparison replaces the redundant if/else returning True/False.
    return resp_code == riak.pb.messages.MSG_CODE_PING_RESP
def _show_previous_blank_lines(block):
    """Make visible every blank line directly preceding ``block``."""
    cursor = block.previous()
    # Walk backwards while the text is blank; a blockNumber() below zero
    # marks an invalid block (we walked past the start of the document).
    while cursor.text().strip() == '' and cursor.blockNumber() >= 0:
        cursor.setVisible(True)
        cursor = cursor.previous()
def set_time_zone(self, item):
i3s_time = item["full_text"].encode("UTF-8", "replace")
try:
i3s_time = i3s_time.decode()
except:
pass
parts = i3s_time.split()
i3s_datetime = " ".join(parts[:2])
if len(parts) < 3:
return True
els... | Work out the time zone and create a shim tzinfo.
We return True if all is good or False if there was an issue and we
need to re check the time zone. see issue #1375 |
def join_resource_name(self, v):
    """Return a MetapackResourceUrl whose fragment references resource ``v``."""
    parts = self.dict
    parts['fragment'] = [v, None]
    return MetapackResourceUrl(downloader=self._downloader, **parts)
def import_from_netcdf(network, path, skip_time=False):
assert has_xarray, "xarray must be installed for netCDF support."
basename = os.path.basename(path) if isinstance(path, string_types) else None
with ImporterNetCDF(path=path) as importer:
_import_from_importer(network, importer, basename=basena... | Import network data from netCDF file or xarray Dataset at `path`.
Parameters
----------
path : string|xr.Dataset
Path to netCDF dataset or instance of xarray Dataset
skip_time : bool, default False
Skip reading in time dependent attributes |
def _cache_loc(self, path, saltenv='base', cachedir=None):
cachedir = self.get_cachedir(cachedir)
dest = salt.utils.path.join(cachedir,
'files',
saltenv,
path)
destdir = os.path.dirname(de... | Return the local location to cache the file, cache dirs will be made |
def import_legislators(src):
logger.info("Importing Legislators From: {0}".format(src))
current = pd.read_csv("{0}/{1}/legislators-current.csv".format(
src, LEGISLATOR_DIR))
historic = pd.read_csv("{0}/{1}/legislators-historic.csv".format(
src, LEGISLATOR_DIR))
legislators = current.appe... | Read the legislators from the csv files into a single Dataframe. Intended
for importing new data. |
def training_set_multiplication(training_set, mult_queue):
logging.info("Multiply data...")
for algorithm in mult_queue:
new_trning_set = []
for recording in training_set:
samples = algorithm(recording['handwriting'])
for sample in samples:
new_trning_set.... | Multiply the training set by all methods listed in mult_queue.
Parameters
----------
training_set :
set of all recordings that will be used for training
mult_queue :
list of all algorithms that will take one recording and generate more
than one.
Returns
-------
mutl... |
def create_unique_wcsname(fimg, extnum, wcsname):
wnames = list(wcsutil.altwcs.wcsnames(fimg, ext=extnum).values())
if wcsname not in wnames:
uniqname = wcsname
else:
rpatt = re.compile(wcsname+'_\d')
index = 0
for wname in wnames:
rmatch = rpatt.match(wname)
... | This function evaluates whether the specified wcsname value has
already been used in this image. If so, it automatically modifies
the name with a simple version ID using wcsname_NNN format.
Parameters
----------
fimg : obj
PyFITS object of image with WCS information to be updated
extn... |
def unpack(s):
    """Unpack an MXImageRecord buffer.

    Parameters
    ----------
    s : str
        String buffer from ``MXRecordIO.read``.

    Returns
    -------
    header : IRHeader
        Header of the image record.
    s : str
        Remaining (unpacked) buffer.
    """
    header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE]))
    payload = s[_IR_SIZE:]
    if header.flag > 0:
        # flag > 0 means the label is a float32 vector of length `flag`
        # stored immediately after the fixed-size header.
        label = np.frombuffer(payload, np.float32, header.flag)
        header = header._replace(label=label)
        payload = payload[header.flag * 4:]
    return header, payload
def help_heading(self):
    """Helper method that returns just the header.

    :returns: A heading object.
    :rtype: safe.messaging.heading.Heading
    """
    title = tr('Help for {step_name}').format(step_name=self.step_name)
    return m.Heading(title, **SUBSECTION_STYLE)
def set_win_wallpaper(img):
    """Set the wallpaper on Windows."""
    # SystemParametersInfo with SPI_SETDESKWALLPAPER (20) and
    # SPIF_UPDATEINIFILE | SPIF_SENDCHANGE (3).
    # NOTE(review): the PROGRAMFILES env var is used as an architecture
    # heuristic to pick the wide (W) vs ANSI (A) entry point -- confirm
    # this behaves as intended on 64-bit installs.
    if "x86" in os.environ["PROGRAMFILES"]:
        ctypes.windll.user32.SystemParametersInfoW(20, 0, img, 3)
    else:
        ctypes.windll.user32.SystemParametersInfoA(20, 0, img, 3)
def refresh_fqdn_cache(force=False):
if not isinstance(force, bool):
raise CommandExecutionError("Force option must be boolean.")
if force:
query = {'type': 'op',
'cmd': '<request><system><fqdn><refresh><force>yes</force></refresh></fqdn></system></request>'}
else:
q... | Force refreshes all FQDNs used in rules.
force
Forces all fqdn refresh
CLI Example:
.. code-block:: bash
salt '*' panos.refresh_fqdn_cache
salt '*' panos.refresh_fqdn_cache force=True |
def mbar_log_W_nk(u_kn, N_k, f_k):
    """Calculate the log weight matrix.

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        Free energies per state

    Returns
    -------
    np.ndarray
        The log weights.
    """
    u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
    # Per-sample log of the MBAR denominator.
    log_denom = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
    return f_k - u_kn.T - log_denom[:, np.newaxis]
def get_details(app_url=defaults.APP_URL):
url = '%s/environment' % app_url
response = requests.get(url)
if response.status_code == 200:
return response.json()
else:
raise JutException('Unable to retrieve environment details from %s, got %s: %s' %
(url, respons... | returns environment details for the app url specified |
def scheme(name, bins, bin_method='quantiles'):
    """Return a custom scheme based on CARTOColors.

    Args:
        name (str): Name of a CARTOColor.
        bins (int or iterable): Number of bins (int) or explicit bin
            edges (iterable). The bin method only applies when ``bins``
            is an int; otherwise it is recorded as an empty string.
        bin_method (str): Classification method, defaults to 'quantiles'.
    """
    method = bin_method if isinstance(bins, int) else ''
    return {'name': name, 'bins': bins, 'bin_method': method}
def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
... | Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream a... |
def get_layer_description_from_canvas(self, layer, purpose):
if not layer:
return ""
try:
keywords = self.keyword_io.read_keywords(layer)
if 'layer_purpose' not in keywords:
keywords = None
except (HashNotFoundError,
Operational... | Obtain the description of a canvas layer selected by user.
:param layer: The QGIS layer.
:type layer: QgsMapLayer
:param purpose: The layer purpose of the layer to get the description.
:type purpose: string
:returns: description of the selected layer.
:rtype: string |
def transition(trname='', field='', check=None, before=None, after=None):
if is_callable(trname):
raise ValueError(
"The @transition decorator should be called as "
"@transition(['transition_name'], **kwargs)")
if check or before or after:
warnings.warn(
"The ... | Decorator to declare a function as a transition implementation. |
def _show(self, message, indent=0, enable_verbose=True):
if enable_verbose:
print(" " * indent + message) | Message printer. |
def move_to(x, y):
for b in _button_state:
if _button_state[b]:
e = Quartz.CGEventCreateMouseEvent(
None,
_button_mapping[b][3],
(x, y),
_button_mapping[b][0])
break
else:
e = Quartz.CGEventCreateMouseEvent(
... | Sets the mouse's location to the specified coordinates. |
def _group(self, group_data):
if isinstance(group_data, dict):
xid = group_data.get('xid')
else:
xid = group_data.xid
if self.groups.get(xid) is not None:
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
group_... | Return previously stored group or new group.
Args:
group_data (dict|obj): An Group dict or instance of Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object. |
def format_variable_map(variable_map, join_lines=True):
rows = []
rows.append(("Key", "Variable", "Shape", "Type", "Collections", "Device"))
var_to_collections = _get_vars_to_collections(variable_map)
sort_key = lambda item: (item[0], item[1].name)
for key, var in sorted(variable_map_items(variable_map), key=... | Takes a key-to-variable map and formats it as a table. |
def reset_option(self, key, subkey):
if not self.open:
return
key, subkey = _lower_keys(key, subkey)
_entry_must_exist(self.gc, key, subkey)
df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
if df["locked"].values[0]:
raise ValueError("{0}.{... | Resets a single option to the default values.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define any
option.
:ValueError: If the targeted... |
def _check_split_list_validity(self):
if not (hasattr(self,"_splitListsSet") and (self._splitListsSet)):
return False
elif len(self) != self._splitListsLength:
return False
else:
return True | See _temporal_split_list above. This function checks if the current
split lists are still valid. |
def placeholdit(
width,
height,
background_color="cccccc",
text_color="969696",
text=None,
random_background_color=False
):
url = get_placeholdit_url(
width,
height,
background_color=background_color,
text_color=text_color,
text=text,
)
return ... | Creates a placeholder image using placehold.it
Usage format:
{% placeholdit [width] [height] [background_color] [text_color] [text] %}
Example usage:
Default image at 250 square
{% placeholdit 250 %}
100 wide and 200 high
{% placeholdit 100 200 %}
Custom backg... |
def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False):
return tomography._do_tomography(preparation_program, nsamples, cxn, qubits,
tomography.MAX_QUBITS_STATE_TOMO,
StateTomography, state_tomography_programs... | Method to perform both a QPU and QVM state tomography, and use the latter as
as reference to calculate the fidelity of the former.
:param Program preparation_program: Program to execute.
:param int nsamples: Number of samples to take for the program.
:param QVMConnection|QPUConnection cxn: Connection o... |
def finalize(self):
    """Output the number of instances that contained dead code.

    Nothing is printed when fewer than two instances were analyzed.
    """
    if self.total_instances <= 1:
        return
    print('{} of {} instances contained dead code.'
          .format(self.dead_code_instances, self.total_instances))
def max_version(self):
    """Version with the most downloads.

    :return: A tuple of the form (version, n_downloads); (None, 0) when
        there is no download data.
    """
    downloads = self.version_downloads
    if downloads:
        return max(downloads.items(), key=lambda kv: kv[1])
    return None, 0
def ReadTriggers(self, collection_link, options=None):
    """Reads all triggers in a collection.

    :param str collection_link: The link to the document collection.
    :param dict options: The request options for the request.
    :return: Query Iterable of Triggers.
    :rtype: query_iterable.QueryIterable
    """
    options = {} if options is None else options
    return self.QueryTriggers(collection_link, None, options)
def define_saver(exclude=None):
variables = []
exclude = exclude or []
exclude = [re.compile(regex) for regex in exclude]
for variable in tf.global_variables():
if any(regex.match(variable.name) for regex in exclude):
continue
variables.append(variable)
saver = tf.train.Saver(variables, keep_che... | Create a saver for the variables we want to checkpoint.
Args:
exclude: List of regexes to match variable names to exclude.
Returns:
Saver object. |
def _add_coverage_bedgraph_to_output(out, data):
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
... | Add BedGraph representation of coverage to the output |
def in_timezone(self, tz):
    """Set the instance's timezone from a string or object."""
    # Normalize tz (name or tzinfo) to a pendulum timezone, then convert
    # this instance, resolving ambiguous times with POST_TRANSITION.
    tz = pendulum._safe_timezone(tz)
    return tz.convert(self, dst_rule=pendulum.POST_TRANSITION)
def basic_parse(response, buf_size=ijson.backend.BUFSIZE):
lexer = iter(IncrementalJsonParser.lexer(response, buf_size))
for value in ijson.backend.parse_value(lexer):
yield value
try:
next(lexer)
except StopIteration:
pass
else:
ra... | Iterator yielding unprefixed events.
Parameters:
- response: a stream response from requests |
def _left_zero_blocks(self, r):
if not self._include_off_diagonal:
return r
elif not self._upper:
return 0
elif self._include_diagonal:
return r
else:
return r + 1 | Number of blocks with zeros from the left in block row `r`. |
def is_compatible_with(self, other):
    """Returns True if the `other` DType will be converted to this DType.

    Compatibility holds when this type's enum matches either ``other``'s
    enum or the enum of ``other``'s base (non-reference) dtype.
    """
    other = as_dtype(other)
    compatible_enums = (other.as_datatype_enum,
                        other.base_dtype.as_datatype_enum)
    return self._type_enum in compatible_enums
def set_rendering_intent(self, rendering_intent):
if rendering_intent not in (None,
PERCEPTUAL,
RELATIVE_COLORIMETRIC,
SATURATION,
ABSOLUTE_COLORIMETRIC):
r... | Set rendering intent variant for sRGB chunk |
def proxy_protocol(self, error='raise', default=None, limit=None, authenticate=False):
if error not in ('raise', 'unread'):
raise ValueError('error="{0}" is not "raise" or "unread""')
if not isinstance(self.request, SocketBuffer):
self.request = SocketBuffer(self.request)
... | Parses, and optionally authenticates, proxy protocol information from
request. Note that ``self.request`` is wrapped by ``SocketBuffer``.
:param error:
How read (``exc.ReadError``) and parse (``exc.ParseError``) errors
are handled. One of:
- "raise" to propagate.
... |
def _define_range(self, sequences):
    """Define the maximum range within which two hits in a db search can
    be linked: 1.5x the average length of all reads in the database.

    Parameters
    ----------
    sequences : str
        A path to the sequences in FASTA format.

    Returns
    -------
    float
        The maximum linkage range.
    """
    total_length = 0
    record_count = 0
    # Context manager so the FASTA handle is closed; the original passed
    # a bare open() to SeqIO.parse and leaked the file descriptor.
    with open(sequences) as handle:
        for record in SeqIO.parse(handle, 'fasta'):
            record_count += 1
            total_length += len(record.seq)
    max_range = (total_length / record_count) * 1.5
    return max_range
def parser_factory(fake_args=None):
parser = ArgumentParser(description='aomi')
subparsers = parser.add_subparsers(dest='operation',
help='Specify the data '
' or extraction operation')
extract_file_args(subparsers)
environmen... | Return a proper contextual OptionParser |
def get_whitelist_page(self, page_number=None, page_size=None):
    """Gets a paginated list of indicators that the user's company has
    whitelisted.

    :param int page_number: the page number to get.
    :param int page_size: the size of the page to be returned.
    :return: A |Page| of |Indicator| objects.
    """
    query = {'pageNumber': page_number, 'pageSize': page_size}
    response = self._client.get("whitelist", params=query)
    return Page.from_dict(response.json(), content_type=Indicator)
def _cmp_date(self):
    """Returns the CalendarDate used for comparison.

    The earliest date out of all CalendarDates in this instance is used;
    when there are none (e.g. when Date is a phrase) a default
    future-dated CalendarDate is returned.
    """
    calendar_dates = [val for val in self.kw.values()
                      if isinstance(val, CalendarDate)]
    if not calendar_dates:
        return CalendarDate()
    return min(calendar_dates)
def model_base(bind_label=None, info=None):
    """Create a base declarative class.

    :param bind_label: optional label stored in the table-args info dict.
    :param dict info: optional info dict passed through to table_args;
        a fresh dict is used when omitted.
    """
    Model = type('Model', (BaseModel,), {'__odm_abstract__': True})
    # Bug fix: the original rebound `info = {}` unconditionally, silently
    # discarding a caller-supplied info dict.
    if info is None:
        info = {}
    Model.__table_args__ = table_args(info=info)
    # Mutate info after table_args so the stored reference picks it up,
    # matching the original statement ordering.
    if bind_label:
        info['bind_label'] = bind_label
    return Model
def sparql_query(self, query, flush=None, limit=None):
    """Run a SPARQL query.

    :param query: sparql query string
    :rtype: list of dictionary
    """
    opts = {'language': 'sparql', 'type': 'tuples',
            'flush': flush, 'limit': limit}
    return self.find_statements(query, **opts)
def query_symbol(self, asset: str) -> str:
contract_address = self.get_asset_address(asset)
method = 'symbol'
invoke_code = build_native_invoke_code(contract_address, b'\x00', method, bytearray())
tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
... | This interface is used to query the asset's symbol of ONT or ONG.
:param asset: a string which is used to indicate which asset's symbol we want to get.
:return: asset's symbol in the form of string. |
def getStartdatetime(self):
    """Returns the recording's start date and time as a datetime object.

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getStartdatetime()
    datetime.datetime(2011, 4, 4, 12, 57, 2)
    >>> f._close()
    """
    return datetime(
        self.startdate_year, self.startdate_month, self.startdate_day,
        self.starttime_hour, self.starttime_minute, self.starttime_second)
def paths(self, destination_account, destination_amount, source_account, destination_asset_code,
destination_asset_issuer=None):
destination_asset = Asset(destination_asset_code, destination_asset_issuer)
destination_asset_params = {
'destination_asset_type': destination_asset.... | Load a list of assets available to the source account id and find
any payment paths from those source assets to the desired
destination asset.
See the below docs for more information on required and optional
parameters for further specifying your search.
`GET /paths
<ht... |
def annotate_gemini(data, retriever=None):
    """Annotate with population calls if have data installed.

    Returns True only when both the "exac" and "gnomad_exome" resources
    are configured and their files exist (locally or remotely).
    """
    # NOTE(review): `retriever` is unused in this body -- confirm callers.
    r = dd.get_variation_resources(data)
    return all([r.get(k) and objectstore.file_exists_or_remote(r[k]) for k in ["exac", "gnomad_exome"]])
def _get_default_router(self, routers, router_name=None):
if router_name is None:
for router in routers:
if router['id'] is not None:
return router['id']
else:
for router in routers:
if router['hostname'] == router_name:
... | Returns the default router for ordering a dedicated host. |
def postags(self):
    """The list of word part-of-speech tags.

    Ambiguous cases are separated with pipe character by default.
    Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify
    custom separator for ambiguous entries.
    """
    # Lazily run morphological analysis before extracting POS tags.
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(POSTAG)
def check_existing_results(self, benchmark):
if os.path.exists(benchmark.log_folder):
sys.exit('Output directory {0} already exists, will not overwrite existing results.'.format(benchmark.log_folder))
if os.path.exists(benchmark.log_zip):
sys.exit('Output archive {0} already exis... | Check and abort if the target directory for the benchmark results
already exists in order to avoid overwriting results. |
def compare(self, compare_recipe, suffix='_compare'):
    """Adds a comparison recipe to a base recipe.

    :param compare_recipe: the Recipe to compare against.
    :param str suffix: suffix recorded alongside the comparison recipe.
    :return: the base recipe, allowing call chaining.
    """
    assert isinstance(compare_recipe, Recipe)
    assert isinstance(suffix, basestring)
    self.compare_recipe.append(compare_recipe)
    self.suffix.append(suffix)
    # Invalidate cached results so the comparison is (re)computed.
    self.dirty = True
    return self.recipe
def dump(props, output):
def escape(token):
return re.sub(r'([=:\s])', r'\\\1', token)
def write(out):
for k, v in props.items():
out.write('%s=%s\n' % (escape(str(k)), escape(str(v))))
if hasattr(output, 'write') and callable(output.write):
write(output)
elif isinstance(output... | Dumps a dict of properties to the specified open stream or file path.
:API: public |
def to_list(self):
    """Converts the vector to a list of the elements within it.

    The elements live at the odd indices of ``self.elements``; the even
    indices are skipped, exactly as the original index loop did.
    """
    # Slice replaces the manual range(1, len, 2) append loop.
    return list(self.elements[1::2])
def assert_trigger(self, session, protocol):
    """Asserts software or hardware trigger.

    Corresponds to viAssertTrigger function of the VISA library.

    :param session: Unique logical identifier to a session.
    :param protocol: Trigger protocol to use during assertion. (Constants.PROT*)
    :return: return value of the library call, or
        StatusCode.error_invalid_object for an unknown session id.
    """
    try:
        return self.sessions[session].assert_trigger(protocol)
    except KeyError:
        # Unknown session id -> VISA invalid-object status code.
        return constants.StatusCode.error_invalid_object
def extract_facts(rule):
    """Given a rule, return a set containing all rule LHS facts."""
    def _walk(element):
        if isinstance(element, Fact):
            yield element
        elif isinstance(element, TEST):
            # TEST conditions carry no facts.
            return
        else:
            for child in element:
                yield from _walk(child)
    return set(_walk(rule))
def render_subgraph(self, ontol, nodes, **args):
    """Render an `ontology` object after inducing a subgraph over ``nodes``."""
    induced = ontol.subontology(nodes, **args)
    return self.render(induced, **args)
def get_post(self, slug):
cache_key = self.get_cache_key(post_slug=slug)
content = cache.get(cache_key)
if not content:
post = Post.objects.get(slug=slug)
content = self._format(post)
cache_duration = conf.GOSCALE_CACHE_DURATION if post else 1
cach... | This method returns a single post by slug |
def make_blastdb(self):
db = os.path.splitext(self.formattedprimers)[0]
nhr = '{db}.nhr'.format(db=db)
if not os.path.isfile(str(nhr)):
command = 'makeblastdb -in {primerfile} -parse_seqids -max_file_sz 2GB -dbtype nucl -out {outfile}'\
.format(primerfile=self.formatt... | Create a BLAST database of the primer file |
def to_bytes(s, encoding="utf8"):
    """Converts str ``s`` to bytes.

    Uses the explicit encoding on Python 3 and a plain bytes() cast on
    Python 2; any other interpreter version raises ValueError.
    """
    if PY_VERSION == 3:
        return bytes(s, encoding)
    if PY_VERSION == 2:
        return bytes(s)
    raise ValueError("Is Python 4 out already?")
def mmi_to_delimited_file(self, force_flag=True):
LOGGER.debug('mmi_to_delimited_text requested.')
csv_path = os.path.join(
self.output_dir, 'mmi.csv')
if os.path.exists(csv_path) and force_flag is not True:
return csv_path
csv_file = open(csv_path, 'w')
c... | Save mmi_data to delimited text file suitable for gdal_grid.
The output file will be of the same format as strings returned from
:func:`mmi_to_delimited_text`.
:param force_flag: Whether to force the regeneration of the output
file. Defaults to False.
:type force_flag: bool... |
def updateActiveMarkupClass(self):
previousMarkupClass = self.activeMarkupClass
self.activeMarkupClass = find_markup_class_by_name(globalSettings.defaultMarkup)
if self._fileName:
markupClass = get_markup_for_file_name(
self._fileName, return_class=True)
if markupClass:
self.activeMarkupClass = mark... | Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted. |
def copy_assets(self, path='assets'):
path = os.path.join(self.root_path, path)
for root, _, files in os.walk(path):
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, path)
copy_to = os.path.join(self._get_dist_path(relpath, directory='assets'))
LOG.debug(... | Copy assets into the destination directory. |
def delete_user(self, id):
self.assert_has_permission('scim.write')
uri = self.uri + '/Users/%s' % id
headers = self._get_headers()
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
response = self.session.delete(uri, headers=headers)
loggi... | Delete user with given id. |
def iter_sources(self):
    """Iterates over all source names and IDs.

    Yields ``(src_id, src_name)`` tuples, one per source index.
    """
    # xrange: this code path targets Python 2.
    for src_id in xrange(self.get_source_count()):
        yield src_id, self.get_source_name(src_id)
def _realwavelets(s_freq, freqs, dur, width):
x = arange(-dur / 2, dur / 2, 1 / s_freq)
wavelets = empty((len(freqs), len(x)))
g = exp(-(pi * x ** 2) / width ** 2)
for i, one_freq in enumerate(freqs):
y = cos(2 * pi * x * one_freq)
wavelets[i, :] = y * g
return wavelets | Create real wavelets, for UCSD.
Parameters
----------
s_freq : int
sampling frequency
freqs : ndarray
vector with frequencies of interest
dur : float
duration of the wavelets in s
width : float
parameter controlling gaussian shape
Returns
-------
nda... |
def klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith,
solar_azimuth):
r
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0)
F = 1 - ((dhi / ghi) ** 2)
try:
F.fillna(0, inplace=... | r'''
Determine diffuse irradiance from the sky on a tilted surface
using Klucher's 1979 model
.. math::
I_{d} = DHI \frac{1 + \cos\beta}{2} (1 + F' \sin^3(\beta/2))
(1 + F' \cos^2\theta\sin^3\theta_z)
where
.. math::
F' = 1 - (I_{d0} / GHI)
Klucher's 1979 model determi... |
def toggle_badge(self, kind):
    """Toggle a badge given its kind: remove it when present, add it otherwise."""
    if self.get_badge(kind):
        return self.remove_badge(kind)
    return self.add_badge(kind)
def _cache_get_for_dn(self, dn: str) -> Dict[str, bytes]:
self._do_with_retry(
lambda obj: obj.search(
dn,
'(objectclass=*)',
ldap3.BASE,
attributes=['*', '+']))
results = self._obj.response
if len(results) < 1:
... | Object state is cached. When an update is required the update will be
simulated on this cache, so that rollback information can be correct.
This function retrieves the cached data. |
def is_won(grid):
    """Did the latest move win the game?"""
    # grid unpacks as (p, q); q is presumably the bitboard of the player
    # who just moved. A win is any winning pattern fully contained in q.
    _, latest = grid
    for way in ways_to_win:
        if way & latest == way:
            return True
    return False
def get_id_head(self):
    """Returns the id of the target that is set as "head".

    @rtype: string
    @return: the target id, or None when no head target exists.
    """
    for target in self:
        if target.is_head():
            return target.get_id()
    return None
def assess_content(member,file_filter):
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
if "assess_content" in file_filter:
if member_path in f... | Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object. |
def scale_samples(params, bounds):
b = np.array(bounds)
lower_bounds = b[:, 0]
upper_bounds = b[:, 1]
if np.any(lower_bounds >= upper_bounds):
raise ValueError("Bounds are not legal")
np.add(np.multiply(params,
(upper_bounds - lower_bounds),
out=... | Rescale samples in 0-to-1 range to arbitrary bounds
Arguments
---------
bounds : list
list of lists of dimensions `num_params`-by-2
params : numpy.ndarray
numpy array of dimensions `num_params`-by-:math:`N`,
where :math:`N` is the number of samples |
def get_duration_metadata(self):
    """Gets the metadata for the assessment duration.

    return: (osid.Metadata) - metadata for the duration
    *compliance: mandatory -- This method must be implemented.*
    """
    # Copy the template so the stored mapping is not mutated, then graft
    # in the currently-set duration values.
    metadata = dict(self._mdata['duration'])
    metadata.update({'existing_duration_values': self._my_map['duration']})
    return Metadata(**metadata)
def _in(ins):
    """Translates IN to asm.

    Emits Z80 code that reads the I/O port addressed by the quad's first
    operand and leaves the result pushed on the stack.
    """
    output = _16bit_oper(ins.quad[1])  # port address -> HL (presumably; helper defined elsewhere)
    output.append('ld b, h')
    output.append('ld c, l')  # BC = port address
    output.append('in a, (c)')  # A = byte read from port BC
    output.append('push af')  # expose the result via the stack
    return output
def encrypt(self, s, mac_bytes=10):
if isinstance(s, six.text_type):
raise ValueError(
"Encode `s` to a bytestring yourself to" +
" prevent problems with different default encodings")
out = BytesIO()
with self.encrypt_to(out, mac_bytes) as f:
... | Encrypt `s' for this pubkey. |
def _metadata_is_invalid(cls, fact):
    """Determines if the fact is not well formed: any URIRef token
    containing a space makes it invalid."""
    for token in fact:
        if isinstance(token, URIRef) and ' ' in token:
            return True
    return False
def post(self, path, auth=None, **kwargs):
    """Manually make a POST request.

    :param str path: relative url of the request (e.g. `/users/username`)
    :param auth.Authentication auth: authentication object
    :param kwargs dict: Extra arguments for the request, as supported by
        the `requests` library.
    """
    response = self._post(path, auth=auth, **kwargs)
    return self._check_ok(response)
def add_context_action(self, action):
    """Adds a custom context menu action.

    The action is registered on this splitter's tab widget and then
    propagated recursively to every child splitter.

    :param action: action to add.
    """
    self.main_tab_widget.context_actions.append(action)
    for child in self.child_splitters:
        child.add_context_action(action)
def rolling_window_sequences(X, index, window_size, target_size, target_column):
out_X = list()
out_y = list()
X_index = list()
y_index = list()
target = X[:, target_column]
for start in range(len(X) - window_size - target_size + 1):
end = start + window_size
out_X.append(X[start... | Create rolling window sequences out of timeseries data. |
def redirect_logging(tqdm_obj, logger=logging.getLogger()):
assert(len(logger.handlers) == 1)
prev_handler = logger.handlers[0]
logger.removeHandler(prev_handler)
tqdm_handler = TqdmLoggingHandler(tqdm_obj)
if prev_handler.formatter is not None:
tqdm_handler.setFormatter(prev_handler.formatter)
logger.a... | Context manager to redirect logging to a TqdmLoggingHandler object and then restore the original. |
def horizon_dashboard_nav(context):
if 'request' not in context:
return {}
dashboard = context['request'].horizon['dashboard']
panel_groups = dashboard.get_panel_groups()
non_empty_groups = []
for group in panel_groups.values():
allowed_panels = []
for panel in group:
... | Generates sub-navigation entries for the current dashboard. |
def lookup_function(val):
"Look-up and return a pretty-printer that can print va."
type = val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename == None:
return None
for function in pretty_printers_dict:
if function.search... | Look-up and return a pretty-printer that can print va. |
def start(self):
self.parse_opt()
self.parse_cfg()
if self.options.browse or self.options.browse_big or self.options.progress:
self.browse()
raise SystemExit
paramlist = []
for exp in self.cfgparser.sections():
if not self.options.experiments o... | starts the experiments as given in the config file. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.