code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def update_work_as_completed(self, worker_id, work_id, other_values=None,
error=None):
client = self._datastore_client
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
KIND_W... | Updates work piece in datastore as completed.
Args:
worker_id: ID of the worker which did the work
work_id: ID of the work which was done
other_values: dictionary with additonal values which should be saved
with the work piece
error: if not None then error occurred during computatio... |
def set_shortcut(self, name, shortcut):
name = self.__normalize_name(name)
action = self.get_action(name)
if not action:
return
action.setShortcut(QKeySequence(shortcut))
return True | Sets given action shortcut.
:param name: Action to set the shortcut.
:type name: unicode
:param shortcut: Shortcut to set.
:type shortcut: unicode
:return: Method success.
:rtype: bool |
def fetchJobStoreFiles(jobStore, options):
for jobStoreFile in options.fetch:
jobStoreHits = recursiveGlob(directoryname=options.jobStore,
glob_pattern=jobStoreFile)
for jobStoreFileID in jobStoreHits:
logger.debug("Copying job store file: %s to %s",
... | Takes a list of file names as glob patterns, searches for these within a
given directory, and attempts to take all of the files found and copy them
into options.localFilePath.
:param jobStore: A fileJobStore object.
:param options.fetch: List of file glob patterns to search
for in the jobStore ... |
def get_service_name(*args):
raw_services = _get_services()
services = dict()
for raw_service in raw_services:
if args:
if raw_service['DisplayName'] in args or \
raw_service['ServiceName'] in args or \
raw_service['ServiceName'].lower() in args:
... | The Display Name is what is displayed in Windows when services.msc is
executed. Each Display Name has an associated Service Name which is the
actual name of the service. This function allows you to discover the
Service Name by returning a dictionary of Display Names and Service Names,
or filter by add... |
def _identify_all(header, footer, ext=None):
matches = list()
for magic_row in magic_header_array:
start = magic_row.offset
end = magic_row.offset + len(magic_row.byte_match)
if end > len(header):
continue
if header[start:end] == magic_row.byte_match:
matc... | Attempt to identify 'data' by its magic numbers |
def do_list(self, line):
repo_names = self.network.repo_names
print('Known repos:')
print(' ' + '\n '.join(repo_names)) | List known repos |
def create_version_model(self, task, releasetype, descriptor):
rootdata = treemodel.ListItemData(['Version', 'Releasetype', 'Path'])
rootitem = treemodel.TreeItem(rootdata)
for tf in task.taskfile_set.filter(releasetype=releasetype, descriptor=descriptor).order_by('-version'):
tfdata... | Create and return a new model that represents taskfiles for the given task, releasetpye and descriptor
:param task: the task of the taskfiles
:type task: :class:`djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str
:param descriptor: the descirptor
... |
def _dict_to_report_line(cls, report_dict):
return '\t'.join([str(report_dict[x]) for x in report.columns]) | Takes a report_dict as input and returns a report line |
def append_rally_point(self, p):
if (self.rally_count() > 9):
print("Can't have more than 10 rally points, not adding.")
return
self.rally_points.append(p)
self.reindex() | add rallypoint to end of list |
def issuetypes(accountable, project_key):
projects = accountable.issue_types(project_key)
headers = sorted(['id', 'name', 'description'])
rows = []
for key, issue_types in sorted(projects.items()):
for issue_type in issue_types:
rows.append(
[key] + [v for k, v in sor... | List all issue types. Optional parameter to list issue types by a given
project. |
def _scalar2array(d):
da = {}
for k, v in d.items():
if '_' not in k:
da[k] = v
else:
name = ''.join(k.split('_')[:-1])
ind = k.split('_')[-1]
dim = len(ind)
if name not in da:
shape = tuple(3 for i in range(dim))
... | Convert a dictionary with scalar elements and string indices '_1234'
to a dictionary of arrays. Unspecified entries are np.nan. |
def ensure_file(path):
try:
exists = isfile(path)
if not exists:
with open(path, 'w+') as fname:
fname.write('initialized')
return (True, path)
return (True, 'exists')
except OSError as e:
return (False, ... | Checks if file exists, if fails, tries to create file |
def _get_keycache(self, parentity, branch, turn, tick, *, forward):
lru_append(self.keycache, self._kc_lru, (parentity+(branch,), turn, tick), KEYCACHE_MAXSIZE)
return self._get_keycachelike(
self.keycache, self.keys, self._get_adds_dels,
parentity, branch, turn, tick, forward=fo... | Get a frozenset of keys that exist in the entity at the moment.
With ``forward=True``, enable an optimization that copies old key sets
forward and updates them. |
def write_pruned_iocs(self, directory=None, pruned_source=None):
if pruned_source is None:
pruned_source = self.pruned_11_iocs
if len(pruned_source) < 1:
log.error('no iocs available to write out')
return False
if not directory:
directory = os.getc... | Writes IOCs to a directory that have been pruned of some or all IOCs.
:param directory: Directory to write IOCs to. If not provided, the current working directory is used.
:param pruned_source: Iterable containing a set of iocids. Defaults to self.iocs_10.
:return: |
def update(self):
for node in self.get_all_nodes():
try:
node.update_ips()
if node.ips and \
not (node.preferred_ip and \
node.preferred_ip in node.ips):
node.connect()
ex... | Update connection information of all nodes in this cluster.
It happens, for example, that public ip's are not available
immediately, therefore calling this method might help. |
def run(config, clear_opt=False):
flickr = flickrapi.FlickrAPI(config.get('walls', 'api_key'),
config.get('walls', 'api_secret'))
width = config.getint('walls', 'width')
height = config.getint('walls', 'height')
if clear_opt:
clear_dir(os.path.expanduser(config.g... | Find an image and download it. |
def get_all_snapshots(self, snapshot_ids=None,
owner=None, restorable_by=None,
filters=None):
params = {}
if snapshot_ids:
self.build_list_params(params, snapshot_ids, 'SnapshotId')
if owner:
params['Owner'] = owner
... | Get all EBS Snapshots associated with the current credentials.
:type snapshot_ids: list
:param snapshot_ids: Optional list of snapshot ids. If this list is
present, only the Snapshots associated with
these snapshot ids will be returned.
... |
def _ostaunicode(src):
if have_py_3:
bytename = src
else:
bytename = src.decode('utf-8')
try:
enc = bytename.encode('latin-1')
encbyte = b'\x08'
except (UnicodeEncodeError, UnicodeDecodeError):
enc = bytename.encode('utf-16_be')
encbyte = b'\x10'
retur... | Internal function to create an OSTA byte string from a source string. |
def virtual_temperature(temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
r
return temperature * ((mixing + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing))) | r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
The temperature
mixing : `pint.Quantity`
dimens... |
def check_validation(self, cert):
if self.certificate_registry.is_ca(cert) and cert.signature not in self._validate_map:
self._validate_map[cert.signature] = ValidationPath(cert)
return self._validate_map.get(cert.signature) | Checks to see if a certificate has been validated, and if so, returns
the ValidationPath used to validate it.
:param cert:
An asn1crypto.x509.Certificate object
:return:
None if not validated, or a certvalidator.path.ValidationPath
object of the validation p... |
def gene_list(self, list_id):
return self.query(GeneList).filter_by(list_id=list_id).first() | Get a gene list from the database. |
def build_news(ctx, draft=False, yes=False):
report.info(ctx, "docs.build-news", "building changelog from news fragments")
build_command = f"towncrier --version {ctx.metadata['version']}"
if draft:
report.warn(
ctx,
"docs.build-news",
"building changelog as draft ... | Build towncrier newsfragments. |
def reset(self):
for shard_id in self._shards:
if self._shards[shard_id].get('isReplicaSet'):
singleton = ReplicaSets()
elif self._shards[shard_id].get('isServer'):
singleton = Servers()
singleton.command(self._shards[shard_id]['_id'], 'reset')... | Ensure all shards, configs, and routers are running and available. |
def connect(self, host, port, name=None):
client = self._clients.get(name)
client.connect_to(host, port) | Connects a client to given `host` and `port`. If client `name` is not
given then connects the latest client.
Examples:
| Connect | 127.0.0.1 | 8080 |
| Connect | 127.0.0.1 | 8080 | Client1 | |
def count(self, eventRegistry):
self.setRequestedResult(RequestEventsInfo())
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get("events", {}).get("totalResults", 0)
return count | return the number of events that match the criteria |
def new(self, dev_t_high, dev_t_low):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PN record already initialized!')
self.dev_t_high = dev_t_high
self.dev_t_low = dev_t_low
self._initialized = True | Create a new Rock Ridge POSIX device number record.
Parameters:
dev_t_high - The high-order 32-bits of the device number.
dev_t_low - The low-order 32-bits of the device number.
Returns:
Nothing. |
def extract_image_size(self):
width, _ = self._extract_alternative_fields(
['Image ImageWidth', 'EXIF ExifImageWidth'], -1, int)
height, _ = self._extract_alternative_fields(
['Image ImageLength', 'EXIF ExifImageLength'], -1, int)
return width, height | Extract image height and width |
def export_file(file_path):
if not os.path.isfile(file_path):
return error("Referenced file does not exist: '{}'.".format(file_path))
return "export {}".format(file_path) | Prepend the given parameter with ``export`` |
def make_archive(self, path):
zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(self.path):
relative_path = dirpath[len(self.path) + 1:]
if relative_path and not self._ignore(relative_path):
zf.write(dirpath, relativ... | Create archive of directory and write to ``path``.
:param path: Path to archive
Ignored::
* build/* - This is used for packing the charm itself and any
similar tasks.
* */.* - Hidden files are all ignored for now. This will most
... |
def timescales_(self):
u, lv, rv = self._get_eigensystem()
with np.errstate(invalid='ignore', divide='ignore'):
timescales = - self.lag_time / np.log(u[1:])
return timescales | Implied relaxation timescales of the model.
The relaxation of any initial distribution towards equilibrium is
given, according to this model, by a sum of terms -- each corresponding
to the relaxation along a specific direction (eigenvector) in state
space -- which decay exponentially in... |
def get_remote_info(url_id):
try:
data = _send_request(url_id)
except Exception as e:
sys.stderr.write("Seeder GET error: ")
sys.stderr.write(str(e.message))
return None
return _convert_to_wakat_format(data) | Download data and convert them to dict used in frontend.
Args:
url_id (str): ID used as identification in Seeder.
Returns:
dict: Dict with data for frontend or None in case of error. |
def update(self, item):
self.model.set(self._iter_for(item), 0, item) | Manually update an item's display in the list
:param item: The item to be updated. |
def render_js_code(self, id_, *args, **kwargs):
if id_:
options = self.render_select2_options_code(
dict(self.get_options()), id_)
return mark_safe(self.html.format(id=id_, options=options))
return u'' | Render html container for Select2 widget with options. |
def wildcard_allowed_principals(self, pattern=None):
wildcard_allowed = []
for statement in self.statements:
if statement.wildcard_principals(pattern) and statement.effect == "Allow":
wildcard_allowed.append(statement)
return wildcard_allowed | Find statements which allow wildcard principals.
A pattern can be specified for the wildcard principal |
def to_content_range_header(self, length):
range_for_length = self.range_for_length(length)
if range_for_length is not None:
return "%s %d-%d/%d" % (
self.units,
range_for_length[0],
range_for_length[1] - 1,
length,
... | Converts the object into `Content-Range` HTTP header,
based on given length |
def _concat(self, egdfs):
egdfs = list(egdfs)
edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False)
one2one = (
self.keep_index and
not any(edata.index.duplicated()) and
len(edata.index) == len(self.data.index))
if one2one:
edata... | Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data |
async def get_state_json(
self,
rr_state_builder: Callable[['Verifier', str, int], Awaitable[Tuple[str, int]]],
fro: int,
to: int) -> (str, int):
LOGGER.debug(
'RevoCacheEntry.get_state_json >>> rr_state_builder: %s, fro: %s, to: %s',
rr_st... | Get rev reg state json, and its timestamp on the distributed ledger,
from cached rev reg state frames list or distributed ledger,
updating cache as necessary.
Raise BadRevStateTime if caller asks for a state in the future.
On return of any previously existing rev reg state frame, alway... |
def append(self, p_todo, p_string):
if len(p_string) > 0:
new_text = p_todo.source() + ' ' + p_string
p_todo.set_source_text(new_text)
self._update_todo_ids()
self.dirty = True | Appends a text to the todo, specified by its number.
The todo will be parsed again, such that tags and projects in de
appended string are processed. |
def from_gtp(gtpc):
gtpc = gtpc.upper()
if gtpc == 'PASS':
return None
col = _GTP_COLUMNS.index(gtpc[0])
row_from_bottom = int(gtpc[1:])
return go.N - row_from_bottom, col | Converts from a GTP coordinate to a Minigo coordinate. |
def zoom_bbox(self, bbox):
try:
bbox.transform(self.map.srs)
except gdal.GDALException:
pass
else:
self.map.zoom_to_box(mapnik.Box2d(*bbox.extent)) | Zoom map to geometry extent.
Arguments:
bbox -- OGRGeometry polygon to zoom map extent |
def lookup(self, iterable, gather=False):
for result in self.root.lookup(iterable,
gather=gather,
edit_distance=0,
max_edit_distance=self.max_edit_distance,
... | Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init |
def flatten(inputs, scope=None):
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must be have a least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k]) | Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is ... |
def parse(s):
r
stuff = []
rest = s
while True:
front, token, rest = peel_off_esc_code(rest)
if front:
stuff.append(front)
if token:
try:
tok = token_type(token)
if tok:
stuff.extend(tok)
exce... | r"""
Returns a list of strings or format dictionaries to describe the strings.
May raise a ValueError if it can't be parsed.
>>> parse(">>> []")
['>>> []']
>>> #parse("\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m") |
def save(self, *args, **kwargs):
letter = getattr(self, "block_letter", None)
if letter and len(letter) >= 1:
self.block_letter = letter[:1].upper() + letter[1:]
super(EighthBlock, self).save(*args, **kwargs) | Capitalize the first letter of the block name. |
def compose(self, mapping):
items = [f.compose(mapping) for f in self._items]
return self.__class__(items, self.shape, self.ftype) | Apply the ``compose`` method to all functions.
Returns a new farray. |
def render(self, context):
user = self._get_value(self.user_key, context)
feature = self._get_value(self.feature, context)
if feature is None:
return ''
allowed = show_feature(user, feature)
return self.nodelist.render(context) if allowed else '' | Handle the actual rendering. |
def synchronizeLayout(primary, secondary, surface_size):
primary.configure_bound(surface_size)
secondary.configure_bound(surface_size)
if (primary.key_size < secondary.key_size):
logging.warning('Normalizing key size from secondary to primary')
secondary.key_size = primary.key_size
elif ... | Synchronizes given layouts by normalizing height by using
max height of given layouts to avoid transistion dirty effects.
:param primary: Primary layout used.
:param secondary: Secondary layout used.
:param surface_size: Target surface size on which layout will be displayed. |
def save_project(self, project, filename=''):
r
if filename == '':
filename = project.name
filename = self._parse_filename(filename=filename, ext='pnm')
d = {project.name: project}
with open(filename, 'wb') as f:
pickle.dump(d, f) | r"""
Saves given Project to a 'pnm' file
This will include all of associated objects, including algorithms.
Parameters
----------
project : OpenPNM Project
The project to save.
filename : string, optional
If no filename is given, the given proje... |
def real_space(self):
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`real_space` not defined for non-numeric `dtype`')
return self.astype(self.real_dtype) | The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type. |
def __update_paths(self, settings):
if not isinstance(settings, dict):
return
if 'custom_base_path' in settings:
base_path = settings['custom_base_path']
base_path = join(dirname(__file__), base_path)
self.__load_paths(base_path) | Set custom paths if necessary |
def ms_cutall(self, viewer, event, data_x, data_y):
if not self.cancut:
return True
x, y = self.get_win_xy(viewer)
if event.state == 'move':
self._cutboth_xy(viewer, x, y)
elif event.state == 'down':
self._start_x, self._start_y = x, y
imag... | An interactive way to set the low AND high cut levels. |
def _parse_box_list(self, output):
boxes = []
name = provider = version = None
for timestamp, target, kind, data in self._parse_machine_readable_output(output):
if kind == 'box-name':
if name is not None:
boxes.append(Box(name=name, provider=provid... | Remove Vagrant usage for unit testing |
def isempty(path):
if op.isdir(path):
return [] == os.listdir(path)
elif op.isfile(path):
return 0 == os.stat(path).st_size
return None | Returns True if the given file or directory path is empty.
**Examples**:
::
auxly.filesys.isempty("foo.txt") # Works on files...
auxly.filesys.isempty("bar") # ...or directories! |
def start(self):
self.streams.append(sys.stdout)
sys.stdout = self.stream | Activate the TypingStream on stdout |
def output_package(dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version) | Return string displaying package information. |
def _regressor_names(con_name, hrf_model, fir_delays=None):
if hrf_model in ['glover', 'spm', None]:
return [con_name]
elif hrf_model in ["glover + derivative", 'spm + derivative']:
return [con_name, con_name + "_derivative"]
elif hrf_model in ['spm + derivative + dispersion',
... | Returns a list of regressor names, computed from con-name and hrf type
Parameters
----------
con_name: string
identifier of the condition
hrf_model: string or None,
hrf model chosen
fir_delays: 1D array_like, optional,
Delays used in case of an FIR model
Returns
--... |
def page(self, end=values.unset, start=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
params = values.of({
'End': serialize.iso8601_datetime(end),
'Start': serialize.iso8601_datetime(start),
'PageToken': page_token,
... | Retrieve a single page of DataSessionInstance records from the API.
Request is executed immediately
:param datetime end: The end
:param datetime start: The start
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for cl... |
def get_blocked(self):
url = self.reddit_session.config['blocked']
return self.reddit_session.request_json(url) | Return a UserList of Redditors with whom the user has blocked. |
def _id_to_subword(self, subword_id):
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 <= subword_id < len(self._subwords):
return self._subwor... | Converts a subword integer ID to a subword string. |
def find_entry_name_of_alias(self, alias):
if alias in self.aliases:
name = self.aliases[alias]
if name in self.entries:
return name
else:
for name, entry in self.entries.items():
aliases = entry.get_aliases(includename=Fals... | Return the first entry name with the given 'alias' included in its
list of aliases.
Returns
-------
name of matching entry (str) or 'None' if no matches |
def __SetDefaultUploadStrategy(self, upload_config, http_request):
if upload_config.resumable_path is None:
self.strategy = SIMPLE_UPLOAD
if self.strategy is not None:
return
strategy = SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_s... | Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) the... |
def _is_interactive(self):
return not (
self.realworld and (dt.date.today() > self.datetime.date())) | Prevent middlewares and orders to work outside live mode |
def cancelUpdate(self):
key = '/library/sections/%s/refresh' % self.key
self._server.query(key, method=self._server._session.delete) | Cancel update of this Library Section. |
def mediatype_create(name, mediatype, **kwargs):
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'mediatype.create'
params = {"description": name}
params['type'] = mediatype
params = _params_extend(params, _ignore_name=True, **kwargs)... | Create new mediatype
.. note::
This function accepts all standard mediatype properties: keyword
argument names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/3.0/manual/api/reference/mediatype/object
:param mediatype: media type - 0: e... |
def sojourn_time(p):
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii) | Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
... |
def trace_in_process_link(self, link_bytes):
return tracers.InProcessLinkTracer(self._nsdk,
self._nsdk.trace_in_process_link(link_bytes)) | Creates a tracer for tracing asynchronous related processing in the same process.
For more information see :meth:`create_in_process_link`.
:param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`.
:rtype: tracers.InProcessLinkTracer
.. versionadded:: 1... |
def create_environment(self, name, default=False, zone=None):
from qubell.api.private.environment import Environment
return Environment.new(organization=self, name=name, zone_id=zone, default=default, router=self._router) | Creates environment and returns Environment object. |
async def _do(self, ctx, times: int, *, command):
msg = copy.copy(ctx.message)
msg.content = command
for i in range(times):
await self.bot.process_commands(msg) | Repeats a command a specified number of times. |
def flatten(self):
args = list(self.args)
i = 0
for arg in self.args:
if isinstance(arg, self.__class__):
args[i:i + 1] = arg.args
i += len(arg.args)
else:
i += 1
return self.__class__(*args) | Return a new expression where nested terms of this expression are
flattened as far as possible.
E.g. A & (B & C) becomes A & B & C. |
def parse(self, data_model, crit):
tables = pd.DataFrame(data_model)
data_model = {}
for table_name in tables.columns:
data_model[table_name] = pd.DataFrame(tables[table_name]['columns']).T
data_model[table_name] = data_model[table_name].where((pd.notnull(data_model[table... | Take the relevant pieces of the data model json
and parse into data model and criteria map.
Parameters
----------
data_model : data model piece of json (nested dicts)
crit : criteria map piece of json (nested dicts)
Returns
----------
data_model : dictio... |
def signup_verify(request, uidb36=None, token=None):
user = authenticate(uidb36=uidb36, token=token, is_active=False)
if user is not None:
user.is_active = True
user.save()
auth_login(request, user)
info(request, _("Successfully signed up"))
return login_redirect(request)... | View for the link in the verification email sent to a new user
when they create an account and ``ACCOUNTS_VERIFICATION_REQUIRED``
is set to ``True``. Activates the user and logs them in,
redirecting to the URL they tried to access when signing up. |
def _prepare_value(val, maxlen=50, notype=False):
if val is None or val is True or val is False:
return str(val)
sval = repr(val)
sval = sval.replace("\n", " ").replace("\t", " ").replace("`", "'")
if len(sval) > maxlen:
sval = sval[:maxlen - 4] + "..." + sval[-1]
if notype:
... | Stringify value `val`, ensuring that it is not too long. |
def dimod_object_hook(obj):
if _is_sampleset_v2(obj):
return SampleSet.from_serializable(obj)
elif _is_bqm_v2(obj):
return BinaryQuadraticModel.from_serializable(obj)
return obj | JSON-decoding for dimod objects.
See Also:
:class:`json.JSONDecoder` for using custom decoders. |
def template_string(
task: Task, template: str, jinja_filters: FiltersDict = None, **kwargs: Any
) -> Result:
jinja_filters = jinja_filters or {} or task.nornir.config.jinja2.filters
text = jinja_helper.render_from_string(
template=template, host=task.host, jinja_filters=jinja_filters, **kwargs
... | Renders a string with jinja2. All the host data is available in the template
Arguments:
template (string): template string
jinja_filters (dict): jinja filters to enable. Defaults to nornir.config.jinja2.filters
**kwargs: additional data to pass to the template
Returns:
Result o... |
def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = ... | Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default. |
def kernel_command_line(self, kernel_command_line):
log.info('QEMU VM "{name}" [{id}] has set the QEMU kernel command line to {kernel_command_line}'.format(name=self._name,
id=self._id,
... | Sets the kernel command line for this QEMU VM.
:param kernel_command_line: QEMU kernel command line |
def post_task(task_data, task_uri='/tasks'):
url = '{}/{}'.format(API_URL, task_uri.lstrip('/'))
if isinstance(task_data, str):
task_json = task_data
else:
task_json = json.dumps(task_data)
resp = requests.post(url, data=task_json, headers=HEADERS, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT... | Create Spinnaker Task.
Args:
task_data (str): Task JSON definition.
Returns:
str: Spinnaker Task ID.
Raises:
AssertionError: Error response from Spinnaker. |
def _get_md_files(self):
all_f = _all_files_matching_ext(os.getcwd(), "md")
exclusions = [
"*.egg/*",
"*.eggs/*",
"*build/*"
] + self.exclusions
return sorted([f for f in all_f if not _is_excluded(f, exclusions)]) | Get all markdown files. |
def plot_posterior_contour(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
return plt.contour(*self.posterior_mesh(idx_param1, idx_param2, res1, res2, smoothing)) | Plots a contour of the kernel density estimation
of a 2D projection of the current posterior distribution.
:param int idx_param1: Parameter to be treated as :math:`x` when
plotting.
:param int idx_param2: Parameter to be treated as :math:`y` when
plotting.
:param... |
def getItem(self, index, altItem=None):
if index.isValid():
item = index.internalPointer()
if item:
return item
return altItem | Returns the TreeItem for the given index. Returns the altItem if the index is invalid. |
def run_qpoints(self,
q_points,
with_eigenvectors=False,
with_group_velocities=False,
with_dynamical_matrices=False,
nac_q_direction=None):
if self._dynamical_matrix is None:
msg = ("Dynamical matrix ... | Phonon calculations on q-points.
Parameters
----------
q_points: array_like or float, optional
q-points in reduced coordinates.
dtype='double', shape=(q-points, 3)
with_eigenvectors: bool, optional
Eigenvectors are stored by setting True. Default Fals... |
def get_image_upload_to(self, filename):
dummy, ext = os.path.splitext(filename)
return os.path.join(
machina_settings.FORUM_IMAGE_UPLOAD_TO,
'{id}{ext}'.format(id=str(uuid.uuid4()).replace('-', ''), ext=ext),
) | Returns the path to upload a new associated image to. |
def update_credit_note(self, credit_note_id, credit_note_dict):
return self._create_put_request(resource=CREDIT_NOTES, billomat_id=credit_note_id, send_data=credit_note_dict) | Updates a credit note
:param credit_note_id: the credit note id
:param credit_note_dict: dict
:return: dict |
def _parse_options(options: List[str]) -> Dict[str, str]:
try:
return dict(i.split('=', maxsplit=1) for i in options)
except ValueError:
raise ArgumentError(
f'Option must be in format <key>=<value>, got: {options}') | Parse repeatable CLI options
>>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"'])
>>> print(json.dumps(opts, sort_keys=True))
{"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"} |
def set_toolBox_height(tool_box, height=32):
for button in tool_box.findChildren(QAbstractButton):
button.setMinimumHeight(height)
return True | Sets given height to given QToolBox widget.
:param toolbox: ToolBox.
:type toolbox: QToolBox
:param height: Height.
:type height: int
:return: Definition success.
:rtype: bool |
def post_message(self, msg):
super(mavlogfile, self).post_message(msg)
if self.planner_format:
self.f.read(1)
self.timestamp = msg._timestamp
self._last_message = msg
if msg.get_type() != "BAD_DATA":
self._last_timestamp = msg._timestamp
msg._link ... | add timestamp to message |
def wrap_generator(func):
async def _wrapped(*a, **k):
r, ret = None, []
gen = func(*a, **k)
while True:
try:
item = gen.send(r)
except StopIteration:
break
if inspect.isawaitable(item):
r = await item
... | Decorator to convert a generator function to an async function which collects
and returns generator results, returning a list if there are multiple results |
def force_lazy_import(name):
obj = import_object(name)
module_items = list(getattr(obj, '__dict__', {}).items())
for key, value in module_items:
if getattr(value, '__module__', None):
import_object(name + '.' + key) | Import any modules off of "name" by iterating a new list rather than a generator so that this
library works with lazy imports. |
def to_adb_message(self, data):
message = AdbMessage(AdbMessage.WIRE_TO_CMD.get(self.cmd),
self.arg0, self.arg1, data)
if (len(data) != self.data_length or
message.data_crc32 != self.data_checksum):
raise usb_exceptions.AdbDataIntegrityError(
'%s (%s) received in... | Turn the data into an ADB message. |
def default_value(self, default_value):
if default_value not in self.default_values:
if len(self.default_labels) == len(self.default_values):
self.default_values[-1] = default_value
else:
self.default_values.append(default_value)
self._default_valu... | Setter for default_value.
:param default_value: The default value.
:type default_value: object |
def channel(self, channel_id=None):
    """Fetch a Channel object identified by the numeric channel_id, or
    create that object if it doesn't already exist.
    """
    try:
        # EAFP: return the cached channel when one exists.
        return self.channels[channel_id]
    except KeyError:
        return Channel(self, channel_id)
def put(request, obj_id=None):
res = Result()
data = request.PUT or json.loads(request.body)['body']
if obj_id:
tag = Tag.objects.get(pk=obj_id)
tag.name = data.get('name', tag.name)
tag.artist = data.get('artist', tag.artist)
tag.save()
else:
tags = [_ for _ in d... | Adds tags from objects resolved from guids
:param tags: Tags to add
:type tags: list
:param guids: Guids to add tags from
:type guids: list
:returns: json |
def _internal_function_call(self, call_conf):
def stub(*args, **kwargs):
message = 'Function {} is not available'.format(call_conf['fun'])
self.out.error(message)
log.debug(
'Attempt to run "%s" with %s arguments and %s parameters.',
call_conf[... | Call internal function.
:param call_conf:
:return: |
def validate_json_field(dist, attr, value):
    """Check for json validity.

    Raises DistutilsSetupError (naming the offending attribute) when
    ``value`` is not JSON-compatible; returns True otherwise.
    """
    try:
        is_json_compat(value)
    except ValueError as exc:
        # Surface the validation failure as a setup error so setuptools
        # reports which attribute was invalid.
        raise DistutilsSetupError("%r %s" % (attr, exc))
    else:
        return True
def remove(self, parent, child):
    """Remove a dependency between parent and child.

    Parameters
    ----------
    parent : boolean instance of :class:`katcp.Sensor`
        The sensor that used to depend on child.
    child : boolean instance of :class:`katcp.Sensor` or None
        The sensor parent used to depend on.
    """
    self.remove_links(parent, (child,))
    # Drop bookkeeping for any sensor that no longer participates in
    # the dependency graph (checked parent first, then child, as before).
    for sensor in (parent, child):
        if sensor not in self and sensor in self._parent_to_not_ok:
            del self._parent_to_not_ok[sensor]
def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None:
self._check_and_create_process(server_info.hostname)
self._queued_tasks_nb += 1
if scan_command.is_aggressive:
self._hostname_queues_dict[server_info.hostname].put((server_info... | Queue a scan command targeting a specific server.
Args:
server_info: The server's connectivity information. The test_connectivity_to_server() method must have been
called first to ensure that the server is online and accessible.
scan_command: The scan command to run agai... |
def get_config_window_bounds(self):
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)
monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
if monitor_index > -1:
try:
... | Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor
:return: coords X and Y where set the browser window. |
def wnsumd(window):
assert isinstance(window, stypes.SpiceCell)
assert window.dtype == 1
meas = ctypes.c_double()
avg = ctypes.c_double()
stddev = ctypes.c_double()
shortest = ctypes.c_int()
longest = ctypes.c_int()
libspice.wnsumd_c(ctypes.byref(window), ctypes.byref(meas),
... | Summarize the contents of a double precision window.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnsumd_c.html
:param window: Window to be summarized.
:type window: spiceypy.utils.support_types.SpiceCell
:return:
Total measure of intervals in window,
Average measur... |
def start(self):
    """Start collecting trace information."""
    # Frame of whoever called start(): tracing is attributed to it.
    caller_frame = inspect.stack()[1][0]
    self.reset()
    self._start_tracer(caller_frame)
def build_binary_op(self, op, other):
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
... | Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.