code stringlengths 59 4.4k | docstring stringlengths 5 7.69k |
|---|---|
def _process_pagination_values(request):
size = 20
page = 0
from_ = 0
if "page_size" in request.POST:
size = int(request.POST["page_size"])
max_page_size = getattr(settings, "SEARCH_MAX_PAGE_SIZE", 100)
if not (0 < size <= max_page_size):
raise ValueError(_('Invalid p... | process pagination requests from request parameter |
def microcanonical_statistics_dtype(spanning_cluster=True):
fields = list()
fields.extend([
('n', 'uint32'),
('edge', 'uint32'),
])
if spanning_cluster:
fields.extend([
('has_spanning_cluster', 'bool'),
])
fields.extend([
('max_cluster_size', 'uint... | Return the numpy structured array data type for sample states
Helper function
Parameters
----------
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
Returns
-------
ret : list of pairs of str
A list of tuples of f... |
def loopless_solution(model, fluxes=None):
if fluxes is None:
sol = model.optimize(objective_sense=None)
fluxes = sol.fluxes
with model:
prob = model.problem
loopless_obj_constraint = prob.Constraint(
model.objective.expression,
lb=-1e32, name="loopless_ob... | Convert an existing solution to a loopless one.
Removes as many loops as possible (see Notes).
Uses the method from CycleFreeFlux [1]_ and is much faster than
`add_loopless` and should therefore be the preferred option to get loopless
flux distributions.
Parameters
----------
model : cobra... |
def is_installed(pkg_name):
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("dpkg -s %(pkg_name)s" % locals())
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in statu... | Check if a package is installed. |
def set(self, model, value):
    """Set the tag on *model*, replacing any previous value.

    The value is validated first, any existing tag is removed via
    ``_pop``, and the serialized form is appended to ``model.tags``.
    """
    self.validate(value)
    self._pop(model)
    model.tags.append(self.serialize(value))
def actions(connection):
    """Print every recorded action with its timestamp and resource."""
    db_session = _make_session(connection=connection)
    for action in Action.ls(session=db_session):
        line = f'{action.created} {action.action} {action.resource}'
        click.echo(line)
def _getFuncPtrAndParams(self, funcName):
params = None
if isinstance(funcName, basestring):
if funcName == 'sum':
fp = _aggr_sum
elif funcName == 'first':
fp = _aggr_first
elif funcName == 'last':
fp = _aggr_last
elif funcName == 'mean':
fp = _aggr_mean
... | Given the name of an aggregation function, returns the function pointer
and param.
Parameters:
------------------------------------------------------------------------
funcName: a string (name of function) or funcPtr
retval: (funcPtr, param) |
def supported_versions(django, cms):
cms_version = None
django_version = None
try:
cms_version = Decimal(cms)
except (ValueError, InvalidOperation):
try:
cms_version = CMS_VERSION_MATRIX[str(cms)]
except KeyError:
pass
try:
django_version = Dec... | Convert numeric and literal version information to numeric format |
def write_log(log_path, data, allow_append=True):
append = os.path.isfile(log_path)
islist = isinstance(data, list)
if append and not allow_append:
raise Exception('Appending has been disabled'
' and file %s exists' % log_path)
if not (islist or is... | Writes the supplied specifications to the log path. The data
may be supplied as either as a an Args or as a list of
dictionaries.
By default, specifications will be appropriately appended to
an existing log file. This can be disabled by setting
allow_append to False. |
def walk_up(start_dir, depth=20):
root = start_dir
for i in xrange(depth):
contents = os.listdir(root)
subdirs, files = [], []
for f in contents:
if os.path.isdir(os.path.join(root, f)):
subdirs.append(f)
else:
files.append(f)
... | Walk up a directory tree |
def disconnect(self):
    """Close every database connection that is still open."""
    for _name, conn in self.items():
        if conn.is_closed():
            continue
        conn.close()
def lock_key(key_name,
stash,
passphrase,
backend):
stash = _get_stash(backend, stash, passphrase)
try:
click.echo('Locking key...')
stash.lock(key_name=key_name)
click.echo('Key locked successfully')
except GhostError as ex:
sys.exit(ex... | Lock a key to prevent it from being deleted, purged or modified
`KEY_NAME` is the name of the key to lock |
def add_business_days(self, days_int, holiday_obj=None):
res = self
if days_int >= 0:
count = 0
while count < days_int:
res = BusinessDate.add_days(res, 1)
if BusinessDate.is_business_day(res, holiday_obj):
count += 1
el... | private method for the addition of business days, used in the addition of a BusinessPeriod only
:param BusinessDate d:
:param int days_int:
:param list holiday_obj:
:return: BusinessDate |
def commit(self):
if self._child_consents:
consents = []
for consent in self._child_consents:
consent.granted = self.granted
consents.append(consent.save() or consent)
return ProxyDataSharingConsent.from_children(self.program_uuid, *consents)
... | Commit a real ``DataSharingConsent`` object to the database, mirroring current field settings.
:return: A ``DataSharingConsent`` object if validation is successful, otherwise ``None``. |
def link_file(self, path, prefixed_path, source_storage):
if prefixed_path in self.symlinked_files:
return self.log("Skipping '%s' (already linked earlier)" % path)
if not self.delete_file(path, prefixed_path, source_storage):
return
source_path = source_storage.path(path... | Attempt to link ``path`` |
def set_user_method(self, user_methods, forced=False):
r
if isinstance(user_methods, str):
user_methods = [user_methods]
self.user_methods = user_methods
self.forced = forced
if set(self.user_methods).difference(self.all_methods):
raise Exception("One of t... | r'''Method to set the T, P, and composition dependent property methods
desired for consideration by the user. Can be used to exclude certain
methods which might have unacceptable accuracy.
As a side effect, the previously selected method is removed when
this method is called to ensure... |
def run_multiple(self, eventLoops):
    """Queue the given event loops to run in the background.

    Args:
        eventLoops (list): a list of event loops to run

    Returns:
        whatever the communication channel reports for the queued loops
    """
    self.nruns = self.nruns + len(eventLoops)
    return self.communicationChannel.put_multiple(eventLoops)
def verify_signed_jwt_with_certs(jwt, certs, audience=None):
jwt = _helpers._to_bytes(jwt)
if jwt.count(b'.') != 2:
raise AppIdentityError(
'Wrong number of segments in token: {0}'.format(jwt))
header, payload, signature = jwt.split(b'.')
message_to_sign = header + b'.' + payload
... | Verify a JWT against public certs.
See http://self-issued.info/docs/draft-jones-json-web-token.html.
Args:
jwt: string, A JWT.
certs: dict, Dictionary where values of public keys in PEM format.
audience: string, The audience, 'aud', that this JWT should contain. If
No... |
def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO:
pos = numpy.array([
xpos - width / 2.0, ypos + height / 2.0, 0.0,
xpos - width / 2.0, ypos - height / 2.0, 0.0,
xpos + width / 2.0, ypos - height / 2.0, 0.0,
xpos - width / 2.0, ypos + height / 2.0, 0.0,
xpos + width / 2.... | Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.
Args:
width (float): Width of the quad
height (float): Height of the quad
Keyword Args:
xpos (float): Center position x
ypos (float): Center position y
Returns:
A :py:class:`demosys.openg... |
def convert(self, value, param, ctx):
if not isinstance(value, str):
return value
if isinstance(value, six.binary_type):
value = value.decode('UTF-8')
if value.startswith('@'):
filename = os.path.expanduser(value[1:])
file_obj = super(Variables, se... | Return file content if file, else, return value as-is |
def shell(self):
r = self.local_renderer
if '@' in self.genv.host_string:
r.env.shell_host_string = self.genv.host_string
else:
r.env.shell_host_string = '{user}@{host_string}'
r.env.shell_default_dir = self.genv.shell_default_dir_template
r.env.shell_inte... | Opens a Django focussed Python shell.
Essentially the equivalent of running `manage.py shell`. |
def run(self):
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
self._experimentDir)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
expIface.normalizeStreamSources()
modelDescription = expIface.getModelDescription()
self._modelCo... | Runs the OPF Model
Parameters:
-------------------------------------------------------------------------
retval: (completionReason, completionMsg)
where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX
equates. |
def overdrive(self, gain_db=20.0, colour=20.0):
if not is_number(gain_db):
raise ValueError('db_level must be a number.')
if not is_number(colour):
raise ValueError('colour must be a number.')
effect_args = [
'overdrive',
'{:f}'.format(gain_db),
... | Apply non-linear distortion.
Parameters
----------
gain_db : float, default=20
Controls the amount of distortion (dB).
colour : float, default=20
Controls the amount of even harmonic content in the output (dB). |
def unregister_hook(self, func):
    """Remove *func* from the registered hooks; a no-op if it is absent.

    See ``register_hook`` for the registration side.
    """
    try:
        self.hooks.remove(func)
    except ValueError:
        pass
def _format_task_name(job_id, task_id, task_attempt):
    """Create a task name from a job-id, task-id, and task-attempt.

    Task names are used internally by dsub as well as by the docker task
    runner. The name is formatted as "<job-id>.<task-id>[.task-attempt]",
    with the literal word 'task' standing in when task_id is None, then
    sanitized into a docker-safe identifier.
    """
    parts = [job_id, 'task' if task_id is None else task_id]
    if task_attempt is not None:
        parts.append(task_attempt)
    docker_name = '.'.join(str(part) for part in parts)
    return 'dsub-{}'.format(_convert_suffix_to_docker_chars(docker_name))
def convert(self, value, param, ctx):
    """Match *value* against the declared choices via the superclass,
    then return the corresponding entry from ``actual_choices``."""
    matched = super(MappedChoice, self).convert(value, param, ctx)
    return self.actual_choices[self.choices.index(matched)]
def make_user_agent(component=None):
    """Create a string suitable for the HTTP User-Agent header.

    Format is "<project>/<version>", optionally followed by a space and
    the given component string.
    """
    pkg = pkg_resources.require("harvestingkit")[0]
    agent = "{0}/{1}".format(pkg.project_name, pkg.version)
    if component is None:
        return agent
    return agent + " {0}".format(component)
def _adaptSegment(cls, connections, segment, prevActiveCells,
permanenceIncrement, permanenceDecrement):
synapsesToDestroy = []
for synapse in connections.synapsesForSegment(segment):
permanence = synapse.permanence
if binSearch(prevActiveCells, synapse.presynapticCell) != -1:
... | Updates synapses on segment.
Strengthens active synapses; weakens inactive synapses.
:param connections: (Object) Connections instance for the tm
:param segment: (int) Segment to adapt
:param prevActiveCells: (list) Active cells in `t-1`
:param permanenceIncrement: ... |
def check_link_tag(self):
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')
for item in meta:
src = self.parser.getAttribute(item, attr='href')
if src:
return self.get_image(src, extraction_typ... | \
checks to see if we were able to
find open link_src on this page |
def add_path_object(self, *args):
    """Attach each custom path object to this bundle and record it.

    :type: path_object: static_bundle.paths.AbstractPath
    """
    for path_obj in args:
        path_obj.bundle = self
        self.files.append(path_obj)
def orient_undirected_graph(self, data, umg, alg='HC'):
warnings.warn("The pairwise GNN model is computed on each edge of the UMG "
"to initialize the model and start CGNN with a DAG")
gnn = GNN(nh=self.nh, lr=self.lr)
og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, ... | Orient the undirected graph using GNN and apply CGNN to improve the graph.
Args:
data (pandas.DataFrame): Observational data on which causal
discovery has to be performed.
umg (nx.Graph): Graph that provides the skeleton, on which the GNN
then the CGNN algo... |
def kvectors(self, norm=False, form='broadcast', real=False, shift=False):
if norm is False:
norm = 1
if norm is True:
norm = np.array(self.shape)
norm = aN(norm, self.dim, dtype='float')
v = list(np.fft.fftfreq(self.shape[i])/norm[i] for i in range(self.dim))
... | Return the kvectors associated with this tile, given the standard form
of -0.5 to 0.5. `norm` and `form` arguments arethe same as that passed to
`Tile.coords`.
Parameters
-----------
real : boolean
whether to return kvectors associated with the real fft instead |
def get_user(self, id=None):
    """Get user, defaulting to the current user when *id* is falsy.

    Instances are cached in ``self._users``; the current user object is
    reused rather than re-created.

    Returns:
        :class:`User`. User
    """
    if not id:
        id = self._user.id
    try:
        return self._users[id]
    except KeyError:
        user = self._user if id == self._user.id else User(self, id)
        self._users[id] = user
        return user
def startprocessmonitor(self, process_name, interval=2):
    """Start memory and CPU monitoring for a process.

    Any monitor already running for the same process is stopped and
    replaced.

    @param process_name: Process name, ex: firefox-bin.
    @type process_name: string
    @param interval: Time interval between each process scan
    @type interval: double
    @return: 1 on success
    """
    existing = self._process_stats.get(process_name)
    if existing is not None:
        existing.stop()
    monitor = ProcessStats(process_name, interval)
    self._process_stats[process_name] = monitor
    monitor.start()
    return 1
def update(ctx, name, description, tags):
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
update_dict = {}
if name:
update_dict['name'] = name
if description:
update_dict['description'] = description
tags = validate_tags(tags)
if tags... | Update build.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon build -b 2 update --description="new description for my build"
``` |
def get_address_coords(self, address):
url = "https://maps.googleapis.com/maps/api/geocode/json?&address=" + address
r = requests.get(url)
r.raise_for_status()
results = r.json()['results']
lat = results[0]['geometry']['location']['lat']
lng = results[0]['geometry']['loca... | Use the google geocoder to get latitude and longitude for an address string
Args:
address: any address string
Returns:
A tuple of (lat,lng) |
def _execute(self, command, data=None, unpack=True):
    """Execute *command* against the underlying driver.

    Args:
        command(Command): The defined command.
        data(dict): The uri variable and body; ``element_id`` is filled
            in from this element when not already present.
    Returns:
        The unwrapped value field in the json response.
    """
    payload = data or {}
    payload.setdefault('element_id', self.element_id)
    return self._driver._execute(command, payload, unpack)
def _mouse_pointer_moved(self, x, y):
    """GUI callback: record the current mouse position in the namespace."""
    namespace = self._namespace
    namespace['MOUSEX'] = x
    namespace['MOUSEY'] = y
def safe_repr(obj):
    """Return ``repr(obj)``, falling back to a minimal representation of
    type and ID if the call to repr raised an error.

    :param obj: object to safe repr
    :returns: repr string or '(type<id> repr error)' string
    :rtype: str
    """
    try:
        return repr(obj)
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt and
        # SystemExit; only genuine repr() failures should be masked.
        return "({0}<{1}> repr error)".format(type(obj), id(obj))
def Ra(L: float, Ts: float, Tf: float, alpha: float, beta: float, nu: float
       ) -> float:
    """Calculate the Rayleigh number.

    :param L: [m] heat transfer surface characteristic length.
    :param Ts: [K] heat transfer surface temperature.
    :param Tf: [K] bulk fluid temperature.
    :param alpha: [m2/s] fluid thermal diffusivity.
    :param beta: [1/K] fluid coefficient of thermal expansion.
    :param nu: [m2/s] fluid kinematic viscosity.

    :returns: Rayleigh number (dimensionless).
    """
    # BUG FIX: the body referenced an undefined name ``Tinf``; the bulk
    # fluid temperature parameter is ``Tf`` (see docstring).
    # ``g`` is the gravitational acceleration defined at module level.
    return g * beta * (Ts - Tf) * L**3.0 / (nu * alpha)
def _getDistances(self, inputPattern, partitionId=None):
if not self._finishedLearning:
self.finishLearning()
self._finishedLearning = True
if self._vt is not None and len(self._vt) > 0:
inputPattern = numpy.dot(self._vt, inputPattern - self._mean)
sparseInput = self._sparsifyVector(inputP... | Return the distances from inputPattern to all stored patterns.
:param inputPattern The pattern from which distances to all other patterns
are returned
:param partitionId If provided, ignore all training vectors with this
partitionId. |
def discover(cls, *args, **kwargs):
file = os.path.join(Cache.cache_dir, Cache.cache_name)
return cls.from_file(file, *args, **kwargs) | Make a guess about the cache file location an try loading it. |
def launchAppByBundleId(bundleID):
ws = AppKit.NSWorkspace.sharedWorkspace()
r = ws.launchAppWithBundleIdentifier_options_additionalEventParamDescriptor_launchIdentifier_(
bundleID,
AppKit.NSWorkspaceLaunchAllowingClassicStartup,
AppKit.NSAppleEventDescriptor.nullDesc... | Launch the application with the specified bundle ID |
def fix_journal_name(journal, knowledge_base):
if not journal:
return '', ''
if not knowledge_base:
return journal, ''
if len(journal) < 2:
return journal, ''
volume = ''
if (journal[-1] <= 'Z' and journal[-1] >= 'A') \
and (journal[-2] == '.' or journal[-2] == ' ... | Convert journal name to Inspire's short form. |
def json_doc_to_xml(json_obj, lang='en', custom_namespace=None):
if 'meta' not in json_obj:
raise Exception("This function requires a conforming Open511 JSON document with a 'meta' section.")
json_obj = dict(json_obj)
meta = json_obj.pop('meta')
elem = get_base_open511_element(lang=lang, version... | Converts a Open511 JSON document to XML.
lang: the appropriate language code
Takes a dict deserialized from JSON, returns an lxml Element.
Accepts only the full root-level JSON object from an Open511 response. |
def get_shares(self):
    """Fetch, cache and return the number of shares this link has had
    using Buffer.

    www will be stripped, but other subdomains will not.
    """
    response = self.api.get(url=PATHS['GET_SHARES'] % self.url)
    self.shares = response['shares']
    return self.shares
def run(self):
config = config_creator()
debug = config.debug
branch_thread_sleep = config.branch_thread_sleep
while 1:
url = self.branch_queue.get()
if debug:
print('branch thread-{} start'.format(url))
branch_spider = self.branch_spid... | run your main spider here
as for branch spider result data, you can return everything or do whatever with it
in your own code
:return: None |
def _named_stream(self, name, binary=False):
    """Create an indexed output stream i.e. 'test_00000001.name'.

    :param name: Identifier for the stream
    :param binary: open the stream in binary mode when True
    :return: A context-managed stream-like object
    """
    key = self._named_key(name)
    with self._store.save_stream(key, binary=binary) as stream:
        yield stream
def has_env_vars(*env_vars):
for env_var in env_vars:
if not os.environ.get(env_var):
msg = (
'Must set {} environment variable. View docs for setting up environment at {}'
).format(env_var, temple.constants.TEMPLE_DOCS_URL)
raise temple.exceptions.Invalid... | Raises `InvalidEnvironmentError` when one isnt set |
def open(self, input_id, **kwargs):
if not isinstance(input_id, str):
return input_id.open(self.tile, **kwargs)
if input_id not in self.params["input"]:
raise ValueError("%s not found in config as input file" % input_id)
return self.params["input"][input_id].open(self.til... | Open input data.
Parameters
----------
input_id : string
input identifier from configuration file or file path
kwargs : driver specific parameters (e.g. resampling)
Returns
-------
tiled input data : InputTile
reprojected input data withi... |
def as_dict(self):
    """Return Allele data as a dict with 'sequence' and, when present,
    'frequency'."""
    result = {'sequence': self.sequence}
    if hasattr(self, 'frequency'):
        result['frequency'] = self.frequency
    return result
def attrnum(self, attr):
    """Returns the number used for attr, which can be a name, or -n .. n-1."""
    # Check for a string name first: comparing a str against 0 raises
    # TypeError on Python 3 (the original order only worked on Python 2,
    # where str < int silently evaluated False). Order of results is
    # unchanged for Python 2 callers.
    if isinstance(attr, str):
        return self.attrnames.index(attr)
    elif attr < 0:
        return len(self.attrs) + attr
    else:
        return attr
def _deleteRecordsFromKNN(self, recordsToDelete):
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
idsToDelete = [r.ROWID for r in recordsToDelete if \
not r.setByUser and r.ROWID... | This method will remove the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classififier |
def get_publication_list(context, list, template='publications/publications.html'):
list = List.objects.filter(list__iexact=list)
if not list:
return ''
list = list[0]
publications = list.publication_set.all()
publications = publications.order_by('-year', '-month', '-id')
if not publications:
return ''
popul... | Get a publication list. |
def id_by_name(self, hostname):
    """Returns the database ID for specified hostname.

    The id might be useful as array index. 0 is unknown.

    :arg hostname: Hostname to get ID from.
    """
    return self.id_by_addr(self._gethostbyname(hostname))
def default_content_filter(sender, instance, **kwargs):
    """Set default value for `EnterpriseCustomerCatalog.content_filter` if not already set."""
    # Only act on freshly created instances that have no filter yet.
    if not kwargs['created'] or instance.content_filter:
        return
    instance.content_filter = get_default_catalog_content_filter()
    instance.save()
def optimize_wsgi_processes(self):
    """Based on the number of sites per server and the number of resources on the server,
    calculates the optimal number of processes that should be allocated for each WSGI site.
    """
    # NOTE(review): the visible body only gathers inputs (renderer,
    # verbosity flag, full site list) and pins an 8 GB memory budget;
    # the per-site process calculation itself is not visible here --
    # confirm this is the complete function.
    r = self.local_renderer
    r.env.wsgi_server_memory_gb = 8
    verbose = self.verbose
    all_sites = list(self.iter_sites(site=ALL, setter=self.set_site_specifics))
def missing(data, *args):
    """Implements the missing operator for finding missing variables.

    Accepts either varargs of variable names or a single list of names,
    and returns the names that are absent from *data*.
    """
    sentinel = object()
    if args and isinstance(args[0], list):
        args = args[0]
    return [name for name in args
            if get_var(data, name, sentinel) is sentinel]
def get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:
    """Get a sub-graph induced over all nodes matching the query string.

    :param graph: A BEL Graph
    :param query: A query string or iterable of query strings for node names

    Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
    """
    matching_nodes = search_node_names(graph, query)
    return get_subgraph_by_induction(graph, matching_nodes)
def _chooseCellsToLearnFrom(self, c, i, s, n, activeState):
if n <= 0:
return []
tmpCandidates = numpy.where(activeState == 1)
if len(tmpCandidates[0]) == 0:
return []
if s is None:
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
else:
synapsesAlreadyInSegmen... | Choose n random cells to learn from.
This function is called several times while learning with timeStep = t-1, so
we cache the set of candidates for that case. It's also called once with
timeStep = t, and we cache that set of candidates.
:returns: tuple (column index, cell index). |
def _create_air():
name = "Air"
namel = name.lower()
mm = 28.9645
ds_dict = _create_ds_dict([
"dataset-air-lienhard2015",
"dataset-air-lienhard2018"])
active_ds = "dataset-air-lienhard2018"
model_dict = {
"rho": IgRhoT(mm, 101325.0),
"beta": IgBetaT()}
model_t... | Create a dictionary of datasets and a material object for air.
:return: (Material, {str, DataSet}) |
def clip_out_of_image(self):
polys_cut = [
poly.clip_out_of_image(self.shape)
for poly
in self.polygons
if poly.is_partly_within_image(self.shape)
]
polys_cut_flat = [poly for poly_lst in polys_cut for poly in poly_lst]
return PolygonsOnIma... | Clip off all parts from all polygons that are outside of the image.
NOTE: The result can contain less polygons than the input did. That
happens when a polygon is fully outside of the image plane.
NOTE: The result can also contain *more* polygons than the input
did. That happens when di... |
def get_subfields(self, datafield, subfield, i1=None, i2=None,
exception=False):
if len(datafield) != 3:
raise ValueError(
"`datafield` parameter have to be exactly 3 chars long!"
)
if len(subfield) != 1:
raise ValueError(
... | Return content of given `subfield` in `datafield`.
Args:
datafield (str): Section name (for example "001", "100", "700").
subfield (str): Subfield name (for example "a", "1", etc..).
i1 (str, default None): Optional i1/ind1 parameter value, which
will be used... |
def sign_sha256(key, msg):
    """Generate an SHA256 HMAC, encoding msg to UTF-8 if not
    already encoded.

    key -- signing key. bytes.
    msg -- message to sign. unicode or bytes.
    """
    encoded = msg.encode('utf-8') if isinstance(msg, text_type) else msg
    return hmac.new(key, encoded, hashlib.sha256).digest()
def rectangle(cls, vertices, **kwargs):
    """Shortcut for creating a rectangle aligned with the screen axes from only two corners.

    Parameters
    ----------
    vertices : array-like
        An array containing the ``[x, y]`` positions of two corners.
    kwargs
        Other keyword arguments are passed to the |Shape| constructor.
    """
    first, second = vertices
    # Derive the two missing corners from the given opposite pair.
    top_left = [first[0], second[1]]
    bottom_right = [second[0], first[1]]
    return cls([first, bottom_right, second, top_left], **kwargs)
def subroute(self, *components):
    """See `txspinneret.route.subroute`.

    This decorator can be stacked with itself to specify multiple routes
    with a single handler.
    """
    def _decorator(handler):
        # Route construction stays inside the decorator so each
        # decorated handler gets its own route object.
        self._addRoute(handler, subroute(*components))
        return handler
    return _decorator
def play(args):
if args[0].lower() != "play":
args.insert(0, "play")
else:
args[0] = "play"
try:
logger.info("Executing: %s", " ".join(args))
process_handle = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
status = process... | Pass an argument list to play.
Parameters
----------
args : iterable
Argument list for play. The first item can, but does not
need to, be 'play'.
Returns:
--------
status : bool
True on success. |
def call_command(cmd_name, args_strings):
if not op.isabs(cmd_name):
cmd_fullpath = which(cmd_name)
else:
cmd_fullpath = cmd_name
try:
cmd_line = [cmd_fullpath] + args_strings
log.info('Calling: {}.'.format(cmd_line))
retval = subprocess.check_call(cmd_line)
excep... | Call CLI command with arguments and returns its return value.
Parameters
----------
cmd_name: str
Command name or full path to the binary file.
arg_strings: list of str
Argument strings list.
Returns
-------
return_value
Command return value. |
def get_img_info(image):
    """Return the header and affine matrix from a Nifti file.

    Parameters
    ----------
    image: img-like object or str
        A file path to a Nifti image, or any object with get_header()
        and get_affine() methods (e.g., nibabel.Nifti1Image).

    Returns
    -------
    (header, affine)
    """
    try:
        img = check_img(image)
    except Exception as exc:
        raise Exception('Error reading file {0}.'.format(repr_imgs(image))) from exc
    return img.get_header(), img.get_affine()
def post(self, pid, record):
uploaded_file = request.files['file']
key = secure_filename(
request.form.get('name') or uploaded_file.filename
)
if key in record.files:
raise FileAlreadyExists()
record.files[key] = uploaded_file.stream
record.commit(... | Handle POST deposit files.
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid. |
def run(self):
if self.args.roster_cache and os.path.exists(self.args.roster_cache):
logging.info(u"Loading roster from {0!r}"
.format(self.args.roster_cache))
try:
self.client.roster_client.load_roster(self.args.roster_cache)
... | Request client connection and start the main loop. |
def find_serial_devices(self):
if self.devices is not None:
return self.devices
self.devices = {}
hardware_id = "(?i)" + self.hardware_id
for ports in serial.tools.list_ports.grep(hardware_id):
port = ports[0]
try:
id = self.get_device_... | Scan and report all compatible serial devices on system.
:returns: List of discovered devices |
def reaction_weight(reaction):
    """Return the metabolite weight times its stoichiometric coefficient."""
    if len(reaction.metabolites) != 1:
        raise ValueError('Reaction weight is only defined for single '
                         'metabolite products or educts.')
    metabolite, coefficient = next(iteritems(reaction.metabolites))
    return [coefficient * metabolite.formula_weight]
def swap_buffers(self):
    """Swap buffers, increment frame counter and pull events.

    A no-op when the window has no active context.
    """
    window = self.window
    if not window.context:
        return
    self.frames += 1
    window.flip()
    window.dispatch_events()
def abs_img(img):
    """Return the absolute value of the data of `img`, cast to int.

    NOTE(review): despite the original docstring's mention of a
    "binarised version", this computes |data| as integers, not a 0/1
    mask -- confirm intent with callers.
    """
    magnitudes = np.abs(read_img(img).get_data())
    return magnitudes.astype(int)
def Async(cls, token, session=None, **options):
    """Return the client constructed in async mode.

    All extra options are forwarded to the constructor unchanged.
    """
    return cls(token, session=session, is_async=True, **options)
def fillScreen(self, color=None):
    """Fill the matrix with the given RGB color.

    Delegates to ``md.fill_rect`` over the full width x height using
    this object's ``set`` callable as the pixel writer.
    """
    md.fill_rect(self.set, 0, 0, self.width, self.height, color)
def _mask_data(self, data):
self._check_for_mask()
msk_data = self.mask.get_data()
if self.ndim == 3:
return data[msk_data], np.where(msk_data)
elif self.ndim == 4:
return _apply_mask_to_4d_data(data, self.mask)
else:
raise ValueError('Cannot m... | Return the data masked with self.mask
Parameters
----------
data: np.ndarray
Returns
-------
masked np.ndarray
Raises
------
ValueError if the data and mask dimensions are not compatible.
Other exceptions related to numpy computations. |
def get_sig_string(req, cano_req, scope):
    """Generate the AWS4 auth string to sign for the request.

    req -- Requests PreparedRequest object. This should already
           include an x-amz-date header.
    cano_req -- The Canonical Request, as returned by
                get_canonical_request()
    scope -- the credential scope string.
    """
    digest = hashlib.sha256(cano_req.encode()).hexdigest()
    return '\n'.join(
        ['AWS4-HMAC-SHA256', req.headers['x-amz-date'], scope, digest])
def set_topology_context(self, metrics_collector):
Log.debug("Setting topology context")
cluster_config = self.get_topology_config()
cluster_config.update(self._get_dict_from_config(self.my_component.config))
task_to_component_map = self._get_task_to_comp_map()
self.context = TopologyContextImpl(clu... | Sets a new topology context |
def get_finder(import_path):
    """Get a finder instance from an import path.

    Raises ``ImproperlyConfigured`` if the resolved class is not a
    subclass of ``BaseFileSystemFinder``.

    :param import_path: string representing an import path
    :return: An instance of the finder
    """
    finder_cls = import_string(import_path)
    if not issubclass(finder_cls, BaseFileSystemFinder):
        raise ImproperlyConfigured(
            'Finder {} is not a subclass of core.finders.FileSystemFinder'.format(import_path))
    return finder_cls()
def get_share(self, group_id, resource_id, depth=1):
    """Retrieves a specific resource share available to a group.

    :param group_id: The unique ID of the group.
    :type group_id: ``str``
    :param resource_id: The unique ID of the resource.
    :type resource_id: ``str``
    :param depth: The depth of the response data.
    """
    path = '/um/groups/%s/shares/%s?depth=%s' % (
        group_id, resource_id, str(depth))
    return self._perform_request(path)
def is_course_in_catalog(self, catalog_id, course_id):
try:
course_run_id = str(CourseKey.from_string(course_id))
except InvalidKeyError:
course_run_id = None
endpoint = self.client.catalogs(catalog_id).contains
if course_run_id:
resp = endpoint.get(co... | Determine if the given course or course run ID is contained in the catalog with the given ID.
Args:
catalog_id (int): The ID of the catalog
course_id (str): The ID of the course or course run
Returns:
bool: Whether the course or course run is contained in the given ... |
def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False):
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dty... | Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representi... |
def compute_alpha(x):
threshold = _compute_threshold(x)
alpha1_temp1 = tf.where(tf.greater(x, threshold), x, tf.zeros_like(x, tf.float32))
alpha1_temp2 = tf.where(tf.less(x, -threshold), x, tf.zeros_like(x, tf.float32))
alpha_array = tf.add(alpha1_temp1, alpha1_temp2, name=None)
alpha_array_abs = tf... | Computing the scale parameter. |
def read(cls, proto):
anomalyLikelihood = object.__new__(cls)
anomalyLikelihood._iteration = proto.iteration
anomalyLikelihood._historicalScores = collections.deque(
maxlen=proto.historicWindowSize)
for i, score in enumerate(proto.historicalScores):
anomalyLikelihood._historicalScores.append... | capnp deserialization method for the anomaly likelihood object
:param proto: (Object) capnp proto object specified in
nupic.regions.anomaly_likelihood.capnp
:returns: (Object) the deserialized AnomalyLikelihood object |
async def connect(self):
if not self._consumer:
waiter = self._waiter = asyncio.Future()
try:
address = self._websocket_host()
self.logger.info('Connect to %s', address)
self._consumer = await self.http.get(address)
if self.... | Connect to a Pusher websocket |
def stop_scan(self, timeout_sec=TIMEOUT_SEC):
    """Stop scanning for BLE devices with this adapter.

    Raises RuntimeError if the adapter does not confirm the stop within
    timeout_sec seconds.
    """
    # Reset the completion event before asking the adapter to stop, so the
    # wait below observes only a stop that happens after this call.
    self._scan_stopped.clear()
    self._adapter.StopDiscovery()
    stopped_in_time = self._scan_stopped.wait(timeout_sec)
    if not stopped_in_time:
        raise RuntimeError('Exceeded timeout waiting for adapter to stop scanning!')
def _getBundleId(self):
    """Return the bundle ID of the application.

    Looks up the running application by this object's process identifier
    via AppKit and reads its bundle identifier.
    """
    pid = self._getPid()
    running_app = AppKit.NSRunningApplication.runningApplicationWithProcessIdentifier_(pid)
    return running_app.bundleIdentifier()
def set_target_angles(self, angles):
j = 0
for joint in self.joints:
velocities = [
ctrl(tgt - cur, self.world.dt) for cur, tgt, ctrl in
zip(joint.angles, angles[j:j+joint.ADOF], joint.controllers)]
joint.velocities = velocities
j += jo... | Move each joint toward a target angle.
This method uses a PID controller to set a target angular velocity for
each degree of freedom in the skeleton, based on the difference between
the current and the target angle for the respective DOF.
PID parameters are by default set to achieve a ... |
def create(self, store_id, product_id, data):
self.store_id = store_id
self.product_id = product_id
if 'id' not in data:
raise KeyError('The product image must have an id')
if 'title' not in data:
raise KeyError('The product image must have a url')
respons... | Add a new image to the product.
:param store_id: The store id.
:type store_id: :py:class:`str`
:param product_id: The id for the product of a store.
:type product_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
... |
def calculate(self, T, method):
r
if method == DUTT_PRASAD:
A, B, C = self.DUTT_PRASAD_coeffs
mu = ViswanathNatarajan3(T, A, B, C, )
elif method == VISWANATH_NATARAJAN_3:
A, B, C = self.VISWANATH_NATARAJAN_3_coeffs
            mu = ViswanathNatarajan3(T, A, B,... | r'''Method to calculate low-pressure liquid viscosity at temperature
`T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate viscosity, [K]
... |
def _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs):
filename = os.path.abspath(os.path.normcase(namFilename))
if filename in currentlyIncluding:
raise NamelistRecursionError(filename)
currentlyIncluding.add(filename)
try:
result = __readNamelist(cache, filename, unique_glyphs)
fi... | Detect infinite recursion and prevent it.
This is an implementation detail of readNamelist.
Raises NamelistRecursionError if namFilename is in the process of being included |
def verifycheck(self, window_name, object_name):
try:
object_handle = self._get_object_handle(window_name, object_name,
wait_for_object=False)
if object_handle.AXValue == 1:
return 1
except LdtpServerException:
... | Verify check item.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: s... |
def plot_prob_profit_trade(round_trips, ax=None):
x = np.linspace(0, 1., 500)
round_trips['profitable'] = round_trips.pnl > 0
dist = sp.stats.beta(round_trips.profitable.sum(),
(~round_trips.profitable).sum())
y = dist.pdf(x)
lower_perc = dist.ppf(.025)
upper_perc = dist... | Plots a probability distribution for the event of making
a profitable trade.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which... |
def create(self, campaign_id, data, **queryparams):
self.campaign_id = campaign_id
if 'message' not in data:
raise KeyError('The campaign feedback must have a message')
response = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams)
i... | Add feedback on a specific campaign.
:param campaign_id: The unique id for the campaign.
:type campaign_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"message": string*
}
:param queryparams: The que... |
def create_nginx_config(self):
cfg = '
if not self._shared_hosting:
if self._user:
cfg += 'user {0};\n'.format(self._user)
cfg += 'worker_processes 1;\nerror_log {0}-errors.log;\n\
pid {1}_ nginx.pid;\n\n'.format(os.path.join(self._log_dir, \
se... | Creates the Nginx configuration for the project |
def get_epochs_given_midtimes_and_period(
t_mid,
period,
err_t_mid=None,
t0_fixed=None,
t0_percentile=None,
verbose=False
):
kwargarr = np.array([isinstance(err_t_mid,np.ndarray),
t0_fixed,
t0_percentile])
if not _... | This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` shou... |
def poll(self):
    """Return pairs of package indices and results of finished tasks.

    Does not block waiting for tasks to finish.

    Returns
    -------
    list
        Pairs of package indices and results collected so far.
    """
    # Fold any newly finished run IDs from the dispatcher into the pending
    # queue before collecting completed results.
    newly_finished = self.dispatcher.poll()
    self.runid_to_return.extend(newly_finished)
    return self._collect_all_finished_pkgidx_result_pairs()
This method does not wait for tasks to finish.
Returns
-------
list
A list of pairs of package indices and results |
def get_evanno_table(self, kvalues, max_var_multiple=0, quiet=False):
    """Calculate the Evanno table for tests with the given K-values.

    Computes lnPK, lnPPK, and deltaK. ``max_var_multiple`` can be used to
    exclude result files based on the variance of the likelihood as a
    proxy for convergence; 0 (the default) disables the filter.

    Raises ValueError if a non-zero ``max_var_multiple`` is below 1.
    """
    # A falsy value (0) means "no variance filtering"; any truthy value
    # must be a multiplier greater than 1 to be meaningful.
    if max_var_multiple and max_var_multiple < 1:
        raise ValueError('max_variance_multiplier must be >1')
    return _get_evanno_table(self, kvalues, max_var_multiple, quiet)
return table | Calculates the Evanno table from results files for tests with
K-values in the input list kvalues. The values lnPK, lnPPK,
and deltaK are calculated. The max_var_multiple arg can be used
to exclude results files based on variance of the likelihood as a
proxy for convergence.
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.