code
stringlengths
59
4.4k
docstring
stringlengths
5
7.69k
def get_branches(self):
    """Return the list of branch names reported by git, each sanitized."""
    raw_output = self._git.branch(color="never")
    return [self._sanitize(line) for line in raw_output.splitlines()]
Returns a list of the branches
def _to_json(self, strip, to_serialize=None): if to_serialize is None: to_serialize = copy.copy(self.__dict__) pkcs12_val = to_serialize.get(_PKCS12_KEY) if pkcs12_val is not None: to_serialize[_PKCS12_KEY] = base64.b64encode(pkcs12_val) return super(ServiceAccoun...
Utility function that creates JSON repr. of a credentials object. Over-ride is needed since PKCS#12 keys will not in general be JSON serializable. Args: strip: array, An array of names of members to exclude from the JSON. to_serialize: dict, (Optional...
def insort_right(a, x, lo=0, hi=None):
    """Insert x into sorted list a, after any existing entries equal to x.

    Optional lo (default 0) and hi (default len(a)) bound the slice searched.
    Raises ValueError if lo is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    left, right = lo, hi
    # Binary search for the rightmost insertion point.
    while left < right:
        middle = (left + right) // 2
        if x < a[middle]:
            right = middle
        else:
            left = middle + 1
    a.insert(left, x)
Insert item x in list a, and keep it sorted assuming a is sorted. If x is already in a, insert it to the right of the rightmost x. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched.
def get_environments(self):
    """Return the non-deleted Elastic Beanstalk environments for this app."""
    response = self.ebs.describe_environments(
        application_name=self.app_name, include_deleted=False)
    result = response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']
    return result['Environments']
Returns the environments
def forms_valid(self, form, inlines):
    """Save the main form via form_valid, then save each inline formset."""
    result = self.form_valid(form)
    for formset in inlines:
        formset.save()
    return result
If the form and formsets are valid, save the associated models.
def _parse_csv_header_lcc_csv_v1(headerlines): commentchar = headerlines[1] separator = headerlines[2] headerlines = [x.lstrip('%s ' % commentchar) for x in headerlines[3:]] metadatastart = headerlines.index('OBJECT METADATA') columnstart = headerlines.index('COLUMN DEFINITIONS') lcstart = heade...
This parses the header of the LCC CSV V1 LC format.
def _dispatch_coroutine(self, event, listener, *args, **kwargs): try: coro = listener(*args, **kwargs) except Exception as exc: if event == self.LISTENER_ERROR_EVENT: raise return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc) async...
Schedule a coroutine for execution. Args: event (str): The name of the event that triggered this call. listener (async def): The async def that needs to be executed. *args: Any number of positional arguments. **kwargs: Any number of keyword arguments. Th...
def __create_dashboard_menu(self, dash_menu, kibiter_major): logger.info("Adding dashboard menu") if kibiter_major == "6": menu_resource = ".kibana/doc/metadashboard" mapping_resource = ".kibana/_mapping/doc" mapping = {"dynamic": "true"} menu = {'metadash...
Create the menu definition to access the panels in a dashboard. :param menu: dashboard menu to upload :param kibiter_major: major version of kibiter
def update(ctx, name, description, tags): user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job')) update_dict = {} if name: update_dict['name'] = name if description: update_dict['description'] = description tags = validate_tags(tags) if tags: ...
Update job. Uses [Caching](/references/polyaxon-cli/#caching) Example: \b ```bash $ polyaxon job -j 2 update --description="new description for my job" ```
def encode(cls, hex): out = [] for i in range(len(hex) // 8): word = endian_swap(hex[8*i:8*i+8]) x = int(word, 16) w1 = x % cls.n w2 = (x // cls.n + w1) % cls.n w3 = (x // cls.n // cls.n + w2) % cls.n out += [cls.word_list[w1], cls....
Convert hexadecimal string to mnemonic word representation with checksum.
def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False): with tokenize.open(src) as src_buffer: src_encoding = src_buffer.encoding src_node = lib2to3_parse(src_buffer.read()) try: with open((pyi_dir / src.name).with_suffix('.pyi')) as pyi_file: pyi_txt = pyi_file.rea...
Retype `src`, finding types in `pyi_dir`. Save in `targets`. The file should remain formatted exactly as it was before, save for: - annotations - additional imports needed to satisfy annotations - additional module-level names needed to satisfy annotations Type comments in sources are normalized t...
def PMOVMSKB(cpu, op0, op1):
    """Move byte mask to a general-purpose register.

    Gathers the most significant bit of each byte of the source operand
    (op1) into a mask and writes it to the destination operand (op0).
    """
    op0.read()  # read kept for parity with the original (value unused)
    src = op1.read()
    mask = 0
    # Walk the MSB of each byte, highest byte first.
    for bit_index in reversed(range(7, op1.size, 8)):
        mask = (mask << 1) | ((src >> bit_index) & 1)
    op0.write(Operators.EXTRACT(mask, 0, op0.size))
Moves byte mask to general-purpose register. Creates an 8-bit mask made up of the most significant bit of each byte of the source operand (second operand) and stores the result in the low byte or word of the destination operand (first operand). The source operand is an MMX(TM) technology or an ...
def trim_display_field(self, value, max_length):
    """Return value for display; truncate with '...' if over max_length."""
    if not value:
        return ''
    if len(value) <= max_length:
        return value
    return value[:max_length - 3] + '...'
Return a value for display; if longer than max length, use ellipsis.
def convert(self, schema_node, definition_handler): converted = { 'name': schema_node.name, 'in': self._in, 'required': schema_node.required } if schema_node.description: converted['description'] = schema_node.description if schema_node.def...
Convert node schema into a parameter object.
def get_aggregation_timestamp(self, timestamp, granularity='second'): if granularity is None or granularity.lower() == 'none': return int(timestamp), 1 elif granularity == 'hour': return (int(timestamp) / (3600 * 1000)) * 3600 * 1000, 3600 elif granularity == 'minute': return (int(timestam...
Return a timestamp from the raw epoch time based on the granularity preferences passed in. :param string timestamp: timestamp from the log line :param string granularity: aggregation granularity used for plots. :return: string aggregate_timestamp: timestamp used for metrics aggregation in all functions
def read_yaml_file(path, loader=ExtendedSafeLoader):
    """Open the file at ``path``, parse it with ``loader``, return the data."""
    with open(path) as stream:
        parsed = load(stream, loader)
    return parsed
Open a file, read it and return its contents.
def copy_groups_to_folder(dicom_groups, folder_path, groupby_field_name): if dicom_groups is None or not dicom_groups: raise ValueError('Expected a boyle.dicom.sets.DicomFileSet.') if not os.path.exists(folder_path): os.makedirs(folder_path, exist_ok=False) for dcmg in dicom_groups: ...
Copy the DICOM file groups to folder_path. Each group will be copied into a subfolder with named given by groupby_field. Parameters ---------- dicom_groups: boyle.dicom.sets.DicomFileSet folder_path: str Path to where copy the DICOM files. groupby_field_name: str DICOM field name. W...
def get(self, batch_id, **queryparams):
    """Fetch the status of a batch operation identified by ``batch_id``."""
    self.batch_id = batch_id
    self.operation_status = None
    url = self._build_path(batch_id)
    return self._mc_client._get(url=url, **queryparams)
Get the status of a batch request. :param batch_id: The unique id for the batch operation. :type batch_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []
def string_to_xml(text): try: return etree.XML(text) except Exception: _logger.error( "Error parsing XML string. " "If lxml is not available, and unicode is involved, then " "installing lxml _may_ solve this issue." ) _logger.error("XML source:...
Convert XML string into etree.Element.
def write_tersoff_potential(parameters): lines = [] for (e1, e2, e3), params in parameters.items(): if len(params) != 14: raise ValueError('tersoff three body potential expects 14 parameters') lines.append(' '.join([e1, e2, e3] + ['{:16.8g}'.format(_) for _ in params])) return '\...
Write tersoff potential file from parameters to string Parameters ---------- parameters: dict keys are tuple of elements with the values being the parameters length 14
def set_mute(self, value=False):
    """Mute (True) or unmute (False) the device; return the resulting state."""
    result = (yield from self.handle_set(self.API.get('mute'), int(value)))
    return bool(result)
Mute or unmute the device.
async def leave_conversation(self, conv_id):
    """Leave the conversation identified by ``conv_id`` and forget it."""
    logger.info('Leaving conversation: {}'.format(conv_id))
    conversation = self._conv_dict[conv_id]
    await conversation.leave()
    del self._conv_dict[conv_id]
Leave a conversation. Args: conv_id (str): ID of conversation to leave.
def jobGetModelIDs(self, jobID):
    """Return every model ID belonging to ``jobID`` (empty list if none)."""
    rows = self._getMatchingRowsWithRetries(
        self._models, dict(job_id=jobID), ['model_id'])
    return [row[0] for row in rows]
Fetch all the modelIDs that correspond to a given jobID; empty sequence if none
def _write(self, filename, frames, fps, loop=0, palette=256): from PIL import Image images = [] for f in frames: data = open(f, 'rb').read() images.append(Image.open(io.BytesIO(data))) duration = round(1 / fps, 2) im = images.pop(0) im.save(filenam...
Write a series of frames as a single animated GIF. :param str filename: the name of the GIF file to write :param list frames: a list of filenames, each of which represents a single frame of the animation. Each frame must have exactly the same dimensions, and the code has only ...
def bootstrap_paginate(parser, token): bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument" " (Page object reference)" % bits[0]) page = parser.compile_filter(bits[1]) kwargs = {} bits = bits[2:] kw...
Renders a Page object as a Twitter Bootstrap styled pagination bar. Compatible with Bootstrap 3.x and 4.x only. Example:: {% bootstrap_paginate page_obj range=10 %} Named Parameters:: range - The size of the pagination bar (ie, if set to 10 then, at most, 10 page numbers...
def calc_list_average(l):
    """Return the arithmetic mean of the numbers in ``l`` as a float.

    Uses the builtin ``sum`` (with a float start value, matching the
    original left-to-right accumulation from 0.0) instead of a manual
    loop. Raises ZeroDivisionError on an empty sequence, unchanged from
    the original behavior.
    """
    return sum(l, 0.0) / len(l)
Calculates the average value of a list of numbers. Returns a float.
def check_glfw_version(self):
    """Ensure the loaded glfw binaries meet the minimum required version."""
    version = glfw.get_version()
    print("glfw version: {} (python wrapper version {})".format(version, glfw.__version__))
    if version < self.min_glfw_version:
        raise ValueError("Please update glfw binaries to version {} or later".format(self.min_glfw_version))
Ensure glfw library version is compatible
def unlock(self, pwd):
    """Unlock the wallet database with ``pwd`` if the store is encrypted."""
    if not self.store.is_encrypted():
        return None
    return self.store.unlock(pwd)
Unlock the wallet database
def generate_rt_pic(process_data, para_meter, scale): pic_path = para_meter['filename'] + '.png' plt.figure(figsize=(5.6 * scale, 3.2 * scale)) for key in process_data.keys(): plt.plot(process_data[key][:, 0], process_data[key][:, 1], label=str(key)) plt.title(para_meter['title']) plt....
generate rater pic
def decode_timestamp(data: str) -> datetime.datetime: year = 2000 + int(data[0:2]) month = int(data[2:4]) day = int(data[4:6]) hour = int(data[6:8]) minute = int(data[8:10]) second = int(data[10:12]) if minute == 60: minute = 0 hour += 1 return datetime.datetime(year=year...
Decode timestamp using bespoke decoder. Cannot use simple strptime since the ness panel contains a bug that P199E zone and state updates emitted on the hour cause a minute value of `60` to be sent, causing strptime to fail. This decoder handles this edge case.
def render(node, strict=False):
    """Render an FST node: dicts via render_node, lists via render_list.

    Raises NotImplementedError for any other node type.
    """
    if isinstance(node, dict):
        return render_node(node, strict=strict)
    if isinstance(node, list):
        return render_list(node)
    raise NotImplementedError("You tried to render a %s. Only list and dicts can be rendered." % node.__class__.__name__)
Recipe to render a given FST node. The FST is composed of branch nodes which are either lists or dicts and of leaf nodes which are strings. Branch nodes can have other list, dict or leaf nodes as childs. To render a string, simply output it. To render a list, render each of its elements in order. ...
def getCentreAndSpreadOffsets(spaceShape, spreadShape, stepSize=1): from nupic.math.cross import cross shape = spaceShape if shape[0] == 1 and shape[1] == 1: centerOffsets = [(0,0)] else: xMin = -1 * (shape[1] // 2) xMax = xMin + shape[1] -...
Generates centre offsets and spread offsets for block-mode based training regimes - star, cross, block. Parameters: ----------------------------------------------- spaceShape: The (height, width) of the 2-D space to explore. This sets the number of center-points. spreadShape: The...
def _loop_timeout_cb(self, main_loop):
    """Timeout callback: flag that the callback ran, then stop the loop.

    :param main_loop: loop object exposing ``quit()`` (e.g. a GLib main loop).
    """
    # Lets callers distinguish a timeout wake-up from nothing happening at all.
    self._anything_done = True
    logger.debug("_loop_timeout_cb() called")
    main_loop.quit()
Stops the loop after the time specified in the `loop` call.
def symlink(real_path, link_path, overwrite=False, verbose=0): path = normpath(real_path) link = normpath(link_path) if not os.path.isabs(path): if _can_symlink(): path = os.path.relpath(path, os.path.dirname(link)) else: path = os.path.abspath(path) if verbose: ...
Create a symbolic link. This will work on linux or windows, however windows does have some corner cases. For more details see notes in `ubelt._win32_links`. Args: path (PathLike): path to real file or directory link_path (PathLike): path to desired location for symlink overwrite (b...
def contrast(self, amount=75): if not is_number(amount) or amount < 0 or amount > 100: raise ValueError('amount must be a number between 0 and 100.') effect_args = ['contrast', '{:f}'.format(amount)] self.effects.extend(effect_args) self.effects_log.append('contrast') ...
Comparable with compression, this effect modifies an audio signal to make it sound louder. Parameters ---------- amount : float Amount of enhancement between 0 and 100. See Also -------- compand, mcompand
def unquote(s):
    """Strip one layer of surrounding double quotes or angle brackets.

    Double-quoted strings also have backslash escapes undone.
    Strings of length <= 1 and unquoted strings are returned unchanged.
    """
    if len(s) <= 1:
        return s
    if s[0] == '"' and s[-1] == '"':
        inner = s[1:-1]
        return inner.replace('\\\\', '\\').replace('\\"', '"')
    if s[0] == '<' and s[-1] == '>':
        return s[1:-1]
    return s
Remove quotes from a string.
def train(self, debug=True, force=False, single_thread=False, timeout=20): if not self.must_train and not force: return self.padaos.compile() self.train_thread = Thread(target=self._train, kwargs=dict( debug=debug, single_thread=single_thread, time...
Trains all the loaded intents that need to be updated If a cache file exists with the same hash as the intent file, the intent will not be trained and just loaded from file Args: debug (bool): Whether to print a message to stdout each time a new intent is trained force (...
def _blocks(self, name): i = len(self) while i >= 0: i -= 1 if name in self[i]['__names__']: for b in self[i]['__blocks__']: r = b.raw() if r and r == name: return b else: ...
Inner wrapper to search for blocks by name.
def _pop(self, model): tags = [] for tag in model.tags: if self.is_tag(tag): tags.append(tag) if tags: for tag in tags: model.tags.remove(tag) return tags
Pop all matching tags off the model and return them.
def get_program_type_by_slug(self, slug):
    """Look up a program type by ``slug``; returns the object or None."""
    return self._load_data(
        self.PROGRAM_TYPES_ENDPOINT,
        resource_id=slug,
        default=None,
    )
Get a program type by its slug. Arguments: slug (str): The slug to identify the program type. Returns: dict: A program type object.
def _check_field_validity(field): if type(field) not in (list, tuple): raise InvenioBibRecordFieldError( "Field of type '%s' should be either " "a list or a tuple." % type(field)) if len(field) != 5: raise InvenioBibRecordFieldError( "Field of length '%d' shou...
Check if a field is well-formed. :param field: A field tuple as returned by create_field() :type field: tuple :raise InvenioBibRecordFieldError: If the field is invalid.
def associate_notification_template(self, job_template, notification_template, status):
    """Associate a notification template with this job template for ``status``."""
    endpoint = 'notification_templates_%s' % status
    return self._assoc(endpoint, job_template, notification_template)
Associate a notification template with this job template. =====API DOCS===== Associate a notification template with this job template. :param job_template: The job template to associate to. :type job_template: str :param notification_template: The notification template to be as...
def list_namespaces():
    """Print a table of available namespaces and their descriptions."""
    print('{:30s}\t{:40s}'.format('NAME', 'DESCRIPTION'))
    print('-' * 78)
    for name in sorted(__NAMESPACE__):
        description = __NAMESPACE__[name]['description']
        # Clip long descriptions so the table stays aligned.
        if len(description) > 46:
            description = description[:44] + '..'
        print('{:30s}\t{:40s}'.format(name, description))
Print out a listing of available namespaces
def _check_for_inception(self, root_dict): for key in root_dict: if isinstance(root_dict[key], dict): root_dict[key] = ResponseObject(root_dict[key]) return root_dict
Used to check if there is a dict in a dict
def union(self, a, b): s1, s2 = self.find(a), self.find(b) if s1 != s2: r1, r2 = self._rank[s1], self._rank[s2] if r2 > r1: r1, r2 = r2, r1 s1, s2 = s2, s1 if r1 == r2: self._rank[s1] += 1 self._leader[s2] =...
Merges the set that contains ``a`` with the set that contains ``b``. Parameters ---------- a, b : objects Two objects whose sets are to be merged.
def _StructMessageToJsonObject(message, unused_including_default=False): fields = message.fields ret = {} for key in fields: ret[key] = _ValueMessageToJsonObject(fields[key]) return ret
Converts Struct message according to Proto3 JSON Specification.
def add_torque(self, torque, relative=False):
    """Apply ``torque`` to this body.

    If ``relative`` is True the vector is interpreted in body coordinates,
    otherwise in world coordinates.
    """
    if relative:
        self.ode_body.addRelTorque(torque)
    else:
        self.ode_body.addTorque(torque)
Add a torque to this body. Parameters ---------- force : 3-tuple of float A vector giving the torque along each world or body coordinate axis. relative : bool, optional If False, the torque values are assumed to be given in the world coordinate frame....
def request(self, path, method=None, data={}): if not path: raise ValueError('Invalid path parameter') if method and method not in ['GET', 'POST', 'DELETE', 'PUT']: raise NotImplementedError( 'HTTP %s method not implemented' % method) if path[0] == '/': ...
sends a request and gets a response from the Plivo REST API path: the URL (relative to the endpoint URL, after the /v1 method: the HTTP method to use, defaults to POST data: for POST or PUT, a dict of data to send returns Plivo response in XML or raises an exception on error
def set_sla(obj, metric, sub_metric, rules): if not hasattr(obj, 'sla_map'): return False rules_list = rules.split() for rule in rules_list: if '<' in rule: stat, threshold = rule.split('<') sla = SLA(metric, sub_metric, stat, threshold, 'lt') elif '>' in rule: stat, threshold = rule...
Extract SLAs from a set of rules
def parse_pylint_output(pylint_output): for line in pylint_output: if not line.strip(): continue if line[0:5] in ("-"*5, "*"*5): continue parsed = PYLINT_PARSEABLE_REGEX.search(line) if parsed is None: LOG.warning( u"Unable to parse...
Parse the pylint output-format=parseable lines into PylintError tuples.
def getClosest(self, inputPattern, topKCategories=3): inferenceResult = numpy.zeros(max(self._categoryList)+1) dist = self._getDistances(inputPattern) sorted = dist.argsort() validVectorCount = len(self._categoryList) - self._categoryList.count(-1) for j in sorted[:min(self.k, validVectorCount)]: ...
Returns the index of the pattern that is closest to inputPattern, the distances of all patterns to inputPattern, and the indices of the k closest categories.
def solr_advanced_search(self, query, token=None, limit=20):
    """Search item metadata with an Apache Lucene query via Solr.

    ``token`` is an optional auth token; ``limit`` caps result count.
    """
    params = {'query': query, 'limit': limit}
    if token:
        params['token'] = token
    return self.request('midas.solr.search.advanced', params)
Search item metadata using Apache Solr. :param query: The Apache Lucene search query. :type query: string :param token: (optional) A valid token for the user in question. :type token: None | string :param limit: (optional) The limit of the search. :type limit: int | long...
def parse_segment(text): "we expect foo=bar" if not len(text): return NoopQuerySegment() q = QuerySegment() equalities = zip(constants.OPERATOR_EQUALITIES, itertools.repeat(text)) equalities = map(lambda x: (x[0], x[1].split(x[0], 1)), equalities) equalities = list(filter(lambda x: len(x...
we expect foo=bar
def handle(self, *args, **options): if not CourseEnrollment: raise NotConnectedToOpenEdX("This package must be installed in an OpenEdX environment.") days, enterprise_customer = self.parse_arguments(*args, **options) if enterprise_customer: try: lrs_config...
Send xAPI statements.
def record_add_field(rec, tag, ind1='', ind2='', subfields=[], controlfield_value=''): if controlfield_value: doc = etree.Element("controlfield", attrib={ "tag": tag, }) doc.text = unicode(co...
Add a MARCXML datafield as a new child to a XML document.
def substitute(prev, *args, **kw):
    """Lazily apply string.Template(*args, **kw).substitute to each mapping in prev."""
    template = string.Template(*args, **kw)
    for mapping in prev:
        yield template.substitute(mapping)
alias of string.Template.substitute
def isheader(self, line):
    """Return the lowercased header name if ``line`` contains ':', else None."""
    colon = line.find(':')
    if colon < 0:
        return None
    return line[:colon].lower()
Determine whether a given line is a legal header. This method should return the header name, suitably canonicalized. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats with special header formats.
def _nginx_stream_spec(port_spec, bridge_ip):
    """Build the nginx ``stream`` server block for one port spec."""
    parts = [
        "\t server {\n",
        "\t \t {}\n".format(_nginx_listen_string(port_spec)),
        "\t \t {}\n".format(_nginx_proxy_string(port_spec, bridge_ip)),
        "\t }\n",
    ]
    return "".join(parts)
This will output the nginx stream config string for specific port spec
def get_tasks(self):
    """Return this instance's tasks plus those of its attached streams."""
    tasks = self._get_tasks()
    # += mutates the same list object, matching the original extend().
    tasks += self._streams.get_tasks(self)
    return tasks
Get the tasks attached to the instance Returns ------- list List of tasks (:class:`asyncio.Task`)
def swap(self, qs): try: replacement = qs[0] except IndexError: return if not self._valid_ordering_reference(replacement): raise ValueError( "%r can only be swapped with instances of %r which %s equals %r." % ( self, self.__...
Swap the positions of this object with a reference object.
def intersects(self, i):
    """Return True iff this interval overlaps interval ``i`` (inclusive ends)."""
    # Equivalent (De Morgan) to: self.start <= i.end and i.start <= self.end
    return not (i.end < self.start or self.end < i.start)
Returns true iff this interval intersects the interval i
def make_server(host, port, app=None, server_class=AsyncWsgiServer, handler_class=AsyncWsgiHandler, ws_handler_class=None, ws_path='/ws'): handler_class.ws_handler_class = ws_handler_class handler_class.ws_path = ws_path httpd = server_class((h...
Create server instance with an optional WebSocket handler For pure WebSocket server ``app`` may be ``None`` but an attempt to access any path other than ``ws_path`` will cause server error. :param host: hostname or IP :type host: str :param port: server port :type port: int :param app:...
def share_file(comm, path): localrank, _ = get_local_rank_size(comm) if comm.Get_rank() == 0: with open(path, 'rb') as fh: data = fh.read() comm.bcast(data) else: data = comm.bcast(None) if localrank == 0: os.makedirs(os.path.dirname(path), exist_ok=Tr...
Copies the file from rank 0 to all other ranks Puts it in the same place on all machines
def _win32_read_junction(path): if not jwfs.is_reparse_point(path): raise ValueError('not a junction') handle = jwfs.api.CreateFile( path, 0, 0, None, jwfs.api.OPEN_EXISTING, jwfs.api.FILE_FLAG_OPEN_REPARSE_POINT | jwfs.api.FILE_FLAG_BACKUP_SEMANTICS, None...
Returns the location that the junction points, raises ValueError if path is not a junction. CommandLine: python -m ubelt._win32_links _win32_read_junction Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junctio...
def export(self): enrollment_queryset = EnterpriseCourseEnrollment.objects.select_related( 'enterprise_customer_user' ).filter( enterprise_customer_user__enterprise_customer=self.enterprise_customer, enterprise_customer_user__active=True, ).order_by('course_id...
Collect learner data for the ``EnterpriseCustomer`` where data sharing consent is granted. Yields a learner data object for each enrollment, containing: * ``enterprise_enrollment``: ``EnterpriseCourseEnrollment`` object. * ``completed_date``: datetime instance containing the course/enrollment ...
def locked_get(self):
    """Fetch credentials from the backend; attach this store if found."""
    credential = self._backend.locked_get(self._key)
    if credential is None:
        return None
    credential.set_store(self)
    return credential
Retrieves the current credentials from the store. Returns: An instance of :class:`oauth2client.client.Credentials` or `None`.
def guess_invert(st):
    """Guess whether particles are dark on a bright background or vice versa.

    Compares the median image intensity at the particle centers with the
    median over the whole image; returns True when centers are darker.
    """
    positions = st.obj_get_positions()
    pixel_inds = np.round(positions).astype('int')
    inside = st.ishape.translate(-st.pad).contains(pixel_inds)
    center_vals = st.data[tuple(pixel_inds[inside].T)]
    return np.median(center_vals) < np.median(st.data)
Guesses whether particles are bright on a dark bkg or vice-versa Works by checking whether the intensity at the particle centers is brighter or darker than the average intensity of the image, by comparing the median intensities of each. Parameters ---------- st : :class:`peri.states.ImageState...
def _save_config(self, filename=None): if filename is None: filename = self._config_filename parent_path = os.path.dirname(filename) if not os.path.isdir(parent_path): os.makedirs(parent_path) with open(filename, "w") as configfile: self._config.write(...
Save the given user configuration.
def state_size(self):
    """State size: an LSTMStateTuple when tuple state is used, else 2*num_units."""
    if self._state_is_tuple:
        return LSTMStateTuple(self._num_units, self._num_units)
    return 2 * self._num_units
State size of the LSTMStateTuple.
def dzip(items1, items2, cls=dict): try: len(items1) except TypeError: items1 = list(items1) try: len(items2) except TypeError: items2 = list(items2) if len(items1) == 0 and len(items2) == 1: items2 = [] if len(items2) == 1 and len(items1) > 1: ite...
Zips elementwise pairs between items1 and items2 into a dictionary. Values from items2 can be broadcast onto items1. Args: items1 (Iterable): full sequence items2 (Iterable): can either be a sequence of one item or a sequence of equal length to `items1` cls (Type[dict]): dic...
def dist_tversky(src, tar, qval=2, alpha=1, beta=1, bias=None):
    """Return the Tversky distance between two strings (wrapper for Tversky.dist)."""
    measure = Tversky()
    return measure.dist(src, tar, qval, alpha, beta, bias)
Return the Tversky distance between two strings. This is a wrapper for :py:meth:`Tversky.dist`. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The len...
def limit(self, value):
    """Set the throttle limit (bytes per second) and reset the running window."""
    self._limit = value
    self._sum = 0
    self._start = None
Set throttle limit :param value: bytes per second :type value: :py:class:`int` or :py:class:`None`
def adjust_returns_for_slippage(returns, positions, transactions, slippage_bps): slippage = 0.0001 * slippage_bps portfolio_value = positions.sum(axis=1) pnl = portfolio_value * returns traded_value = get_txn_vol(transactions).txn_volume slippage_dollars = traded_valu...
Apply a slippage penalty for every dollar traded. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in create_full_tear_sheet. positions : pd.DataFrame Daily net position values. - See full explanation in cre...
def _nginx_http_spec(port_spec, bridge_ip): server_string_spec = "\t server {\n" server_string_spec += "\t \t {}\n".format(_nginx_max_file_size_string()) server_string_spec += "\t \t {}\n".format(_nginx_listen_string(port_spec)) server_string_spec += "\t \t {}\n".format(_nginx_server_name_string(port_sp...
This will output the nginx HTTP config string for specific port spec
def edit_txt(filename, substitutions, newname=None): if newname is None: newname = filename _substitutions = [{'lRE': re.compile(str(lRE)), 'sRE': re.compile(str(sRE)), 'repl': repl} for lRE,sRE,repl in substitutions if repl is not None...
Primitive text file stream editor. This function can be used to edit free-form text files such as the topology file. By default it does an **in-place edit** of *filename*. If *newname* is supplied then the edited file is written to *newname*. :Arguments: *filename* input text fil...
def search(self, what, name=None, version=None): filtered = {} if what is None: whats = list(self.plugins.keys()) elif what is not None: if what not in self.plugins: raise Exception("Unknown class of plugins") whats = [what] for what in...
Search for a plugin
def remove_inconsistent_edges(graph: BELGraph) -> None:
    """Remove all edges between node pairs flagged as inconsistent.

    All-or-nothing: every parallel edge between an inconsistent pair is dropped.
    """
    for u, v in get_inconsistent_edges(graph):
        # Materialize keys first so we do not mutate while iterating.
        doomed = [(u, v, key) for key in graph[u][v]]
        graph.remove_edges_from(doomed)
Remove all edges between node pairs with inconsistent edges. This is the all-or-nothing approach. It would be better to do more careful investigation of the evidences during curation.
def draw_2d(self, width=300, height=300, Hs=False): r try: from rdkit.Chem import Draw from rdkit.Chem.Draw import IPythonConsole if Hs: mol = self.rdkitmol_Hs else: mol = self.rdkitmol return Draw.MolToImage(mol...
r'''Interface for drawing a 2D image of the molecule. Requires an HTML5 browser, and the libraries RDKit and IPython. An exception is raised if either of these libraries is absent. Parameters ---------- width : int Number of pixels wide for the view h...
def silence(self, location=0, silence_threshold=0.1, min_silence_duration=0.1, buffer_around_silence=False): if location not in [-1, 0, 1]: raise ValueError("location must be one of -1, 0, 1.") if not is_number(silence_threshold) or silence_threshold < 0: raise Va...
Removes silent regions from an audio file. Parameters ---------- location : int, default=0 Where to remove silence. One of: * 0 to remove silence throughout the file (default), * 1 to remove silence from the beginning, * -1 to remove silence fr...
def _hashCoordinate(coordinate): coordinateStr = ",".join(str(v) for v in coordinate) hash = int(int(hashlib.md5(coordinateStr).hexdigest(), 16) % (2 ** 64)) return hash
Hash a coordinate to a 64 bit integer.
def enter_singles(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start): seq = aseqs[iloc, :, edg[0]:edg[1]+1] snp = asnps[iloc, edg[0]:edg[1]+1, ] nalln = np.all(seq == "N", axis=1) nsidx = nalln + smask samplecov = samplecov + np.invert(nsidx).astype(np.int32) idx = np.su...
enter funcs for SE or merged data
def get_def_macros(tex_source):
    r"""Return a dict mapping ``\def`` macro names to their content strings."""
    return {match.group('name'): match.group('content')
            for match in DEF_PATTERN.finditer(tex_source)}
r"""Get all ``\def`` macro definition from TeX source. Parameters ---------- tex_source : `str` TeX source content. Returns ------- macros : `dict` Keys are macro names (including leading ``\``) and values are the content (as `str`) of the macros. Notes ----- ...
def extender(self, edge):
    """Advance every chart edge whose next expected symbol matches ``edge``."""
    (j, k, B, _, _) = edge
    for (i, _j, A, alpha, needed) in self.chart[j]:
        if needed and needed[0] == B:
            self.add_edge([i, k, A, alpha + [edge], needed[1:]])
See what edges can be extended by this edge.
def do_filter(qs, keywords, exclude=False): and_q = Q() for keyword, value in iteritems(keywords): try: values = value.split(",") if len(values) > 0: or_q = Q() for value in values: or_q |= Q(**{keyword: value}) ...
Filter queryset based on keywords. Support for multiple-selected parent values.
def onesided_2_twosided(data):
    """Build a two-sided PSD from a one-sided PSD, preserving total power.

    Values are halved except the zero-lag and N-lag entries, which keep
    their original magnitude.
    """
    mirrored = cshift(data[-1:0:-1], -1)
    psd = np.concatenate((data[0:-1], mirrored)) / 2.
    psd[0] *= 2.
    psd[-1] *= 2.
    return psd
Convert a one-sided PSD to a two-sided PSD In order to keep the power in the twosided PSD the same as in the onesided version, the twosided values are 2 times lower than the input data (except for the zero-lag and N-lag values). :: >>> twosided_2_onesided([10, 4, 6, 8]) array([ 10...
async def play_previous(self):
    """Replay the previous track; raise NoPreviousTrack if there is none."""
    prev = self.previous
    if not prev:
        raise NoPreviousTrack
    self.queue.insert(0, prev)
    await self.play(ignore_shuffle=True)
Plays previous track if it exist, if it doesn't raises a NoPreviousTrack error.
def _translate_particles(s, max_mem=1e9, desc='', min_rad='calc', max_rad='calc', invert='guess', rz_order=0, do_polish=True): if desc is not None: desc_trans = desc + 'translate-particles' desc_burn = desc + 'addsub_burn' desc_polish = desc + 'addsub_polish' else: desc_t...
Workhorse for translating particles. See get_particles_featuring for docs.
def checkTUN(self):
    """Read one MTU-sized chunk of data from the TUN adapter and return it."""
    tun = self._TUN._tun
    return tun.read(tun.mtu)
Checks the TUN adapter for data and returns any that is found. Returns: packet: Data read from the TUN adapter
def write_json_response(self, response):
    """Serialize ``response`` as JSON, write it, and set the content type."""
    payload = tornado.escape.json_encode(response)
    self.write(payload)
    self.set_header("Content-Type", "application/json")
write back json response
def connect(self, callback, *args, **kwargs):
    """Connect *callback* to this signal with a hard reference.

    The extra positional and keyword arguments are stored and passed to
    the callback when the signal is emitted.

    Raises AttributeError if the callback is already connected.
    """
    if self.is_connected(callback):
        raise AttributeError('callback is already connected')
    subscribers = self.hard_subscribers
    if subscribers is None:
        subscribers = self.hard_subscribers = []
    subscribers.append((callback, args, kwargs))
Connects the event with the given callback. When the signal is emitted, the callback is invoked. .. note:: The signal handler is stored with a hard reference, so you need to make sure to call :class:`disconnect()` if you want the handler to be garbage co...
def reset_package(self): self.package_set = False self.package_vers_set = False self.package_file_name_set = False self.package_supplier_set = False self.package_originator_set = False self.package_down_location_set = False self.package_home_set = False se...
Resets the builder's state in order to build new packages.
def _add_install(self, context): contents = self._render_template('install.sh', context) self.config.setdefault('files', []) self._add_unique_file({ "path": "/install.sh", "contents": contents, "mode": "755" })
generates install.sh and adds it to included files
def fetch(self, path, use_sudo=False, user=None, remote=None): if path is None: raise ValueError("Path to the working copy is needed to fetch from a remote repository.") if remote is not None: cmd = 'git fetch %s' % remote else: cmd = 'git fetch' with ...
Fetch changes from the default remote repository. This will fetch new changesets, but will not update the contents of the working tree unless you do a merge or rebase. :param path: Path of the working copy directory. This directory must exist and be a Git working copy with...
def min_tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:
    """Calculate the tanimoto set similarity normalized by the smaller set.

    :param x: An iterable of hashable items
    :param y: Another iterable of hashable items
    :return: The size of the intersection divided by the size of the
             smaller set, or 0.0 when either set is empty.
    """
    first, second = set(x), set(y)
    if not first or not second:
        return 0.0
    overlap = len(first & second)
    return overlap / min(len(first), len(second))
Calculate the tanimoto set similarity using the minimum size. :param set x: A set :param set y: Another set :return: The similarity between the two sets
def _apply_final_rules(self, phonetic, final_rules, language_arg, strip): if not final_rules: return phonetic phonetic = self._expand_alternates(phonetic) phonetic_array = phonetic.split('|') for k in range(len(phonetic_array)): phonetic = phonetic_array[k] ...
Apply a set of final rules to the phonetic encoding. Parameters ---------- phonetic : str The term to which to apply the final rules final_rules : tuple The set of final phonetic transform regexps language_arg : int An integer representing the...
def delete(self):
    """delete - Deletes all entries matching the filter criteria.

    With active filters, only the matching entries are removed; with no
    filters at all, the whole model is destroyed.
    """
    has_criteria = bool(self.filters or self.notFilters)
    if has_criteria:
        return self.mdl.deleter.deleteMultiple(self.allOnlyIndexedFields())
    return self.mdl.deleter.destroyModel()
delete - Deletes all entries matching the filter criteria
def _upload_folder_as_item(local_folder, parent_folder_id, reuse_existing=False): item_id = _create_or_reuse_item(local_folder, parent_folder_id, reuse_existing) subdir_contents = sorted(os.listdir(local_folder)) filecount = len(subdir_contents)...
Upload a folder as a new item. Take a folder and use its base name as the name of a new item. Then, upload its containing files into the new item as bitstreams. :param local_folder: The path to the folder to be uploaded :type local_folder: string :param parent_folder_id: The id of the destination f...
def repair(self, rebuild_index=True, rebuild_relationships=True): if rebuild_index: self.reactions._generate_index() self.metabolites._generate_index() self.genes._generate_index() self.groups._generate_index() if rebuild_relationships: for met...
Update all indexes and pointers in a model Parameters ---------- rebuild_index : bool rebuild the indices kept in reactions, metabolites and genes rebuild_relationships : bool reset all associations between genes, metabolites, model and then re-add ...
def permutations_with_replacement(iterable, r=None):
    """Yield successive r-length permutations of the iterable's elements.

    Like itertools.permutations, but elements may repeat. Each result is
    a list. When r is None, it defaults to the number of elements.
    """
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    for indices in itertools.product(range(len(pool)), repeat=r):
        yield [pool[i] for i in indices]
Return successive r-length permutations of elements in the iterable. Similar to itertools.permutations but without the repeated-values filtering.
def CPUID(cpu): conf = {0x0: (0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69), 0x1: (0x000306c3, 0x05100800, 0x7ffafbff, 0xbfebfbff), 0x2: (0x76035a01, 0x00f0b5ff, 0x00000000, 0x00c10000), 0x4: {0x0: (0x1c004121, 0x01c0003f, 0x0000003f, 0x00000000), ...
CPUID instruction. The ID flag (bit 21) in the EFLAGS register indicates support for the CPUID instruction. If a software procedure can set and clear this flag, the processor executing the procedure supports the CPUID instruction. This instruction operates the same in non-64-bit modes ...