code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def write(s, path, encoding="utf-8"):
    """Write string to text file.

    If ``path`` is detected as a gzip file (via ``is_gzip_file``), the
    encoded text is gzip-compressed before being written.

    :param s: text to write
    :param path: destination file path
    :param encoding: encoding used to serialize ``s`` (default "utf-8")
    """
    import gzip
    is_gzip = is_gzip_file(path)
    with open(path, "wb") as f:
        data = s.encode(encoding)
        if is_gzip:
            # BUG FIX: zlib.compress() emits a raw zlib stream, which gzip
            # tools cannot read; gzip.compress() produces a proper gzip
            # container (magic header + CRC trailer).
            f.write(gzip.compress(data))
        else:
            f.write(data)
Write string to text file.
def make_timestamp_columns():
    """Build the standard pair of audit columns.

    Returns a tuple of two ``Column`` objects: ``created_at`` (set once
    on insert) and ``updated_at`` (refreshed on every update), both
    non-nullable.
    """
    created = Column('created_at', DateTime,
                     default=func.utcnow(), nullable=False)
    updated = Column('updated_at', DateTime,
                     default=func.utcnow(), onupdate=func.utcnow(),
                     nullable=False)
    return (created, updated)
Return two columns, created_at and updated_at, with appropriate defaults
def description(filename):
    """Provide a short description of the file.

    Skips the first three lines (lines 0-2), then returns the first
    subsequent line that is non-blank after stripping.  Implicitly
    returns ``None`` when no such line exists.
    """
    with open(filename) as handle:
        for idx, raw in enumerate(handle):
            if idx >= 3:
                text = raw.strip()
                if text:
                    return text
Provide a short description.
def get_rows(self):
    """Return the data rows of a broadcast-ratings chart.

    Drops the header row and the last three footer rows of the table,
    then keeps only the rows whose fourth child has text content.
    """
    body_rows = self.soup.find_all('tr')[1:-3]
    return [tr for tr in body_rows if tr.contents[3].string]
Get the rows from a broadcast ratings chart
def __process_gprest_response(self, r=None, restType='GET'): if r is None: logging.info('No response for REST '+restType+' request') return None httpStatus = r.status_code logging.info('HTTP status code: %s', httpStatus) if httpStatus == requests.codes.ok or...
Returns the processed response for rest calls
def Header(self):
    """Get the block header, building and caching it on first access.

    Returns:
        neo.Core.Header: the header for this block.
    """
    cached = self._header
    if not cached:
        cached = Header(self.PrevHash, self.MerkleRoot, self.Timestamp,
                        self.Index, self.ConsensusData, self.NextConsensus,
                        self.Script)
        self._header = cached
    return cached
Get the block header. Returns: neo.Core.Header:
def as_json(self, entity_url, context=None): try: urllib.request.urlopen(entity_url) except urllib.error.HTTPError: raise ValueError("Cannot open {}".format(entity_url)) entity_graph = self.read(entity_url) entity_json = json.loads(...
Method takes a entity uri and attempts to return the Fedora Object as a JSON-LD. Args: entity_url(str): Fedora Commons URL of Entity context(None): Returns JSON-LD with Context, default is None Returns: str: JSON-LD of Fedora Object
def get_all_kernels(self, kernel_ids=None, owners=None): params = {} if kernel_ids: self.build_list_params(params, kernel_ids, 'ImageId') if owners: self.build_list_params(params, owners, 'Owner') filter = {'image-type' : 'kernel'} self.build_filter_params...
Retrieve all the EC2 kernels available on your account. Constructs a filter to allow the processing to happen server side. :type kernel_ids: list :param kernel_ids: A list of strings with the image IDs wanted :type owners: list :param owners: A list of owner IDs :rtype...
def split_path(path): path = path.split(0, 1)[0] values = path.dimension_values(0) splits = np.concatenate([[0], np.where(np.isnan(values))[0]+1, [None]]) subpaths = [] data = PandasInterface.as_dframe(path) if pd else path.array() for i in range(len(splits)-1): end = splits[i+1] ...
Split a Path type containing a single NaN separated path into multiple subpaths.
def get_token(self): payload = {'grant_type': 'client_credentials', 'client_id': self.client_id, 'client_secret': self.client_secret} r = requests.post(OAUTH_ENDPOINT, data=json.dumps(payload), headers={'content-type': 'application/json'}) response = r.json() if r.status_code != 200 and ...
Gets the authorization token
def cli(env, identifier): mgr = SoftLayer.LoadBalancerManager(env.client) _, loadbal_id = loadbal.parse_id(identifier) if not (env.skip_confirmations or formatting.confirm("This action will cancel a load balancer. " "Continue?")): raise exceptions.CLIAbort(...
Cancel an existing load balancer.
def savepoint(self):
    """Copy the last displayed image onto the savepoint stack.

    Moves ``self._last_image`` (when present) into ``_savepoints`` and
    clears it so the same image is not saved twice.
    """
    image = self._last_image
    if image:
        self._savepoints.append(image)
        self._last_image = None
Copies the last displayed image.
def run(self):
    """Emit the extracted metadata.

    Writes the metadata as JSON to stdout when the ``stdout`` option is
    set; otherwise assigns the metadata dict to
    ``extract_dist.class_metadata``.
    """
    if not self.stdout:
        extract_dist.class_metadata = self.metadata
        return
    payload = json.dumps(self.metadata, default=to_str)
    sys.stdout.write("extracted json data:\n" + payload + "\n")
Sends extracted metadata in json format to stdout if stdout option is specified, assigns metadata dictionary to class_metadata variable otherwise.
def make_ggnvp(f, g=lambda x: 1./2*np.sum(x**2, axis=-1), f_argnum=0): @unary_to_nary def _make_ggnvp(f, x): f_vjp, f_x = _make_vjp(f, x) g_hvp, grad_g_x = _make_vjp(grad(g), f_x) f_jvp, _ = _make_vjp(f_vjp, vspace(grad_g_x).zeros()) def ggnvp(v): return f_vjp(g_hvp(f_jvp(v))) ...
Builds a function for evaluating generalized-Gauss-Newton-vector products at a point. Slightly more expensive than mixed-mode.
def close(self):
    """Begin closing the subscription.

    Closes the underlying source (if still open) and pushes a ``None``
    sentinel onto the queue to wake any waiting consumer.
    """
    source = self._S
    if source is not None:
        source.close()
        self._S = None
    self._Q.put_nowait(None)
Begin closing subscription.
def group(*blueprints, url_prefix=""): def chain(nested): for i in nested: if isinstance(i, (list, tuple)): yield from chain(i) elif isinstance(i, BlueprintGroup): yield from i.blueprints else: ...
Create a list of blueprints, optionally grouping them under a general URL prefix. :param blueprints: blueprints to be registered as a group :param url_prefix: URL route to be prepended to all sub-prefixes
def install_virtualenv(parser_args): python_version = '.'.join(str(v) for v in sys.version_info[:2]) sys.stdout.write('Installing Python {0} virtualenv into {1} \n'.format(python_version, VE_ROOT)) if sys.version_info < (3, 3): install_virtualenv_p2(VE_ROOT, python_version) else: install...
Installs virtual environment
def read_file(path):
    """Read a file's entire contents into a string.

    Arguments:
        path (str): Source path; existence is checked via ``must_exist``.

    Returns:
        str: the file contents.
    """
    with open(must_exist(path)) as handle:
        return handle.read()
Read file to string. Arguments: path (str): Source.
def get_win32_short_path_name(long_name): import ctypes from ctypes import wintypes _GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW _GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD] _GetShortPathNameW.restype = wintypes.DWORD output_buf_size = 0 ...
Gets the short path name of a given long path. References: http://stackoverflow.com/a/23598461/200291 http://stackoverflow.com/questions/23598289/get-win-short-fname-python Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut ...
def to_vobject(self, project=None, uid=None): self._update() vtodos = iCalendar() if uid: uid = uid.split('@')[0] if not project: for p in self._tasks: if uid in self._tasks[p]: project = p ...
Return vObject object of Taskwarrior tasks If filename and UID are specified, the vObject only contains that task. If only a filename is specified, the vObject contains all events in the project. Otherwise the vObject contains all objects of all files associated with the IcsTask object. ...
def update_scalar_bar_range(self, clim, name=None): if isinstance(clim, float) or isinstance(clim, int): clim = [-clim, clim] if len(clim) != 2: raise TypeError('clim argument must be a length 2 iterable of values: (min, max).') if name is None: if not hasattr...
Update the value range of the active or named scalar bar. Parameters ---------- 2 item list The new range of scalar bar. Example: ``[-1, 2]``. name : str, optional The title of the scalar bar to update
def memory_usage(self, deep=False):
    """Memory usage of the values, in bytes.

    Delegates to the array's own ``memory_usage`` when available.
    With ``deep=True`` (object dtype, not on PyPy) the sizes of the
    contained Python objects are added on top of ``nbytes``.

    Parameters
    ----------
    deep : bool
        Introspect the data deeply, interrogating object dtypes for
        system-level memory consumption.

    See Also
    --------
    numpy.ndarray.nbytes
    """
    array = self.array
    if hasattr(array, 'memory_usage'):
        return array.memory_usage(deep=deep)
    usage = array.nbytes
    if deep and is_object_dtype(self) and not PYPY:
        usage += lib.memory_usage_of_objects(array)
    return usage
Memory usage of the values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used See Also -------- numpy.ndarray.nbytes ...
def _higher_function_scope(node):
    """Search for the first function which encloses the given scope.

    Useful for retrying a failed name lookup in the enclosing
    function's scope.

    :param node: A scope node.
    :returns: the enclosing ``FunctionDef`` node, or ``None`` when the
        scope is not nested inside a function.
    """
    current = node
    while current.parent and not isinstance(current.parent, nodes.FunctionDef):
        current = current.parent
    return current.parent if (current and current.parent) else None
Search for the first function which encloses the given scope. This can be used for looking up in that function's scope, in case looking up in a lower scope for a particular name fails. :param node: A scope node. :returns: ``None``, if no parent function scope was found, otherwise an...
def send(self, uid, event, payload=None): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) if uid in self.controllers.keys(): addr = self.controllers[uid][0] port = self.controllers[uid][1] if event == E_MESSAGE: return sock.sendto('/message/{}'...
Send an event to a connected controller. Use pymlgame event type and correct payload. To send a message to the controller use pymlgame.E_MESSAGE event and a string as payload. :param uid: Unique id of the controller :param event: Event type :param payload: Payload of the event :...
def clean_params(params, drop_nones=True, recursive=True): cleaned = {} for key, value in six.iteritems(params): if drop_nones and value is None: continue if recursive and isinstance(value, dict): value = clean_params(value, drop_nones, recursive) cleaned[key] = v...
Clean up a dict of API parameters to be sent to the Coinbase API. Some endpoints require boolean options to be represented as integers. By default, will remove all keys whose value is None, so that they will not be sent to the API endpoint at all.
def to_dade_matrix(M, annotations="", filename=None): n, m = M.shape A = np.zeros((n + 1, m + 1)) A[1:, 1:] = M if not annotations: annotations = np.array(["" for _ in n], dtype=str) A[0, :] = annotations A[:, 0] = annotations.T if filename: try: np.savetxt(filena...
Returns a Dade matrix from input numpy matrix. Any annotations are added as header. If filename is provided and valid, said matrix is also saved as text.
def _ParseVValueString( self, parser_mediator, data, user_information_descriptor): data_start_offset = ( user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET) data_end_offset = data_start_offset + user_information_descriptor.size descriptor_data = data[data_start_offset:data_end_o...
Parses a V value string. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. data (bytes): Windows Registry V value data. user_information_descriptor (user_information_descriptor): V value user informat...
def get_raw_default_config_and_read_file_list(): global _CONFIG, _READ_DEFAULT_FILES if _CONFIG is not None: return _CONFIG, _READ_DEFAULT_FILES with _CONFIG_LOCK: if _CONFIG is not None: return _CONFIG, _READ_DEFAULT_FILES try: from ConfigParser import SafeCo...
Returns a ConfigParser object and a list of filenames that were parsed to initialize it
def datetime(self, field=None, val=None): if val is None: def source(): tzinfo = get_default_timezone() if settings.USE_TZ else None return datetime.fromtimestamp(randrange(1, 2100000000), tzinfo) else: ...
Returns a random datetime. If 'val' is passed, a datetime within two years of that date will be returned.
def _all_reachable_tables(t): for k, v in t.items(): for tname in _all_reachable_tables(v): yield tname yield k
A generator that provides all the names of tables that can be reached via merges starting at the given target table.
def get_chembl_id(nlm_mesh): mesh_id = get_mesh_id(nlm_mesh) pcid = get_pcid(mesh_id) url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \ 'cid/%s/synonyms/JSON' % pcid r = requests.get(url_mesh2pcid) res = r.json() synonyms = res['InformationList']['Info...
Get ChEMBL ID from NLM MESH Parameters ---------- nlm_mesh : str Returns ------- chembl_id : str
def _interpolated_template(self, templateid): phase, y = self._get_template_by_id(templateid) assert phase.min() >= 0 assert phase.max() <= 1 phase = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1]) y = np.concatenate([y[-5:], y, y[:5]]) return UnivariateSpline(phas...
Return an interpolator for the given template
def generate(regex, Ns):
    """Return the strings matching regex whose length is in Ns.

    Results are ordered by length first, then lexicographically.
    """
    matcher = regex_parse(regex)[0]
    return sorted(matcher(Ns), key=lambda s: (len(s), s))
Return the strings matching regex whose length is in Ns.
def create_intent(self, workspace_id, intent, description=None, examples=None, **kwargs): if workspace_id is None: raise ValueError('workspace_id must be provided') if intent is None...
Create intent. Create a new intent. This operation is limited to 2000 requests per 30 minutes. For more information, see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. :param str intent: The name of the intent. This string must conform to the ...
def get_summary(result): summary = { "success": result.wasSuccessful(), "stat": { 'total': result.testsRun, 'failures': len(result.failures), 'errors': len(result.errors), 'skipped': len(result.skipped), 'expectedFailures': len(result.expec...
get summary from test result Args: result (instance): HtmlTestResult() instance Returns: dict: summary extracted from result. { "success": True, "stat": {}, "time": {}, "records": [] }
def untrace_function(module, function):
    """Untraces given module function.

    :param module: Module of the function.
    :type module: object
    :param function: Function to untrace.
    :type function: object
    :return: Definition success (``False`` when the function was not
        traced in the first place).
    :rtype: bool
    """
    if not is_traced(function):
        return False
    setattr(module, get_object_name(function), untracer(function))
    return True
Untraces given module function. :param module: Module of the function. :type module: object :param function: Function to untrace. :type function: object :return: Definition success. :rtype: bool
def bz2_pack(source): import bz2, base64 out = "" first_line = source.split('\n')[0] if analyze.shebang.match(first_line): if py3: if first_line.rstrip().endswith('python'): first_line = first_line.rstrip() first_line += '3' out = first_line + ...
Returns 'source' as a bzip2-compressed, self-extracting python script. .. note:: This method uses up more space than the zip_pack method but it has the advantage in that the resulting .py file can still be imported into a python program.
async def emit(self, record: LogRecord): if self.writer is None: self.writer = await self._init_writer() try: msg = self.format(record) + self.terminator self.writer.write(msg.encode()) await self.writer.drain() except Exception: await ...
Actually log the specified logging record to the stream.
def generate_phase_1(dim=40):
    """First step in creating datapoints in the Poirazi & Mel model.

    Returns a vector of dimension ``dim`` whose last four entries are 1
    and whose remaining entries are drawn from a standard normal
    distribution.

    :param dim: length of the returned vector (default 40)
    :return: 1-D numpy array of shape (dim,)
    """
    phase_1 = numpy.random.normal(0, 1, dim)
    # Vectorized slice assignment replaces the original element-wise loop.
    phase_1[-4:] = 1.0
    return phase_1
The first step in creating datapoints in the Poirazi & Mel model. This returns a vector of dimension dim, with the last four values set to 1 and the rest drawn from a normal distribution.
def bounds(ctx, tile):
    """Print Tile bounds for the configured tile pyramid."""
    pyramid = TilePyramid(
        ctx.obj['grid'],
        tile_size=ctx.obj['tile_size'],
        metatiling=ctx.obj['metatiling'],
    )
    tile_bounds = pyramid.tile(*tile).bounds(pixelbuffer=ctx.obj['pixelbuffer'])
    click.echo('%s %s %s %s' % tile_bounds)
Print Tile bounds.
def clip_adaptor(read, adaptor):
    """Clip an adaptor sequence from the 3' end of ``read``.

    Convenience wrapper around ``clip_end``: only the first 10 bases of
    the adaptor are considered, and up to 2 mismatches are tolerated.

    :param adaptor: sequence to look for.
    """
    mismatches = 2
    adaptor = adaptor.truncate(10)
    read.clip_end(adaptor, len(adaptor) - mismatches)
Clip an adaptor sequence from this sequence. We assume it's in the 3' end. This is basically a convenience wrapper for clipThreePrime. It requires 8 out of 10 of the first bases in the adaptor sequence to match for clipping to occur. :param adaptor: sequence to look for. We only use the first 10 bases; ...
def _check_column_lengths(self): column_lengths_dict = { name: len(xs) for (name, xs) in self.columns_dict.items() } unique_column_lengths = set(column_lengths_dict.values()) if len(unique_column_lengths) != 1: raise ValueError( ...
Make sure columns are of the same length or else DataFrame construction will fail.
def get_matching_property_names(self, regex): log = logging.getLogger(self.cls_logger + '.get_matching_property_names') prop_list_matched = [] if not isinstance(regex, basestring): log.warn('regex arg is not a string, found type: {t}'.format(t=regex.__class__.__name__)) r...
Returns a list of property names matching the provided regular expression :param regex: Regular expression to search on :return: (list) of property names matching the regex
def export(self, Height=None, options=None, outputFile=None, Resolution=None,\ Units=None, Width=None, Zoom=None, view="current", verbose=False): PARAMS=set_param(["Height","options","outputFile","Resolution",\ "Units","Width","Zoom","view"],\ [Height,options,outputFile,Resolution,Units,...
Exports the current view to a graphics file and returns the path to the saved file. PNG and JPEG formats have options for scaling, while other formats only have the option 'exportTextAsFont'. For the PDF format, exporting text as font does not work for two-byte characters such as ...
def params_to_dict(params, dct):
    """Merge ``params`` into ``dct``, filtering out ``None`` values.

    ``dct`` is updated in place and also returned.
    """
    dct.update((k, v) for k, v in params.items() if v is not None)
    return dct
Updates the 'dct' dictionary with the 'params' dictionary, filtering out all those whose param value is None.
def taxonomy(value):
    """Validate a taxonomy tag.

    Any ASCII character goes into a taxonomy, except whitespace.

    :returns: ``value`` unchanged.
    :raises ValueError: when ``value`` is not pure ASCII or contains a
        whitespace character.
    """
    try:
        value.encode('ascii')
    except UnicodeEncodeError:
        raise ValueError('tag %r is not ASCII' % value)
    has_whitespace = re.search(r'\s', value) is not None
    if has_whitespace:
        raise ValueError('The taxonomy %r contains whitespace chars' % value)
    return value
Any ASCII character goes into a taxonomy, except spaces.
def install(name=None, refresh=False, version=None, pkgs=None, **kwargs): if refresh: refresh_db() try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs...
Install packages using the pkgutil tool. CLI Example: .. code-block:: bash salt '*' pkg.install <package_name> salt '*' pkg.install SMClgcc346 Multiple Package Installation Options: pkgs A list of packages to install from OpenCSW. Must be passed as a python list. ...
def get_target_list(self, scan_id):
    """Get a scan's target list.

    Each entry in the scan's ``targets`` table is a 3-tuple whose first
    element is the target; the other two elements are ignored.

    :param scan_id: key into ``self.scans_table``.
    :return: list of targets, in table order.
    """
    # Comprehension replaces the manual append loop (same order).
    return [target for target, _, _ in self.scans_table[scan_id]['targets']]
Get a scan's target list.
def load_pickle(file_path):
    """Unpickle some data from a given path.

    Input:
        - file_path: Target file path.

    Output:
        - data: The Python object that was serialized and stored on disk.
    """
    # Context manager guarantees the handle is closed even when
    # pickle.load raises (the original leaked the handle on error).
    with open(file_path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
Unpickle some data from a given path. Input: - file_path: Target file path. Output: - data: The python object that was serialized and stored in disk.
def fatalities_range(number): range_format = '{min_range} - {max_range}' more_than_format = '> {min_range}' ranges = [ [0, 100], [100, 1000], [1000, 10000], [10000, 100000], [100000, float('inf')] ] for r in ranges: min_range = r[0] max_range =...
A helper to return fatalities as a range of number. See https://github.com/inasafe/inasafe/issues/3666#issuecomment-283565297 :param number: The exact number. Will be converted as a range. :type number: int, float :return: The range of the number. :rtype: str
def get_chunked_content(self, chunksize=4096):
    """Yield the datastream content in chunks.

    Lets large datastreams be consumed without reading the entire
    contents into memory.

    :param chunksize: number of bytes per chunk (default 4096).
    """
    response = self.obj.api.getDatastreamDissemination(
        self.obj.pid, self.id, stream=True, asOfDateTime=self.as_of_date)
    yield from response.iter_content(chunksize)
Generator that returns the datastream content in chunks, so larger datastreams can be used without reading the entire contents into memory.
def _add_volume(line): section = _analyse_status_type(line) fields = line.strip().split() volume = {} for field in fields: volume[field.split(':')[0]] = field.split(':')[1] if section == 'LOCALDISK': resource['local volumes'].append(volume) else: lastpnodevolumes.append(v...
Analyse the line of volumes of ``drbdadm status``
def run(): args = parse_args() if args.verbose: log_level = logging.DEBUG else: log_level = logging.INFO logging.basicConfig( level=log_level, format='%(asctime)s %(levelname)s %(name)s: %(message)s') if not args.verbose: req_logger = logging.getLogger('reques...
Command line entrypoint for the ``refresh-lsst-bib`` program.
def Output(self):
    """Output all sections of the page, in order."""
    for render in (self.Open, self.Header, self.Body, self.Footer):
        render()
Output all sections of the page.
def disable_requiretty_on_sudoers(log=False):
    """Allow sudo calls through ssh without a tty.

    Comments out any ``Defaults ... requiretty`` line in /etc/sudoers.

    :param log: when True, log the action before applying it.
    :return: ``True`` on completion.
    """
    if log:
        message = 'disabling requiretty on sudo calls'
        bookshelf2.logging_helpers.log_green(message)
    comment_line('/etc/sudoers', '^Defaults.*requiretty', use_sudo=True)
    return True
allow sudo calls through ssh without a tty
def list_all(self, before_id=None, since_id=None, **kwargs):
    """Return all direct messages, newest first.

    Note you can only provide one of ``before_id``, ``since_id``.

    :param str before_id: message ID for paging backwards
    :param str since_id: message ID for most recent messages since
    :return: autopaged direct messages
    """
    page = self.list(before_id=before_id, since_id=since_id, **kwargs)
    return page.autopage()
Return all direct messages. The messages come in reversed order (newest first). Note you can only provide _one_ of ``before_id``, ``since_id``. :param str before_id: message ID for paging backwards :param str since_id: message ID for most recent messages since :return: direct m...
def fullpath(relpath):
    """Convert a relative path to an absolute, user-expanded path.

    Accepts either a path string or an open file object (in which case
    the file's ``name`` attribute is used).

    :return: absolute path string.
    """
    # BUG FIX: the original tested ``type(relpath) is file``, which raises
    # NameError on Python 3 (the ``file`` builtin no longer exists), and
    # ``type(relpath) is object``, which is never true for a file object.
    if isinstance(relpath, io.IOBase):
        relpath = relpath.name
    return os.path.abspath(os.path.expanduser(relpath))
Relative path to absolute
def space_clone(args): if not args.to_workspace: args.to_workspace = args.workspace if not args.to_project: args.to_project = args.project if (args.project == args.to_project and args.workspace == args.to_workspace): eprint("Error: destination project and namespace must diffe...
Replicate a workspace
def build_response(headers: Headers, key: str) -> None:
    """Build a handshake response to send to the client.

    ``key`` comes from :func:`check_request`.  Mutates ``headers`` in
    place, adding the three WebSocket upgrade headers.
    """
    response_fields = {
        "Upgrade": "websocket",
        "Connection": "Upgrade",
        "Sec-WebSocket-Accept": accept(key),
    }
    for name, value in response_fields.items():
        headers[name] = value
Build a handshake response to send to the client. ``key`` comes from :func:`check_request`.
def return_hdr(ts, package): try: fdno = os.open(package, os.O_RDONLY) except OSError: hdr = None return hdr ts.setVSFlags(~(rpm.RPMVSF_NOMD5 | rpm.RPMVSF_NEEDPAYLOAD)) try: hdr = ts.hdrFromFdno(fdno) except rpm.error: hdr = None raise rpm.error if...
Hand back the hdr - duh - if the pkg is foobar handback None Shamelessly stolen from Seth Vidal http://yum.baseurl.org/download/misc/checksig.py
def create(state, host, ctid, template=None): current_containers = host.fact.openvz_containers if ctid in current_containers: raise OperationError( 'An OpenVZ container with CTID {0} already exists'.format(ctid), ) args = ['{0}'.format(ctid)] if template: args.append(...
Create OpenVZ containers. + ctid: CTID of the container to create
def from_traceback(cls, tb):
    """Construct a Bytecode from the given traceback.

    Walks to the innermost traceback entry (the frame that raised) and
    builds the instance from that frame's code and last instruction.
    """
    while tb.tb_next is not None:
        tb = tb.tb_next
    return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
Construct a Bytecode from the given traceback
def filter(self, *args):
    """Adds a Filter to this query.

    Args:
        see :py:class:`Filter <datastore.query.Filter>` constructor

    Returns self for JS-like method chaining::

        query.filter('age', '>', 18).filter('sex', '=', 'Female')
    """
    is_filter_obj = len(args) == 1 and isinstance(args[0], Filter)
    new_filter = args[0] if is_filter_obj else Filter(*args)
    new_filter.object_getattr = self.object_getattr
    self.filters.append(new_filter)
    return self
Adds a Filter to this query. Args: see :py:class:`Filter <datastore.query.Filter>` constructor Returns self for JS-like method chaining:: query.filter('age', '>', 18).filter('sex', '=', 'Female')
def add_directives(kb_app: kb,
                   sphinx_app: Sphinx,
                   sphinx_env: BuildEnvironment,
                   docnames: List[str] = None,
                   ):
    """For each resource type, register a new Sphinx directive.

    BUG FIX: the original signature read ``docnames=List[str]``, which
    made the typing object itself the default value; it is now a proper
    annotation with a ``None`` default (``docnames`` is unused here but
    kept for the plugin-callback signature).
    """
    # Only the resource-type names are needed; the dict values were unused.
    for resource_name in kb_app.config.resources:
        sphinx_app.add_directive(resource_name, ResourceDirective)
For each resource type, register a new Sphinx directive
def local_title(self):
    """Localised version of the human-readable title of the page.

    Combines the translated exception name (the part of ``title``
    before " for ") with the locally-formatted exception date.
    """
    exception_name = self.title.partition(" for ")[0]
    local_date = getLocalDate(self.except_date, self.time_from, self.tz)
    return _("{exception} for {date}").format(exception=_(exception_name),
                                              date=dateFormat(local_date))
Localised version of the human-readable title of the page.
def to_dict(self): attributes = dict(self.attributes.items()) if self.style: attributes.update({"style": dict(self.style.items())}) vdom_dict = {'tagName': self.tag_name, 'attributes': attributes} if self.event_handlers: event_handlers = dict(self.event_handlers.i...
Converts VDOM object to a dictionary that passes our schema
def get_output(self, job_id, outfn): job_info = self.job_info(jobid=job_id)[0] status = int(job_info["Status"]) if status != 5: raise Exception("The status of job %d is %d (%s)" %(job_id, status, self.status_codes[status])) remotefn = job_info["OutputLoc"]...
Download an output file given the id of the output request job. ## Arguments * `job_id` (int): The id of the _output_ job. * `outfn` (str): The file where the output should be stored. May also be a file-like object with a 'write' method.
def facets_area(self):
    """Return an array containing the area of each facet.

    Returns
    ---------
    area : (len(self.facets),) float
      Total area of each facet (group of faces)
    """
    face_areas = self.area_faces
    facet_totals = [sum(face_areas[indices]) for indices in self.facets]
    return np.array(facet_totals, dtype=np.float64)
Return an array containing the area of each facet. Returns --------- area : (len(self.facets),) float Total area of each facet (group of faces)
def by_user(config):
    """Display LDAP group membership sorted by user."""
    ldap_client = Client()
    ldap_client.prepare_connection()
    audit_api = API(ldap_client)
    CLI.parse_membership('Groups by User', audit_api.by_user())
Display LDAP group membership sorted by user.
def clear_layout(layout: QLayout) -> None:
    """Clear the layout of all its components.

    Widgets are scheduled for deletion via ``deleteLater``; nested
    layouts are cleared recursively.
    """
    if layout is None:
        return
    while layout.count():
        item = layout.takeAt(0)
        child_widget = item.widget()
        if child_widget is None:
            clear_layout(item.layout())
        else:
            child_widget.deleteLater()
Clear the layout off all its components
def get_builtin_date(date, date_format="%Y-%m-%dT%H:%M:%S", raise_exception=False): if isinstance(date, datetime.datetime): return date elif isinstance(date, xmlrpc_client.DateTime): return datetime.datetime.strptime(date.value, "%Y%m%dT%H:%M:%S") else: try: return dateti...
Try to convert a date to a builtin instance of ``datetime.datetime``. The input date can be a ``str``, a ``datetime.datetime``, a ``xmlrpc.client.Datetime`` or a ``xmlrpclib.Datetime`` instance. The returned object is a ``datetime.datetime``. :param date: The date object to convert. :param date_format:...
def set_default(self, default):
    """Set Definition default value.

    :param default: default value; number, str or quoted str ("value").
        Surrounding double quotes, when present, are stripped.
    """
    if default is not None:
        is_quoted = (len(default) > 1
                     and default.startswith('"')
                     and default.endswith('"'))
        if is_quoted:
            default = default[1:-1]
    self.defaultValue = default
Set Definition default value. :param default: default value; number, str or quoted str ("value")
def search(self, start_ts, end_ts):
    """Query Mongo for documents whose ``_ts`` lies in [start_ts, end_ts].

    Yields matching docs from every meta collection in turn.
    """
    query = {"_ts": {"$lte": end_ts, "$gte": start_ts}}
    for collection_name in self._meta_collections():
        yield from self.meta_database[collection_name].find(query)
Called to query Mongo for documents in a time range.
def _match_to_array(m):
    """Cast each element of a pattern-search match into its class.

    Returns a list with one cast element per match position.
    """
    size = m.varSize()
    return [_cast_biopax_element(m.get(idx)) for idx in range(size)]
Returns an array consisting of the elements obtained from a pattern search cast into their appropriate classes.
def combine_cache_keys(cls, cache_keys): if len(cache_keys) == 1: return cache_keys[0] else: combined_id = Target.maybe_readable_combine_ids(cache_key.id for cache_key in cache_keys) combined_hash = hash_all(sorted(cache_key.hash for cache_key in cache_keys)) return cls(combined_id, comb...
Returns a cache key for a list of target sets that already have cache keys. This operation is 'idempotent' in the sense that if cache_keys contains a single key then that key is returned. Note that this operation is commutative but not associative. We use the term 'combine' rather than 'merge' or 'un...
def load_entry_point_system_roles(self, entry_point_group):
    """Load system roles from an entry point group.

    :param entry_point_group: The entrypoint for extensions.
    """
    entry_points = pkg_resources.iter_entry_points(group=entry_point_group)
    for entry_point in entry_points:
        self.register_system_role(entry_point.load())
Load system roles from an entry point group. :param entry_point_group: The entrypoint for extensions.
def closing(image, radius=None, mask=None, footprint=None):
    """Do a morphological closing (dilation followed by erosion).

    image     - pixel image to operate on
    radius    - use a structuring element with the given radius; an
                8-connected element is used when none is given
    mask      - if present, only use unmasked pixels for operations
    footprint - explicit structuring element
    """
    dilated = grey_dilation(image, radius, mask, footprint)
    return grey_erosion(dilated, radius, mask, footprint)
Do a morphological closing image - pixel image to operate on radius - use a structuring element with the given radius. If no structuring element, use an 8-connected structuring element. mask - if present, only use unmasked pixels for operations
def extract_keywords_from_text(index_page, no_items=5): index_page = MLStripper.strip_tags(index_page) tokenized_index = TextBlob(index_page).lower() def to_str(key): if isinstance(key, unicode): return key.encode("utf-8") return key present_keywords = [ KEYWORDS_LOWE...
Try to process text on the `index_page` deduce the keywords and then try to match them on the Aleph's dataset. Function returns maximally `no_items` items, to prevent spamming the user. Args: index_page (str): Content of the page as UTF-8 string no_items (int, default 5): Number of items t...
def project_point(cb, msg, attributes=('x', 'y')): if skip(cb, msg, attributes): return msg plot = get_cb_plot(cb) x, y = msg.get('x', 0), msg.get('y', 0) crs = plot.current_frame.crs coordinates = crs.transform_points(plot.projection, np.array([x]), np.array([y])) msg['x'], msg['y'] = coordinat...
Projects a single point supplied by a callback
def set_color_in_session(intent, session): card_title = intent['name'] session_attributes = {} should_end_session = False if 'Color' in intent['slots']: favorite_color = intent['slots']['Color']['value'] session_attributes = create_favorite_color_attributes(favorite_color) speech...
Sets the color in the session and prepares the speech to reply to the user.
def flatten(cls, stats): flat_children = {} for _stats in spread_stats(stats): key = (_stats.name, _stats.filename, _stats.lineno, _stats.module) try: flat_stats = flat_children[key] except KeyError: flat_stats = flat_children[key] = cl...
Makes a flat statistics from the given statistics.
def layers(self):
    """If entities have a layer defined, return it.

    Returns
    ---------
    layers: (len(entities), ) list of str
        The stringified ``layer`` of each entity, or 'NONE' when the
        entity defines no layer.
    """
    # Comprehension replaces the preallocate-and-overwrite loop.
    return [str(e.layer) if hasattr(e, 'layer') else 'NONE'
            for e in self.entities]
If entities have a layer defined, return it. Returns --------- layers: (len(entities), ) list of str
async def delView(self, iden): if iden == self.iden: raise s_exc.SynErr(mesg='cannot delete the main view') view = self.views.pop(iden, None) if view is None: raise s_exc.NoSuchView(iden=iden) await self.hive.pop(('cortex', 'views', iden)) await view.fini(...
Delete a cortex view by iden.
def _get_relationships(model): relationships = [] for name, relationship in inspect(model).relationships.items(): class_ = relationship.mapper.class_ if relationship.uselist: rel = ListRelationship(name, relation=class_.__name__) else: rel = Relationship(name, rel...
Gets the necessary relationships for the resource by inspecting the sqlalchemy model for relationships. :param DeclarativeMeta model: The SQLAlchemy ORM model. :return: A tuple of Relationship/ListRelationship instances corresponding to the relationships on the Model. :rtype: tuple
def _point_in_bbox(point, bounds): return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
Validate whether the point is inside the bounding box.
def midpoint(self):
    """Calculate the midpoint between locations in segments.

    Returns:
        list of Point: groups of midpoints between points in segments;
        an empty list is produced for segments with fewer than two
        points.
    """
    return [segment.midpoint() if len(segment) > 1 else []
            for segment in self]
Calculate the midpoint between locations in segments. Returns: list of Point: Groups of midpoint between points in segments
def post_command(self, command, args):
    """Post a command asynchronously and don't wait for a response.

    Errors during execution are only logged; the command's response is
    discarded.  Thread-safe.
    """
    coro = self.send_command(command, args, Verifier())
    self._loop.log_coroutine(coro)
Post a command asynchronously and don't wait for a response. There is no notification of any error that could happen during command execution. A log message will be generated if an error occurred. The command's response is discarded. This method is thread-safe and may be called from ...
def DiffArrayObjects(self, oldObj, newObj, isElementLinks=False): if oldObj == newObj: return True if not oldObj or not newObj: return False if len(oldObj) != len(newObj): __Log__.debug('DiffArrayObjects: Array lengths do not match %d != %d' % (len(oldObj), len(n...
Method which deligates the diffing of arrays based on the type
def look_up_and_get(cellpy_file_name, table_name):
    """Extract a table from a cellpy hdf5-file.

    :param cellpy_file_name: path to the hdf5 file.
    :param table_name: name of the table under /CellpyData.
    :return: pandas DataFrame with the table contents.
    """
    root = '/CellpyData'
    table_path = '/'.join([root, table_name])
    # BUG FIX: the original log f-string was missing its closing
    # parenthesis, and the store was never closed if select() raised.
    logging.debug(f"look_up_and_get({cellpy_file_name}, {table_name})")
    with pd.HDFStore(cellpy_file_name) as store:
        table = store.select(table_path)
    return table
Extracts table from cellpy hdf5-file.
def encode_space_pad(instr, length, encoding): output = instr.decode('utf-8').encode(encoding) if len(output) > length: raise pycdlibexception.PyCdlibInvalidInput('Input string too long!') encoded_space = ' '.encode(encoding) left = length - len(output) while left > 0: output += enco...
A function to pad out an input string with spaces to the length specified. The space is first encoded into the specified encoding, then appended to the input string until the length is reached. Parameters: instr - The input string to encode and pad. length - The length to pad the input string to....
def update(self, newcfg):
    """Update current config with a dictionary.

    ``newcfg`` maps section names to dicts of option/value pairs.
    Missing sections are created as ``CaseInsensitiveDict``; existing
    values are overwritten option by option.
    """
    for section, options in newcfg.items():
        if section not in self._cfg:
            self._cfg[section] = CaseInsensitiveDict()
        # items() avoids the redundant per-key lookup of the original.
        for option, value in options.items():
            self._cfg[section][option] = value
Update current config with a dictionary
def write_image(self, img, extname=None, extver=None, compress=None, tile_dims=None, header=None): self.create_image_hdu(img, header=header, extname=extname, extver=extver, compress=compress, tile_dims=...
Create a new image extension and write the data. parameters ---------- img: ndarray An n-dimensional image. extname: string, optional An optional extension name. extver: integer, optional FITS allows multiple extensions to have the same name (...
def cutout_shape(self, shape_obj):
    """Cut out and return the portion of the data matching ``shape_obj``.

    Returns a masked numpy array in which the pixels not enclosed in
    the shape are masked out.
    """
    view, inside = self.get_shape_view(shape_obj)
    cutout = self._slice(view)
    return np.ma.array(cutout, mask=np.logical_not(inside))
Cut out and return a portion of the data corresponding to `shape_obj`. A masked numpy array is returned, where the pixels not enclosed in the shape are masked out.
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str], Callable[[str, str, int], str]]: package_locale = path.join(path.dirname(__pkg.__file__), 'locale') gettext.install(__pkg.__name__, package_locale) return gettext.gettext, gettext.ngettext
Configure ``gettext`` for given package. Args: __pkg: Package to use as location for :program:`gettext` files Returns: :program:`gettext` functions for singular and plural translations
def copy(self, overrides=None, locked=False):
    """Create a separate shallow copy of this config.

    :param overrides: when given, replaces the copy's overrides.
    :param locked: lock state of the new copy.
    :return: the new config object, with its cache cleared.
    """
    from copy import copy as _shallow_copy
    other = _shallow_copy(self)
    if overrides is not None:
        other.overrides = overrides
    other.locked = locked
    other._uncache()
    return other
Create a separate copy of this config.
def _propagate_incompatibility( self, incompatibility ): unsatisfied = None for term in incompatibility.terms: relation = self._solution.relation(term) if relation == SetRelation.DISJOINT: return elif relation == SetRelation.OVERLAPPING: ...
If incompatibility is almost satisfied by _solution, adds the negation of the unsatisfied term to _solution. If incompatibility is satisfied by _solution, returns _conflict. If incompatibility is almost satisfied by _solution, returns the unsatisfied term's package name. Otherw...
def prep_parallel(self, binary_args, other_args): if self.length < 100: raise Exception("Run this across 1 processor by setting num_processors kwarg to None.") if self.num_processors == -1: self.num_processors = mp.cpu_count() split_val = int(np.ceil(self.length/self.num_...
Prepare the parallel calculations Prepares the arguments to be run in parallel. It will divide up arrays according to num_splits. Args: binary_args (list): List of binary arguments for input into the SNR function. other_args (tuple of obj): tuple of other args for input...
def chunks(arr, size):
    """Splits a list into chunks.

    :param arr: list to split
    :type arr: :class:`list`
    :param size: number of elements in each chunk (the final chunk may
        be shorter)
    :type size: :class:`int`
    :return: generator object
    :rtype: :class:`generator`
    """
    # Builtin range replaces the py2-compat ``_range`` shim; behavior is
    # identical on Python 3.
    for start in range(0, len(arr), size):
        yield arr[start:start + size]
Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator`
def log_critical(msg, logger="TaskLogger"):
    """Log a CRITICAL message.

    Convenience function to log a message to the default Logger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Name used to retrieve the unique TaskLogger

    Returns
    -------
    logger : TaskLogger
    """
    task_logger = get_tasklogger(logger)
    task_logger.critical(msg)
    return task_logger
Log a CRITICAL message Convenience function to log a message to the default Logger Parameters ---------- msg : str Message to be logged name : `str`, optional (default: "TaskLogger") Name used to retrieve the unique TaskLogger Returns ------- logger : TaskLogger
def bind(end_point, socket_type):
    """Bind a zmq socket to ``end_point``.

    On failure the socket is closed and the original zmq error class is
    re-raised with the offending URL included in the message.

    :return: the bound zmq socket.
    """
    zmq_socket = context.socket(socket_type)
    try:
        zmq_socket.bind(end_point)
        return zmq_socket
    except zmq.error.ZMQError as exc:
        zmq_socket.close()
        raise exc.__class__('%s: %s' % (exc, end_point))
Bind to a zmq URL; raise a proper error if the URL is invalid; return a zmq socket.