code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def variable_iter(self, base): base_substs = dict(('<' + t + '>', u) for (t, u) in base.items()) substs = [] vals = [] for with_defn in self.with_exprs: substs.append('<' + with_defn[0] + '>') vals.append(Host.expand_with(with_defn[1:])) for val_tpl in pro...
returns iterator over the cross product of the variables for this stanza
def transform(self, vector):
    """Compute the Hadamard (element-wise) product with the scaling vector.

    :param vector: a single vector or an RDD of vectors
    :return: the element-wise product of the scaling vector with the input
    """
    if isinstance(vector, RDD):
        converted = vector.map(_convert_to_vector)
    else:
        converted = _convert_to_vector(vector)
    return callMLlibFunc("elementwiseProductVector", self.scalingVector, converted)
Computes the Hadamard product of the vector.
def add_nodes(self, nodes):
    """Append a node or a list of nodes to ``self.node_list``.

    :param nodes: a single node or a list of nodes to add to the graph
    :return: None
    """
    self.node_list.extend(nodes if isinstance(nodes, list) else [nodes])
Add a given node or list of nodes to self.node_list. Args: node (Node or list[Node]): the node or list of nodes to add to the graph Returns: None Examples: Adding one node: :: >>> from blur.markov.node import Node >>> graph = Graph...
async def jsk_git(self, ctx: commands.Context, *, argument: CodeblockConverter):
    """Shortcut for 'jsk sh git': re-invoke the shell command with 'git' prepended."""
    git_block = Codeblock(argument.language, "git " + argument.content)
    return await ctx.invoke(self.jsk_shell, argument=git_block)
Shortcut for 'jsk sh git'. Invokes the system shell.
def unpickle(pickle_file):
    """Unpickle and return a Python object from the given path.

    :param pickle_file: path to a file produced by ``dill.dump``
    :return: the deserialized object, or None if nothing was loaded
    """
    obj = None
    with open(pickle_file, "rb") as pickle_f:
        obj = dill.load(pickle_f)
    # Only warn when nothing was loaded. The previous truthiness test
    # (`if not pickle`) also fired for legitimately falsy objects such as
    # [], 0 or "". (Also renamed the local, which shadowed the pickle module.)
    if obj is None:
        LOG.error("Could not load python object from file")
    return obj
Unpickle a python object from the given path.
def _get_hanging_wall_coeffs_mag(self, C, mag): if mag < 5.5: return 0.0 elif mag > 6.5: return 1.0 + C["a2"] * (mag - 6.5) else: return (mag - 5.5) * (1.0 + C["a2"] * (mag - 6.5))
Returns the hanging wall magnitude term defined in equation 14
def initialize(cls) -> None:
    """Install the ``SIGCHLD`` handler (idempotent).

    The handler only schedules ``cls._cleanup`` on the current `.IOLoop`,
    so the real work runs outside signal context.
    """
    if cls._initialized:
        return
    loop = ioloop.IOLoop.current()
    handler = lambda sig, frame: loop.add_callback_from_signal(cls._cleanup)
    cls._old_sigchld = signal.signal(signal.SIGCHLD, handler)
    cls._initialized = True
Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. Note that the `.IOLoop` used for signal handling need not be the same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). ...
def get_aws_secrets_from_env():
    """Extract AWS secret values present in the environment.

    :return: set of secret strings found among the well-known AWS env vars
    """
    secret_vars = ('AWS_SECRET_ACCESS_KEY', 'AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN')
    return {os.environ[name] for name in secret_vars if name in os.environ}
Extract AWS secrets from environment variables.
def set_widgets(self):
    """Set up widgets on the Impact Functions Table 1 tab."""
    table = self.tblFunctions1
    # Stretch both header axes so the table fills the available space.
    for header in (table.horizontalHeader(), table.verticalHeader()):
        header.setSectionResizeMode(QHeaderView.Stretch)
    self.populate_function_table_1()
Set widgets on the Impact Functions Table 1 tab.
def load_stylesheet(pyside=True): if pyside: import qdarkstyle.pyside_style_rc else: import qdarkstyle.pyqt_style_rc if not pyside: from PyQt4.QtCore import QFile, QTextStream else: from PySide.QtCore import QFile, QTextStream f = QFile(":qdarkstyle/style.qss") if...
Loads the stylesheet. Takes care of importing the rc module. :param pyside: True to load the pyside rc file, False to load the PyQt rc file :return the stylesheet string
def set_image(self, image=None):
    """Set the image (icon) number of this entry.

    :param image: new image number; must be an int
    :return: True on success
    :raises KPError: if image is missing or not an int
    """
    # Exact type check (not isinstance) preserved: rejects bool/int subclasses.
    if image is None or type(image) is not int:
        raise KPError("Need a new image number")
    self.image = image
    # Record modification time with second precision.
    self.last_mod = datetime.now().replace(microsecond=0)
    return True
This method is used to set the image number. image must be an unsigned int.
def _extend_word(self, word, length, prefix=0, end=False, flatten=False): if len(word) == length: if end and "<" not in self[word[-1]]: raise GenerationError(word + " cannot be extended") else: return word else: exclude = {"<"} ...
Extend the given word with a random suffix up to length. :param length: the length of the extended word; >= len(word); :param prefix: if greater than 0, the maximum length of the prefix to consider to choose the next character; :param end: if True, the generated word ends...
def _merge(x, y): merged = {**x, **y} xkeys = x.keys() for key in xkeys: if isinstance(x[key], dict) and key in y: merged[key] = _merge(x[key], y[key]) return merged
Merge two nested dictionaries. Overwrite values in x with values in y.
def set_environment_variable(self, key, val):
    """Set an environment variable unless it already holds a different value.

    :param key: variable name
    :param val: value to assign
    :raises Contradiction: if the variable is already set to another value
    """
    current = self.get_environment_variable(key)
    if current is not None and current != val:
        raise Contradiction("Could not set environment variable %s" % (key))
    self.__dict__['environment_variables'][key] = val
Sets a variable if that variable is not already set
def call(command, collect_missing=False, silent=True):
    r"""Call a task as if it were invoked from the command line.

    :param command: a route followed by params, shell-style
    :param collect_missing: collect any missing argument interactively
    :param silent: use the silent executor when True
    :return: the return value of the called command
    """
    executor = _execCommand if silent else execCommand
    return executor(shlex.split(command), collect_missing)
r"""Calls a task, as if it were called from the command line. Args: command (str): A route followed by params (as if it were entered in the shell). collect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False. Returns: The return value of the called comman...
def _sort_modules(mods): def compare(x, y): x = x[1] y = y[1] if x == y: return 0 if y.stem == "__init__.py": return 1 if x.stem == "__init__.py" or x < y: return -1 return 1 return sorted(mods, key=cmp_to_key(compare))
Always sort `index` or `README` as first filename in list.
def update(self, byte_arr):
    """Feed bytes into the running CRC value.

    :param byte_arr: bytes to process; empty/None input is a no-op
    """
    if not byte_arr:
        return
    self.value = self.calculate(byte_arr, self.value)
Read bytes and update the CRC computed.
def match_patterns(codedata) : ret = {} for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS) : print('Matching pattern %d "%s"' % (index1+1, pattern)) matcher = shaman.PatternMatcher(pattern) tmp = {} for index2, (language, code) in enumerate(codedata) : if language not in shaman.SUPPORTING_LAN...
Match patterns by shaman.PatternMatcher. Get the average ratio of each pattern per language.
def find_triangles(self):
    """Find all the triangles (3-cliques) present in the given model.

    :return: list of 3-node cliques found by ``nx.find_cliques``
    """
    return [clique for clique in nx.find_cliques(self.model) if len(clique) == 3]
Finds all the triangles present in the given model Examples -------- >>> from pgmpy.models import MarkovModel >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.inference import Mplp >>> mm = MarkovModel() >>> mm.add_nodes_from(['x1', 'x2', 'x3'...
def write_numeric(fmt, value, buff, byteorder='big'):
    """Write a numeric value to a file-like object.

    :param fmt: mapping of byte-order name -> ``struct.Struct`` packer
    :param value: numeric value to pack
    :param buff: writable binary file-like object
    :param byteorder: 'big' or 'little'
    :raises ValueError: if ``byteorder`` is not a key of ``fmt``
    """
    try:
        packer = fmt[byteorder]
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
    buff.write(packer.pack(value))
Write a numeric value to a file-like object.
def fetch(self): params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return EntityInstance( self._version, payload, service_sid=self._solution['service_sid'], identity...
Fetch an EntityInstance :returns: Fetched EntityInstance :rtype: twilio.rest.authy.v1.service.entity.EntityInstance
def ft1file(self, **kwargs): kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.ft1file_format.format(**kwargs_copy) if kwa...
return the name of the input ft1 file list
def get_terms(self, field=None):
    """Create a terms aggregation on ``field`` and register it.

    :param field: the index field to aggregate on (required)
    :return: self, so the method is chainable
    :raises AttributeError: if no field is given
    """
    if not field:
        raise AttributeError("Please provide field to apply aggregation to!")
    aggregation = A("terms", field=field, size=self.size, order={"_count": "desc"})
    self.aggregations['terms_%s' % field] = aggregation
    return self
Create a terms aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods
def put(self, item, block=True, timeout=None):
    """Put ``item`` into the underlying queue, forwarding blocking options."""
    underlying = self._queue
    return underlying.put(item, block, timeout)
Put item into underlying queue.
def search(self, name, value): partial = None header_name_search_result = CocaineHeaders.STATIC_TABLE_MAPPING.get(name) if header_name_search_result: index = header_name_search_result[1].get(value) if index is not None: return index, name, value ...
Searches the table for the entry specified by name and value Returns one of the following: - ``None``, no match at all - ``(index, name, None)`` for partial matches on name only. - ``(index, name, value)`` for perfect matches.
def clear_rr_ce_entries(self):
    """Reset the extent location of every tracked Rock Ridge CE block.

    Used to clear all data before assigning new extent locations.

    :raises pycdlibexception.PyCdlibInternalError: if the PVD is not initialized
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Primary Volume Descriptor is not yet initialized')
    for ce_block in self.rr_ce_blocks:
        ce_block.set_extent_location(-1)
A method to clear out all of the extent locations of all Rock Ridge Continuation Entries that the PVD is tracking. This can be used to reset all data before assigning new data. Parameters: None. Returns: Nothing.
def arches(self):
    """Return the list of architectures for this task (possibly empty).

    :return: list of arch strings (eg ["ppc64le", "x86_64"])
    """
    if self.method == 'image':
        return self.params[2]
    return [self.arch] if self.arch else []
Return a list of architectures for this task. :returns: a list of arch strings (eg ["ppc64le", "x86_64"]). The list is empty if this task has no arches associated with it.
def rotation_matrix(d): sin_angle = np.linalg.norm(d) if sin_angle == 0: return np.identity(3) d /= sin_angle eye = np.eye(3) ddt = np.outer(d, d) skew = np.array([[ 0, d[2], -d[1]], [-d[2], 0, d[0]], [ d[1], -d[0], 0]], dtype=np.f...
Calculates a rotation matrix given a vector d. The direction of d corresponds to the rotation axis. The length of d corresponds to the sin of the angle of rotation. Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
def Run(self, args): try: directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) except (IOError, OSError) as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(directory.ListFiles()) files.sort(key=lambda x: x.pathspec.path) for r...
Lists a directory.
def _get_cache_key(self, args, kwargs): hash_input = json.dumps({'name': self.name, 'args': args, 'kwargs': kwargs}, sort_keys=True) return hashlib.md5(hash_input).hexdigest()
Returns key to be used in cache
def _build_circle(self): total_weight = 0 for node in self._nodes: total_weight += self._weights.get(node, 1) for node in self._nodes: weight = self._weights.get(node, 1) ks = math.floor((40 * len(self._nodes) * weight) / total_weight) for i in xra...
Creates hash ring.
def get_single_payload(self, query_obj): payload = self.get_df_payload(query_obj) df = payload.get('df') status = payload.get('status') if status != utils.QueryStatus.FAILED: if df is not None and df.empty: payload['error'] = 'No data' else: ...
Returns a payload of metadata and data
def make_url(self, method):
    """Generate the Telegram API URL for ``method`` using the configured token."""
    token = self.settings()['token']
    return TELEGRAM_URL.format(token=quote(token), method=quote(method))
Generate a Telegram URL for this bot.
def calculate_windows(self, **kwargs): windows = find_windows(self.elements, self.coordinates, **kwargs) if windows: self.properties.update( { 'windows': { 'diameters': windows[0], 'centre_of_mass': windows[1], }...
Return the diameters of all windows in a molecule. This function first finds and then measures the diameters of all the window in the molecule. Returns ------- :class:`numpy.array` An array of windows' diameters. :class:`NoneType` If no windows ...
def pass_job(db: JobDB, result_queue: Queue, always_cache=False): @pull def pass_job_stream(job_source): result_sink = result_queue.sink() for message in job_source(): if message is EndOfQueue: return key, job = message if always_cache or ('sto...
Create a pull stream that receives jobs and passes them on to the database. If the job already has a result, that result is pushed onto the `result_queue`.
def prerequisites(self):
    """Map pipeline-getter names to the derived input names they must produce.

    Iterates through the pipeline inputs and collects, for every derived
    spec, the getter of the pipeline that derives it.

    :return: defaultdict mapping pipeline_getter -> set of input names
    """
    prereqs = defaultdict(set)
    for pipeline_input in self.inputs:
        spec = self._study.spec(pipeline_input)
        if spec.is_spec and spec.derived:
            prereqs[spec.pipeline_getter].add(pipeline_input.name)
    return prereqs
Iterates through the inputs of the pipeline and determines all the prerequisite pipelines
def bytes2guid(s):
    """Convert a 16-byte serialized GUID to its text form.

    The first three fields are little-endian, the rest big-endian
    (mixed-endian GUID layout).

    :param s: 16 raw bytes
    :return: canonical "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" string
    """
    assert isinstance(s, bytes)
    parts = list(struct.unpack("<IHH", s[:8]))
    # Zero-pad the final 6 bytes so they unpack as a big-endian 8-byte int.
    parts.extend(struct.unpack(">HQ", s[8:10] + b"\x00\x00" + s[10:]))
    return "%08X-%04X-%04X-%04X-%012X" % tuple(parts)
Converts a serialized GUID to a text GUID
def load(self, mkey, mdesc, mdict=None, merge=False): j = mdict if mdict else read_json(mdesc) if j and isinstance(j, dict): self.__meta['header'].update({mkey: mdesc}) if merge: self.__meta = dict_merge(self.__meta, j) else: self.__met...
Loads a dictionary into current meta :param mkey: Type of data to load. Is be used to reference the data from the 'header' within meta :param mdesc: Either filename of json-file to load or further description of imported data when `mdict` is used ...
def DeleteConflict(self, conflict_link, options=None): if options is None: options = {} path = base.GetPathFromLink(conflict_link) conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link) return self.DeleteResource(path, 'conflicts'...
Deletes a conflict. :param str conflict_link: The link to the conflict. :param dict options: The request options for the request. :return: The deleted Conflict. :rtype: dict
def process_signal(self, signum): if signum == signal.SIGTERM: LOGGER.info('Received SIGTERM, initiating shutdown') self.stop() elif signum == signal.SIGHUP: LOGGER.info('Received SIGHUP') if self.config.reload(): LOGGER.info('Configuration...
Invoked whenever a signal is added to the stack. :param int signum: The signal that was added
def start(self):
    """Start listening for incoming requests.

    Registers the receive callback on the listening stream; must not be
    called more than once.
    """
    assert not self._started
    stream = self._listening_stream
    stream.on_recv(self._recv_callback)
    self._started = True
Start to listen for incoming requests.
def addfield(self, pkt, buf, val):
    """Add the field to the buffer after applying the packet's endianness."""
    self.set_endianess(pkt)
    inner_field = self.fld
    return inner_field.addfield(pkt, buf, val)
add the field with endianness to the buffer
def split_line(self): hash_or_end = self.line.find(" temp = self.line[self.region_end:hash_or_end].strip(" |") self.coord_str = regex_paren.sub("", temp) if hash_or_end >= 0: self.meta_str = self.line[hash_or_end:] else: self.meta_str = ""
Split line into coordinates and meta string
def save_file_json(data, export_file):
    """Write ``data`` as pretty-printed JSON to ``export_file``.

    Parent directories are created as needed via ``create_dir``.
    """
    create_dir(os.path.dirname(export_file))
    with open(export_file, "w") as handle:
        json.dump(data, handle, indent=4)
Write data to a json file.
def parse_metadata(cls, obj, xml): for child in xml.xpath("ti:description", namespaces=XPATH_NAMESPACES): lg = child.get("{http://www.w3.org/XML/1998/namespace}lang") if lg is not None: obj.set_cts_property("description", child.text, lg) for child in xml.xpath("ti...
Parse a resource to feed the object :param obj: Obj to set metadata of :type obj: XmlCtsTextMetadata :param xml: An xml representation object :type xml: lxml.etree._Element
def create(args):
    """cdstarcat create PATH: create objects in CDSTAR specified by PATH.

    A file yields a single object (possibly with multiple bitstreams); a
    directory yields one object per file, recursing into subdirectories.
    """
    with _catalog(args) as cat:
        for fname, created, obj in cat.create(args.args[0], {}):
            status = 'new' if created else 'existing'
            args.log.info('{0} -> {1} object {2.id}'.format(fname, status, obj))
cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories).
def interpolate_with(self, other_tf, t): if t < 0 or t > 1: raise ValueError('Must interpolate between 0 and 1') interp_translation = (1.0 - t) * self.translation + t * other_tf.translation interp_rotation = transformations.quaternion_slerp(self.quaternion, other_tf.quaternion, t) ...
Interpolate with another rigid transformation. Parameters ---------- other_tf : :obj:`RigidTransform` The transform to interpolate with. t : float The interpolation step in [0,1], where 0 favors this RigidTransform. Returns ------- :obj:...
def norm_score(self):
    """Return the normalized score.

    Equals 1.0 for a z-score of 0, falling toward 0.0 for extreme values.
    """
    # Standard normal CDF of the z-score.
    cdf = 0.5 * (1.0 + math.erf(self.score / math.sqrt(2.0)))
    return 1 - 2 * math.fabs(0.5 - cdf)
Return the normalized score. Equals 1.0 for a z-score of 0, falling to 0.0 for extremely positive or negative values.
def bind(self, environ):
    """Bind a new WSGI environment.

    Extracts a normalized request path (single leading slash) and an
    upper-cased request method.
    """
    self.environ = environ
    raw_path = environ.get('PATH_INFO', '/')
    self.path = '/' + raw_path.lstrip('/')
    self.method = environ.get('REQUEST_METHOD', 'GET').upper()
Bind a new WSGI environment. This is done automatically for the global `bottle.request` instance on every request.
def _gen_3spec(op, path, xattr=False):
    """Build a value-less Spec tuple for the underlying C extension.

    :param op: subdocument opcode
    :param path: the path to fetch
    :param xattr: operate on an extended attribute when True
    :return: a Spec suitable for passing to the underlying C extension
    """
    flags = _P.SDSPEC_F_XATTR if xattr else 0
    return Spec(op, path, flags)
Returns a Spec tuple suitable for passing to the underlying C extension. This variant is called for operations that lack an input value. :param str path: The path to fetch :param bool xattr: Whether this is an extended attribute :return: a spec suitable for passing to the underlying C extension
def as_rainbow(self, offset=35, style=None, rgb_mode=False):
    """Wrap each frame in a Colr object, using `Colr.rainbow`."""
    options = {'offset': offset, 'style': style, 'rgb_mode': rgb_mode}
    return self._as_rainbow(('wrapper', ), **options)
Wrap each frame in a Colr object, using `Colr.rainbow`.
def ci_macos(): run_command("brew install $PYTHON pipenv || echo \"Installed PipEnv\"") command_string = "sudo -H $PIP install " for element in DEPENDENCIES + REQUIREMENTS + ["-U"]: command_string += element + " " run_command(command_string) run_command("sudo -H $PYTHON setup.py bdist_wheel"...
Setup Travis-CI macOS for wheel building
def clicks(self, tag=None, fromdate=None, todate=None):
    """Get total counts of unique links that were clicked."""
    filters = {'tag': tag, 'fromdate': fromdate, 'todate': todate}
    return self.call("GET", "/stats/outbound/clicks", **filters)
Gets total counts of unique links that were clicked.
def search(self, search):
    """Search Zenodo records for the string ``search``.

    :param search: string to search ('/' is replaced by a space)
    :return: list of matching records
    """
    query = search.replace('/', ' ')
    return self._get_records({'q': query})
search Zenodo record for string `search` :param search: string to search :return: Record[] results
def get_tags_users(self, id_): return _get_request(_TAGS_USERS.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, ...
Get the users that are tagged, based on the given id_
def _set_cursor_position(self, value): original_position = self.__cursor_position self.__cursor_position = max(0, value) return value != original_position
Set cursor position. Return whether it changed.
def git_wrapper(path):
    """Return a cached git wrapper instance appropriate for ``path``.

    The legacy wrapper is chosen when the installed GitPython exposes
    ``Repo.commits``.
    """
    path = os.path.abspath(path)
    if path not in _wrapper_cache:
        factory = _GitWrapperLegacy if hasattr(Repo, 'commits') else _GitWrapper
        _wrapper_cache[path] = factory(path)
    return _wrapper_cache[path]
Get appropriate wrapper factory and cache instance for path
def install_versioning(self, conn):
    """Create the version table in an already populated database and insert
    the base script.

    :param conn: a DB API 2 connection
    """
    logging.info('Creating the versioning table %s', self.version_table)
    conn.executescript(CREATE_VERSIONING % self.version_table)
    base_script = self.read_scripts()[0]
    self._insert_script(base_script, conn)
Create the version table into an already populated database and insert the base script. :param conn: a DB API 2 connection
def job(self, name):
    """Search for a specific job by its name.

    :param name: name of the job to search
    :return: found job or None
    """
    return next((candidate for candidate in self.jobs()
                 if candidate.data.name == name), None)
Method for searching specific job by it's name. :param name: name of the job to search. :return: found job or None. :rtype: yagocd.resources.job.JobInstance
def decompress(self, chunk): try: return self._decompressobj.decompress(chunk) except zlib.error: if self._first_chunk: self._decompressobj = zlib.decompressobj(-zlib.MAX_WBITS) return self._decompressobj.decompress(chunk) raise ...
Decompress the chunk of data. :param bytes chunk: data chunk :rtype: bytes
def get_version_history_for_file(self, filepath): GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date', 'date_ISO_8601', 'relative_date', ...
Return a dict representation of this file's commit history This uses specially formatted git-log output for easy parsing, as described here: http://blog.lost-theory.org/post/how-to-parse-git-log-output/ For a full list of available fields, see: http://linux.die.net/man/1/git-log
def get_themes():
    """Return the list of available theme names (one per ``styles/*.less``)."""
    styles_dir = os.path.join(package_dir, 'styles')
    pattern = '{0}/*.less'.format(styles_dir)
    return [os.path.basename(path).replace('.less', '') for path in glob(pattern)]
return list of available themes
def set_proxy_bypass(domains, network_service="Ethernet"):
    """Set the domains allowed to bypass the proxy (macOS ``networksetup``).

    :param domains: iterable of domain strings allowed to bypass the proxy
    :param network_service: the network service to apply the changes to
    :return: True if networksetup reported no error
    """
    cmd = 'networksetup -setproxybypassdomains {0} {1}'.format(
        network_service, ' '.join(domains))
    out = __salt__['cmd.run'](cmd)
    return 'error' not in out
Sets the domains that can bypass the proxy domains An array of domains allowed to bypass the proxy network_service The network service to apply the changes to, this only necessary on macOS CLI Example: .. code-block:: bash salt '*' proxy.set_proxy_bypass "['127.0.0.1...
def suspend_queues(self, active_queues, sleep_time=10.0):
    """Suspend Celery queues and wait for running tasks to complete.

    :param active_queues: queues to disable
    :param sleep_time: seconds to wait between polls for active tasks
    """
    for active_queue in active_queues:
        self.disable_queue(active_queue)
    while self.get_active_tasks():
        time.sleep(sleep_time)
Suspend Celery queues and wait for running tasks to complete.
def find_all_segment(text: str, custom_dict: Trie = None) -> List[str]:
    """Get all possible segmentation variations of ``text``.

    :param str text: input string to be tokenized
    :param custom_dict: optional custom dictionary trie
    :return: list of segment variations (empty for non-string or empty input)
    """
    if not text or not isinstance(text, str):
        return []
    segments = list(_multicut(text, custom_dict=custom_dict))
    return list(_combine(segments))
Get all possible segment variations :param str text: input string to be tokenized :return: returns list of segment variations
def create_session(self): url = self.build_url(self._endpoints.get('create_session')) response = self.con.post(url, data={'persistChanges': self.persist}) if not response: raise RuntimeError('Could not create session as requested by the user.') data = response.json() ...
Request a new session id
def get_next_action(self, request, application, label, roles): if label is not None: return HttpResponseBadRequest("<h1>Bad Request</h1>") actions = self.get_actions(request, application, roles) if request.method == "GET": context = self.context context.update...
Django view method. We provide a default detail view for applications.
def process_pc_pathsbetween(gene_names, neighbor_limit=1, database_filter=None, block_size=None): if not block_size: model = pcc.graph_query('pathsbetween', gene_names, neighbor_limit=neighbor_limit, database_filter=...
Returns a BiopaxProcessor for a PathwayCommons paths-between query. The paths-between query finds the paths between a set of genes. Here source gene names are given in a single list and all directions of paths between these genes are considered. http://www.pathwaycommons.org/pc2/#graph http://www...
def promote16(u, fn=None, *args, **kwargs): r dtype = np.float32 if u.dtype == np.float16 else u.dtype up = np.asarray(u, dtype=dtype) if fn is None: return up else: v = fn(up, *args, **kwargs) if isinstance(v, tuple): vp = tuple([np.asarray(vk, dtype=u.dtype) for...
r""" Utility function for use with functions that do not support arrays of dtype ``np.float16``. This function has two distinct modes of operation. If called with only the `u` parameter specified, the returned value is either `u` itself if `u` is not of dtype ``np.float16``, or `u` promoted to ``np....
def _pull(self): pull = self.m( 'pulling remote changes', cmdd=dict(cmd='git pull --tags', cwd=self.local), critical=False ) if 'CONFLICT' in pull.get('out'): self.m( 'Congratulations! You have merge conflicts in the repository!', ...
Helper function to pull from remote
def validate(self, path: str, strictness: str = "speconly") -> bool: valid1 = True with h5py.File(path, mode="r") as f: valid1 = self.validate_spec(f) if not valid1: self.errors.append("For help, see http://linnarssonlab.org/loompy/format/") valid2 = True if strictness == "conventions": with loompy...
Validate a file for conformance to the Loom specification Args: path: Full path to the file to be validated strictness: "speconly" or "conventions" Remarks: In "speconly" mode, conformance is assessed relative to the file format specification at http://linnarssonlab.org/loompy/format/. In "convent...
def do_first(self): pid = os.getpid() self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid)) outfile = self.basename + '.pnm' filetype = self.file_type(self.srcfile) if (filetype == 'png'): if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' +...
Create PNM file from input image file.
def update(self, friendly_name=values.unset, assignment_callback_url=values.unset, fallback_assignment_callback_url=values.unset, configuration=values.unset, task_reservation_timeout=values.unset): return self._proxy.update( friendly_name=friendly_name, ...
Update the WorkflowInstance :param unicode friendly_name: A string representing a human readable name for this Workflow. :param unicode assignment_callback_url: A valid URL for the application that will process task assignment events. :param unicode fallback_assignment_callback_url: If the requ...
def _validate_user_class(cls, user_class): PraetorianError.require_condition( getattr(user_class, 'lookup', None) is not None, textwrap.dedent( ), ) PraetorianError.require_condition( getattr(user_class, 'identify', None) is not None, textwrap.dede...
Validates the supplied user_class to make sure that it has the class methods necessary to function correctly. Requirements: - ``lookup`` method. Accepts a string parameter, returns instance - ``identify`` method. Accepts an identity parameter, returns instance
def _conv(self,v): if isinstance(v,str): return '"%s"' %v.replace("'","''") elif isinstance(v,datetime.datetime): if v.tzinfo is not None: raise ValueError,\ "datetime instances with tzinfo not supported" return '"%s"' %self....
Convert Python values to MySQL values
def update_vip_request(self, vip_request, vip_request_id):
    """Update a vip request via the v3 API.

    :param vip_request: vip_request object
    :param vip_request_id: vip_request id
    """
    uri = 'api/v3/vip-request/%s/' % vip_request_id
    payload = {'vips': [vip_request]}
    return super(ApiVipRequest, self).put(uri, payload)
Method to update vip request param vip_request: vip_request object param vip_request_id: vip_request id
def parse_compound_table_file(path, f): context = FilePathContext(path) for i, row in enumerate(csv.DictReader(f, delimiter=str('\t'))): if 'id' not in row or row['id'].strip() == '': raise ParseError('Expected `id` column in table') props = {key: value for key, value in iteritems(ro...
Parse a tab-separated file containing compound IDs and properties The compound properties are parsed according to the header which specifies which property is contained in each column.
def random_shift(image, wsr=0.1, hsr=0.1): height, width, _ = common_layers.shape_list(image) width_range, height_range = wsr*width, hsr*height height_translations = tf.random_uniform((1,), -height_range, height_range) width_translations = tf.random_uniform((1,), -width_range, width_range) translations = tf.c...
Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the width. Returns: images: images trans...
def assertFileSizeEqual(self, filename, size, msg=None):
    """Fail unless ``filename``'s size equals ``size`` (via the '==' operator).

    :param filename: str, bytes, or file-like
    :param size: expected size
    :param msg: optional custom failure message
    """
    actual_size = self._get_file_size(filename)
    self.assertEqual(actual_size, size, msg=msg)
Fail if ``filename`` does not have the given ``size`` as determined by the '==' operator. Parameters ---------- filename : str, bytes, file-like size : int, float msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard mes...
def _compress_json(self, j): compressed_json = copy.copy(j) compressed_json.pop('users', None) compressed_data = zlib.compress( json.dumps(j['users']).encode('utf-8'), self.zlib_compression_strength ) b64_data = base64.b64encode(compressed_data).decode('ut...
Compress the BLOB data portion of the usernotes. Arguments: j: the JSON in Schema v5 format (dict) Returns a dict with the 'users' key removed and 'blob' key added
def check_values_selection_field(cr, table_name, field_name, allowed_values): res = True cr.execute("SELECT %s, count(*) FROM %s GROUP BY %s;" % (field_name, table_name, field_name)) for row in cr.fetchall(): if row[0] not in allowed_values: logger.error( "...
check if the field selection 'field_name' of the table 'table_name' has only the values 'allowed_values'. If not return False and log an error. If yes, return True. .. versionadded:: 8.0
def build_command(chunks): if not chunks: raise ValueError( "No command parts: {} ({})".format(chunks, type(chunks))) if isinstance(chunks, str): return chunks parsed_pieces = [] for cmd_part in chunks: if cmd_part is None: continue try: ...
Create a command from various parts. The parts provided may include a base, flags, option-bound arguments, and positional arguments. Each element must be either a string or a two-tuple. Raw strings are interpreted as either the command base, a pre-joined pair (or multiple pairs) of option and argument,...
def post_save_moderation(self, sender, comment, request, **kwargs):
    """Apply post-save moderation (notification email) to new comments
    whose model is registered."""
    model = comment.content_type.model_class()
    moderator = self._registry.get(model)
    if moderator is None:
        return
    moderator.email(comment, comment.content_object, request)
Apply any necessary post-save moderation steps to new comments.
def listTargets(self):
    """Return a list of (id, name, path) tuples for all items in the vault."""
    cursor = self.db.execute('select * from {}'.format(self.TABLE_ITEMS))
    return [(item_id, name, path) for item_id, name, path in cursor]
Returns a list of all the items secured in the vault
def tokenize(self, config): tokens = [] reg_ex = re.compile(self.TOKENS[0], re.M | re.I) for token in re.finditer(reg_ex, config): value = token.group(0) if token.group("operator"): t_type = "operator" elif token.group("literal"): ...
Break the config into a series of tokens
def discovery(self, url=None): if url: data = self.session.get(url).content elif self.discovery_url: response = self.session.get(self.discovery_url) if self.format == 'xml': data = xml(response.text) else: data = response.js...
Retrieve the standard discovery file that provides routing information. >>> Three().discovery() {'discovery': 'data'}
def motif_from_consensus(cons, n=12): width = len(cons) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] m = Motif() for i,char in enumerate(cons): for nuc in m.iupac[char.upper()]: pfm[i][nucs[nuc]] = n / len(m.iupac[char.upper()]) m = Mot...
Convert consensus sequence to motif. Converts a consensus sequences using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ...
def render_relation(self, r, **args):
    """Render an object property.

    :param r: relation identifier or None
    :return: "." for None, the configured symbol if mapped, else r unchanged
    """
    if r is None:
        return "."
    return self.config.relsymbolmap.get(r, r)
Render an object property
def purge_old(self):
    """Remove redis keys that are beyond the ``keep_max`` limit."""
    if self.keep_max is None:
        return
    keys = self.redis_conn.keys(self.get_key() + ':*')
    keys.sort(reverse=True)
    # Keys sort newest-first; drop from the tail (oldest) until within limit.
    while len(keys) > self.keep_max:
        self.redis_conn.delete(keys.pop())
Removes keys that are beyond our keep_max limit
def msg(self, message, *args, **kwargs): target = kwargs.pop('target', None) raw = kwargs.pop('raw', False) if not target: target = self.line.sender.nick if self.line.pm else \ self.line.target if not raw: kw = { 'm': self, ...
Shortcut to send a message through the connection. This function sends the input message through the connection. A target can be defined, else it will send it to the channel or user from the input Line, effectively responding on whatever triggered the command which calls this function t...
def configure(access_key=None, secret_key=None, logger=None): if not logger: logger = log.get_logger('s3') if not all([access_key, secret_key]): logger.info('') access_key = input('AWS Access Key: ') secret_key = input('AWS Secret Key: ') _write_config(access_key, secret_key)...
Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key
def _extract_asset_urls(self, asset_ids): dom = get_page(self._session, OPENCOURSE_ASSET_URL, json=True, ids=quote_plus(','.join(asset_ids))) return [{'id': element['id'], 'url': element['url'].strip()} for element in dom['el...
Extract asset URLs along with asset ids. @param asset_ids: List of ids to get URLs for. @type assertn: [str] @return: List of dictionaries with asset URLs and ids. @rtype: [{ 'id': '<id>', 'url': '<url>' }]
def getLayerName(url): urlInfo = None urlSplit = None try: urlInfo = urlparse.urlparse(url) urlSplit = str(urlInfo.path).split('/') name = urlSplit[len(urlSplit)-3] return name except: return url finally: urlInfo = None urlSplit = None ...
Extract the layer name from a url. Args: url (str): The url to parse. Returns: str: The layer name. Examples: >>> url = "http://services.arcgis.com/<random>/arcgis/rest/services/test/FeatureServer/12" >>> arcresthelper.common.getLayerName(url) 'test'
def get_all_tep(self): teps = {} for p in self.get_enabled_plugins: for e, v in p["plugin_tep"].items(): tep = teps.get(e, dict()) tepHF = tep.get("HTMLFile", []) tepHS = tep.get("HTMLString", []) tepHF += [s for f, s in v.items...
Template extension point :returns: dict: {tep: dict(HTMLFile=[], HTMLString=[]), tep...}
def updateMetadata(self, new):
    """Replace the stored broker metadata; node IDs must match.

    Future connections use the new host/port; any existing connection is
    not dropped.

    :param new: BrokerMetadata with the same node ID as the current one
    :raises ValueError: on node ID mismatch
    """
    if self.node_id != new.node_id:
        raise ValueError(
            "Broker metadata {!r} doesn't match node_id={}".format(new, self.node_id))
    self.node_id, self.host, self.port = new.node_id, new.host, new.port
Update the metadata stored for this broker. Future connections made to the broker will use the host and port defined in the new metadata. Any existing connection is not dropped, however. :param new: :clas:`afkak.common.BrokerMetadata` with the same node ID as the ...
def disable_alarm_actions(self, alarm_names):
    """Disable actions for the specified alarms.

    :param alarm_names: list of alarm names
    :return: status of the DisableAlarmActions request
    """
    request_params = {}
    self.build_list_params(request_params, alarm_names, 'AlarmNames.member.%s')
    return self.get_status('DisableAlarmActions', request_params)
Disables actions for the specified alarms. :type alarms: list :param alarms: List of alarm names.
def sfs_folded(ac, n=None):
    """Compute the folded site frequency spectrum from biallelic allele counts.

    :param ac: (n_variants, 2) allele counts array
    :param n: total number of chromosomes called (optional)
    :return: counts per minor-allele-frequency class, length n//2 + 1
    """
    ac, n = _check_ac_n(ac, n)
    # Minor allele count per variant.
    minor_counts = np.amin(ac, axis=1).astype(int, copy=False)
    n_classes = n // 2 + 1
    return np.bincount(minor_counts, minlength=n_classes)
Compute the folded site frequency spectrum given reference and alternate allele counts at a set of biallelic variants. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array. n : int, optional The total number of chromosomes called. Returns --...
def pack_block(self, block: BaseBlock, *args: Any, **kwargs: Any) -> BaseBlock: if 'uncles' in kwargs: uncles = kwargs.pop('uncles') kwargs.setdefault('uncles_hash', keccak(rlp.encode(uncles))) else: uncles = block.uncles provided_fields = set(kwargs.keys()) ...
Pack block for mining. :param bytes coinbase: 20-byte public address to receive block reward :param bytes uncles_hash: 32 bytes :param bytes state_root: 32 bytes :param bytes transaction_root: 32 bytes :param bytes receipt_root: 32 bytes :param int bloom: :param ...
def queuedb_findall(path, queue_id, name=None, offset=None, limit=None): sql = "SELECT * FROM queue WHERE queue_id = ? ORDER BY rowid ASC" args = (queue_id,) if name: sql += ' AND name = ?' args += (name,) if limit: sql += ' LIMIT ?' args += (limit,) if offset: ...
Get all queued entries for a queue and a name. If name is None, then find all queue entries Return the rows on success (empty list if not found) Raise on error
def cleanup(self): if self.data.hooks and len(self.data.hooks.cleanup) > 0: env = self.data.env_list[0].copy() env.update({'PIPELINE_RESULT': 'SUCCESS', 'PIPELINE_SHELL_EXIT_CODE': '0'}) config = ShellConfig(script=self.data.hooks.cleanup, model=self.model, ...
Run cleanup script of pipeline when hook is configured.