Columns:
sentence1: string, lengths 52 to 3.87M
sentence2: string, lengths 1 to 47.2k
label: string, 1 class (entailment)
def parse_value(self, value):
    """Cast value to `bool`."""
    parsed = super(BoolField, self).parse_value(value)
    return bool(parsed) if parsed is not None else None
Cast value to `bool`.
entailment
def parse_value(self, values):
    """Cast value to proper collection."""
    result = self.get_default_value()
    if not values:
        return result
    if not isinstance(values, list):
        return values
    return [self._cast_value(value) for value in values]
Cast value to proper collection.
entailment
def parse_value(self, value):
    """Parse value to proper model type."""
    if not isinstance(value, dict):
        return value
    embed_type = self._get_embed_type()
    return embed_type(**value)
Parse value to proper model type.
entailment
def to_struct(self, value):
    """Cast `time` object to string."""
    if self.str_format:
        return value.strftime(self.str_format)
    return value.isoformat()
Cast `time` object to string.
entailment
def parse_value(self, value):
    """Parse string into instance of `time`."""
    if value is None:
        return value
    if isinstance(value, datetime.time):
        return value
    return parse(value).timetz()
Parse string into instance of `time`.
entailment
def to_struct(self, value):
    """Cast `date` object to string."""
    if self.str_format:
        return value.strftime(self.str_format)
    return value.strftime(self.default_format)
Cast `date` object to string.
entailment
def parse_value(self, value):
    """Parse string into instance of `datetime`."""
    if isinstance(value, datetime.datetime):
        return value
    if value:
        return parse(value)
    else:
        return None
Parse string into instance of `datetime`.
entailment
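The date/time parsers above call a bare `parse` helper; in libraries of this style that is typically python-dateutil's parser. A minimal sketch under that assumption, showing the string-or-instance behavior the snippets implement:

    import datetime
    from dateutil.parser import parse

    # Assuming `parse` is dateutil's: an ISO string becomes a datetime,
    # and .timetz() extracts a zone-aware time-of-day, as in the time field above.
    dt = parse("2023-05-01T12:30:00+02:00")
    assert isinstance(dt, datetime.datetime)
    t = parse("12:30:00+02:00").timetz()
    assert isinstance(t, datetime.time)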
def validate(self, value):
    """Validate value."""
    if self.exclusive:
        if value <= self.minimum_value:
            tpl = "'{value}' is lower or equal than minimum ('{min}')."
            raise ValidationError(
                tpl.format(value=value, min=self.minimum_value))
    ...
Validate value.
entailment
def modify_schema(self, field_schema):
    """Modify field schema."""
    field_schema['minimum'] = self.minimum_value
    if self.exclusive:
        field_schema['exclusiveMinimum'] = True
Modify field schema.
entailment
def validate(self, value):
    """Validate value."""
    if self.exclusive:
        if value >= self.maximum_value:
            tpl = "'{val}' is bigger or equal than maximum ('{max}')."
            raise ValidationError(
                tpl.format(val=value, max=self.maximum_value))
    els...
Validate value.
entailment
def modify_schema(self, field_schema):
    """Modify field schema."""
    field_schema['maximum'] = self.maximum_value
    if self.exclusive:
        field_schema['exclusiveMaximum'] = True
Modify field schema.
entailment
def validate(self, value):
    """Validate value."""
    flags = self._calculate_flags()
    try:
        result = re.search(self.pattern, value, flags)
    except TypeError as te:
        raise ValidationError(*te.args)
    if not result:
        raise ValidationError(
            ...
Validate value.
entailment
def modify_schema(self, field_schema):
    """Modify field schema."""
    field_schema['pattern'] = utilities.convert_python_regex_to_ecma(
        self.pattern, self.flags)
Modify field schema.
entailment
def validate(self, value):
    """Validate value."""
    len_ = len(value)
    if self.minimum_value is not None and len_ < self.minimum_value:
        tpl = "Value '{val}' length is lower than allowed minimum '{min}'."
        raise ValidationError(tpl.format(
            val=value, min=self.m...
Validate value.
entailment
def modify_schema(self, field_schema):
    """Modify field schema."""
    if self.minimum_value:
        field_schema['minLength'] = self.minimum_value
    if self.maximum_value:
        field_schema['maxLength'] = self.maximum_value
Modify field schema.
entailment
def to_struct(model):
    """Cast instance of model to python structure.

    :param model: Model to be casted.
    :rtype: ``dict``
    """
    model.validate()
    resp = {}
    for _, name, field in model.iterate_with_name():
        value = field.__get__(model)
        if value is None:
            continue
        ...
Cast instance of model to python structure. :param model: Model to be casted. :rtype: ``dict``
entailment
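The field and `to_struct` snippets above match the jsonmodels API. Assuming that library, a short usage sketch (the `Person` model is hypothetical):

    from jsonmodels import models, fields

    class Person(models.Base):
        name = fields.StringField(required=True)
        age = fields.IntField()

    person = Person(name='Ada', age=36)
    # Validates, then serializes to a plain dict (key order may vary):
    print(person.to_struct())  # {'name': 'Ada', 'age': 36}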
def run_from_argv(self, argv):
    """
    Set up any environment changes requested (e.g., Python path
    and Django settings), then run this command. If the
    command raises a ``CommandError``, intercept it and print it
    sensibly to stderr.
    """
    parser = self.create_parser(argv...
Set up any environment changes requested (e.g., Python path and Django settings), then run this command. If the command raises a ``CommandError``, intercept it and print it sensibly to stderr.
entailment
def create_parser(self, prog_name, subcommand):
    """
    Create and return the ``OptionParser`` which will be used to
    parse the arguments to this command.
    """
    parser = argparse.ArgumentParser(
        prog='%s %s' % (prog_name, subcommand),
        description=self.help)
    parser.add_argument(...
Create and return the ``OptionParser`` which will be used to parse the arguments to this command.
entailment
def rst(filename):
    '''
    Load rst file and sanitize it for PyPI.

    Remove unsupported github tags:
     - code-block directive
     - travis ci build badge
    '''
    content = codecs.open(filename, encoding='utf-8').read()
    for regex, replacement in PYPI_RST_FILTERS:
        content = re.sub(regex, replace...
Load rst file and sanitize it for PyPI. Remove unsupported github tags: - code-block directive - travis ci build badge
entailment
def javascript(filename, type='text/javascript'):
    '''A simple shortcut to render a ``script`` tag to a static javascript file'''
    if '?' in filename and len(filename.split('?')) == 2:
        filename, params = filename.split('?')
        return '<script type="%s" src="%s?%s"></script>' % (type, staticfiles_stor...
A simple shortcut to render a ``script`` tag to a static javascript file
entailment
def jquery_js(version=None, migrate=False):
    '''A shortcut to render a ``script`` tag for the packaged jQuery'''
    version = version or settings.JQUERY_VERSION
    suffix = '.min' if not settings.DEBUG else ''
    libs = [js_lib('jquery-%s%s.js' % (version, suffix))]
    if _boolean(migrate):
        libs.append(j...
A shortcut to render a ``script`` tag for the packaged jQuery
entailment
def django_js(context, jquery=True, i18n=True, csrf=True, init=True):
    '''Include Django.js javascript library in the page'''
    return {
        'js': {
            'minified': not settings.DEBUG,
            'jquery': _boolean(jquery),
            'i18n': _boolean(i18n),
            'csrf': _boolean(csrf),
            ...
Include Django.js javascript library in the page
entailment
def django_js_init(context, jquery=False, i18n=True, csrf=True, init=True):
    '''Include Django.js javascript library initialization in the page'''
    return {
        'js': {
            'jquery': _boolean(jquery),
            'i18n': _boolean(i18n),
            'csrf': _boolean(csrf),
            'init': _boolean(...
Include Django.js javascript library initialization in the page
entailment
def as_dict(self):
    '''
    Serialize the context as a dictionary from a given request.
    '''
    data = {}
    if settings.JS_CONTEXT_ENABLED:
        for context in RequestContext(self.request):
            for key, value in six.iteritems(context):
                if settings.JS_...
Serialize the context as a dictionary from a given request.
entailment
def process_LANGUAGE_CODE(self, language_code, data):
    '''
    Fix language code when set to non included default `en`
    and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``.
    '''
    # Dirty hack to fix non included default
    language_code = 'en-us' if language_co...
Fix language code when set to non included default `en` and add the extra variables ``LANGUAGE_NAME`` and ``LANGUAGE_NAME_LOCAL``.
entailment
def handle_user(self, data):
    '''
    Insert user information in data.

    Override it to add extra user attributes.
    '''
    # Default to unauthenticated anonymous user
    data['user'] = {
        'username': '',
        'is_authenticated': False,
        'is_staff': False,
        ...
Insert user information in data. Override it to add extra user attributes.
entailment
def class_from_string(name):
    '''
    Get a python class object from its name
    '''
    module_name, class_name = name.rsplit('.', 1)
    __import__(module_name)
    module = sys.modules[module_name]
    return getattr(module, class_name)
Get a python class object from its name
entailment
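Since `class_from_string` is self-contained, here is a runnable usage example (the dotted path is arbitrary):

    import sys

    def class_from_string(name):
        # Import the module, then pull the class attribute off it.
        module_name, class_name = name.rsplit('.', 1)
        __import__(module_name)
        module = sys.modules[module_name]
        return getattr(module, class_name)

    OrderedDict = class_from_string('collections.OrderedDict')
    print(OrderedDict(a=1))  # an OrderedDict containing {'a': 1}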
def glob(cls, files=None):
    '''
    Glob a pattern or a list of pattern static storage relative(s).
    '''
    files = files or []
    if isinstance(files, str):
        files = os.path.normpath(files)
        matches = lambda path: matches_patterns(path, [files])
        return [pat...
Glob a pattern or a list of pattern static storage relative(s).
entailment
def execute(self, command):
    '''
    Execute a subprocess yielding output lines
    '''
    process = Popen(command, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
    while True:
        if process.poll() is not None:
            self.returncode = process.returncode  # pylint: disa...
Execute a subprocess yielding output lines
entailment
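The `execute` body is truncated above; here is a standalone sketch of the same line-streaming pattern (not the original implementation):

    from subprocess import Popen, PIPE, STDOUT

    def execute(command):
        # Merge stderr into stdout and yield decoded lines as they arrive.
        process = Popen(command, stdout=PIPE, stderr=STDOUT,
                        universal_newlines=True)
        for line in iter(process.stdout.readline, ''):
            yield line.rstrip('\n')
        process.stdout.close()
        process.wait()

    for line in execute(['echo', 'hello']):
        print(line)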
def phantomjs(self, *args, **kwargs):
    '''
    Execute PhantomJS by giving ``args`` as command line arguments.

    If tests are run in verbose mode (``-v/--verbosity`` = 2), it outputs:
     - the title as header (with separators before and after)
     - modules and test names
     - assert...
Execute PhantomJS by giving ``args`` as command line arguments. If tests are run in verbose mode (``-v/--verbosity`` = 2), it outputs: - the title as header (with separators before and after) - modules and test names - assertion results (with ``django.utils.termcolors`` support) ...
entailment
def run_suite(self):
    '''
    Run a phantomjs test suite.

     - ``phantomjs_runner`` is mandatory.
     - Either ``url`` or ``url_name`` needs to be defined.
    '''
    if not self.phantomjs_runner:
        raise JsTestException('phantomjs_runner need to be defined')
    url = sel...
Run a phantomjs test suite. - ``phantomjs_runner`` is mandatory. - Either ``url`` or ``url_name`` needs to be defined.
entailment
def write(self, rows, keyed=False):
    """Write rows/keyed_rows to table
    """
    for row in rows:
        keyed_row = row
        if not keyed:
            keyed_row = dict(zip(self.__schema.field_names, row))
        keyed_row = self.__convert_row(keyed_row)
        if self.__check...
Write rows/keyed_rows to table
entailment
def __prepare_bloom(self):
    """Prepare bloom for existing checks
    """
    self.__bloom = pybloom_live.ScalableBloomFilter()
    columns = [getattr(self.__table.c, key) for key in self.__update_keys]
    keys = select(columns).execution_options(stream_results=True).execute()
    for key in ...
Prepare bloom for existing checks
entailment
def __insert(self):
    """Insert rows to table
    """
    if len(self.__buffer) > 0:
        # Insert data
        statement = self.__table.insert()
        if self.__autoincrement:
            statement = statement.returning(
                getattr(self.__table.c, self.__autoincremen...
Insert rows to table
entailment
def __update(self, row):
    """Update rows in table
    """
    expr = self.__table.update().values(row)
    for key in self.__update_keys:
        expr = expr.where(getattr(self.__table.c, key) == row[key])
    if self.__autoincrement:
        expr = expr.returning(getattr(self.__table.c, ...
Update rows in table
entailment
def __check_existing(self, row):
    """Check if row exists in table
    """
    if self.__update_keys is not None:
        key = tuple(row[key] for key in self.__update_keys)
        if key in self.__bloom:
            return True
        self.__bloom.add(key)
        return False
    ...
Check if row exists in table
entailment
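The writer above de-duplicates rows with a Bloom filter keyed on the update keys. A hypothetical standalone sketch of that check, assuming the pybloom_live package used in `__prepare_bloom`:

    import pybloom_live

    bloom = pybloom_live.ScalableBloomFilter()
    update_keys = ('id',)  # illustrative key set

    def seen_before(row):
        # Probabilistic membership test: false positives are possible,
        # false negatives are not, which suits an update-vs-insert split.
        key = tuple(row[k] for k in update_keys)
        if key in bloom:
            return True
        bloom.add(key)
        return False

    print(seen_before({'id': 1}))  # False
    print(seen_before({'id': 1}))  # True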
def buckets(self):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """
    buckets = []
    for table in self.__metadata.sorted_tables:
        bucket = self.__mapper.restore_bucket(table.name)
        if bucket is not None:
            buckets.append(bucket)
    ...
https://github.com/frictionlessdata/tableschema-sql-py#storage
entailment
def create(self, bucket, descriptor, force=False, indexes_fields=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """
    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    descriptors = descriptor
    ...
https://github.com/frictionlessdata/tableschema-sql-py#storage
entailment
def delete(self, bucket=None, ignore=False):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """
    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    elif bucket is None:
        buckets = reversed(se...
https://github.com/frictionlessdata/tableschema-sql-py#storage
entailment
def describe(self, bucket, descriptor=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """
    # Set descriptor
    if descriptor is not None:
        self.__descriptors[bucket] = descriptor
    # Get descriptor
    else:
        descriptor = self.__desc...
https://github.com/frictionlessdata/tableschema-sql-py#storage
entailment
def iter(self, bucket):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """
    # Get table and fallbacks
    table = self.__get_table(bucket)
    schema = tableschema.Schema(self.describe(bucket))
    # Open and close transaction
    with self.__connection.begin(...
https://github.com/frictionlessdata/tableschema-sql-py#storage
entailment
def write(self, bucket, rows, keyed=False, as_generator=False, update_keys=None):
    """https://github.com/frictionlessdata/tableschema-sql-py#storage
    """
    # Check update keys
    if update_keys is not None and len(update_keys) == 0:
        message = 'Argument "update_keys" cannot be an em...
https://github.com/frictionlessdata/tableschema-sql-py#storage
entailment
def __get_table(self, bucket):
    """Get table by bucket
    """
    table_name = self.__mapper.convert_bucket(bucket)
    if self.__dbschema:
        table_name = '.'.join((self.__dbschema, table_name))
    return self.__metadata.tables[table_name]
Get table by bucket
entailment
def __reflect(self):
    """Reflect metadata
    """
    def only(name, _):
        return self.__only(name) and self.__mapper.restore_bucket(name) is not None
    self.__metadata.reflect(only=only)
Reflect metadata
entailment
def _get_field_comment(field, separator=' - '):
    """
    Create SQL comment from field's title and description

    :param field: tableschema-py Field, with optional 'title' and 'description' values
    :param separator:
    :return:

    >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': ...
Create SQL comment from field's title and description :param field: tableschema-py Field, with optional 'title' and 'description' values :param separator: :return: >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'})) 'my_title - my_desc' >>> _get_field_com...
entailment
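The doctest in `_get_field_comment` is cut off; judging from the visible expected output ('my_title - my_desc'), the helper joins the non-empty parts with the separator. A hypothetical equivalent:

    def field_comment(title, description, separator=' - '):
        # Join whichever of title/description are present, e.g.
        # ('my_title', 'my_desc') -> 'my_title - my_desc'.
        return separator.join(part for part in (title, description) if part)

    print(field_comment('my_title', 'my_desc'))  # 'my_title - my_desc'
    print(field_comment('my_title', None))       # 'my_title'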
def convert_descriptor(self, bucket, descriptor, index_fields=[], autoincrement=None):
    """Convert descriptor to SQL
    """
    # Prepare
    columns = []
    indexes = []
    fallbacks = []
    constraints = []
    column_mapping = {}
    table_name = self.convert_bucket(bucke...
Convert descriptor to SQL
entailment
def convert_row(self, keyed_row, schema, fallbacks):
    """Convert row to SQL
    """
    for key, value in list(keyed_row.items()):
        field = schema.get_field(key)
        if not field:
            del keyed_row[key]
        if key in fallbacks:
            value = _uncast_value(...
Convert row to SQL
entailment
def convert_type(self, type):
    """Convert type to SQL
    """
    # Default dialect
    mapping = {
        'any': sa.Text,
        'array': None,
        'boolean': sa.Boolean,
        'date': sa.Date,
        'datetime': sa.DateTime,
        'duration': None,
        'g...
Convert type to SQL
entailment
def restore_bucket(self, table_name):
    """Restore bucket from SQL
    """
    if table_name.startswith(self.__prefix):
        return table_name.replace(self.__prefix, '', 1)
    return None
Restore bucket from SQL
entailment
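`restore_bucket` is the inverse of the prefix-based bucket-to-table mapping; a self-contained illustration with a hypothetical prefix:

    prefix = 'data_'  # illustrative table prefix

    def restore_bucket(table_name):
        # Strip the prefix once from the left; unknown tables map to None.
        if table_name.startswith(prefix):
            return table_name.replace(prefix, '', 1)
        return None

    print(restore_bucket('data_articles'))  # 'articles'
    print(restore_bucket('unrelated'))      # None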
def restore_descriptor(self, table_name, columns, constraints, autoincrement_column=None):
    """Restore descriptor from SQL
    """
    # Fields
    fields = []
    for column in columns:
        if column.name == autoincrement_column:
            continue
        field_type = self.re...
Restore descriptor from SQL
entailment
def restore_row(self, row, schema):
    """Restore row from SQL
    """
    row = list(row)
    for index, field in enumerate(schema.fields):
        if self.__dialect == 'postgresql':
            if field.type in ['array', 'object']:
                continue
        row[index] = field.c...
Restore row from SQL
entailment
def restore_type(self, type):
    """Restore type from SQL
    """
    # All dialects
    mapping = {
        ARRAY: 'array',
        sa.Boolean: 'boolean',
        sa.Date: 'date',
        sa.DateTime: 'datetime',
        sa.Float: 'number',
        sa.Integer: 'integer',
        ...
Restore type from SQL
entailment
def open_hierarchy(self, path, relative_to_object_id, object_id, create_file_type=0):
    """
    CreateFileType
    0 - Creates no new object.
    1 - Creates a notebook with the specified name at the specified location.
    2 - Creates a section group with the specified name at the specifi...
CreateFileType 0 - Creates no new object. 1 - Creates a notebook with the specified name at the specified location. 2 - Creates a section group with the specified name at the specified location. 3 - Creates a section with the specified name at the specified location.
entailment
def create_new_page(self, section_id, new_page_style=0):
    """
    NewPageStyle
    0 - Create a Page that has Default Page Style
    1 - Create a blank page with no title
    2 - Create a blank page that has no title
    """
    try:
        self.process.CreateNewPage(section_...
NewPageStyle 0 - Create a Page that has Default Page Style 1 - Create a blank page with no title 2 - Create a blank page that has no title
entailment
def get_page_content(self, page_id, page_info=0):
    """
    PageInfo
    0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass.
    1 - Returns page content with no selection markup, but with all binary data.
    2 - Retur...
PageInfo 0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass. 1 - Returns page content with no selection markup, but with all binary data. 2 - Returns page content with selection markup, but no binary data. 3 -...
entailment
def publish(self, hierarchy_id, target_file_path, publish_format, clsid_of_exporter=""):
    """
    PublishFormat
    0 - Published page is in .one format.
    1 - Published page is in .onea format.
    2 - Published page is in .mht format.
    3 - Published page is in .pdf format.
    ...
PublishFormat 0 - Published page is in .one format. 1 - Published page is in .onea format. 2 - Published page is in .mht format. 3 - Published page is in .pdf format. 4 - Published page is in .xps format. 5 - Published page is in .doc or .docx format. ...
entailment
def get_special_location(self, special_location=0):
    """
    SpecialLocation
    0 - Gets the path to the Backup Folders folder location.
    1 - Gets the path to the Unfiled Notes folder location.
    2 - Gets the path to the Default Notebook folder location.
    """
    try:
        ...
SpecialLocation 0 - Gets the path to the Backup Folders folder location. 1 - Gets the path to the Unfiled Notes folder location. 2 - Gets the path to the Default Notebook folder location.
entailment
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory
        of the system.
    """
    mem_info = dict()
    for k, v in psutil.virtual_memory()._asdict().items():
        mem_info[k] = i...
Determine memory specifications of the machine. Returns ------- mem_info : dictionary Holds the current values for the total, free and used memory of the system.
entailment
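`memory` is a thin wrapper over psutil; the complete pattern fits in a couple of lines:

    import psutil

    # Snapshot the virtual-memory named tuple into a plain dict of ints,
    # as the truncated loop above does.
    mem_info = {k: int(v) for k, v in psutil.virtual_memory()._asdict().items()}
    print(mem_info['total'], mem_info['available'])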
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of ar...
Given a two-dimensional array with a dimension of size 'N', determine the number of rows or columns that can fit into memory. Parameters ---------- N : int The size of one of the dimensions of a two-dimensional array. n : int The number of arrays of size 'N' times 'chunk_siz...
entailment
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that
    purpose. Compression reduces the HDF5 file size and also helps improving
    I/O efficiency for large datasets...
Determine whether or not to use a compression on the array stored in a hierarchical data format, and which compression library to use to that purpose. Compression reduces the HDF5 file size and also helps improving I/O efficiency for large datasets. Parameters ---------- byte_co...
entailment
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix
    representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        R...
Return the adjacency matrix to a hypergraph, in sparse matrix representation. Parameters ---------- cluster_runs : array of shape (n_partitions, n_samples) Returns ------- hypergraph_adjacency : compressed sparse row matrix Represents the hypergraph associated with an ensemble ...
entailment
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency to disk in an HDF5 data structure.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """
    assert(hypergra...
Write a hypergraph adjacency to disk in an HDF5 data structure. Parameters ---------- hypergraph_adjacency : compressed sparse row matrix hdf5_file_name : file handle or string
entailment
def load_hypergraph_adjacency(hdf5_file_name):
    """
    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        pars = []
        for par i...
Parameters ---------- hdf5_file_name : file handle or string Returns ------- hypergraph_adjacency : compressed sparse row matrix
entailment
def cluster_ensembles(cluster_runs, hdf5_file_name=None, verbose=False, N_clusters_max=None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive consensus
    clustering the one with the highest average mutual informat...
Call up to three different functions for heuristic ensemble clustering (namely CSPA, HGPA and MCLA) then select as the definitive consensus clustering the one with the highest average mutual information score between its vector of consensus labels and the vectors of labels associated to each ...
entailment
def ceEvalMutual(cluster_runs, cluster_ensemble=None, verbose=False):
    """Compute a weighted average of the mutual information with the known
    labels, the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)...
Compute a weighted average of the mutual information with the known labels, the weights being proportional to the fraction of known labels. Parameters ---------- cluster_runs : array of shape (n_partitions, n_samples) Each row of this matrix is such that the i-th entry corresponds to the ...
entailment
def checkcl(cluster_run, verbose=False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for
        a given round of clustering. The samples not selected are lab...
Ensure that a cluster labelling is in a valid format. Parameters ---------- cluster_run : array of shape (n_samples,) A vector of cluster IDs for each of the samples selected for a given round of clustering. The samples not selected are labelled with NaN. verbose : Boolean, optional (...
entailment
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing a vector
    to the function checkcl, one_to_max relies on the assumption that
    cluster_run does not contain any NaN entries.

    Parameters
    ---...
Alter a vector of cluster labels to a dense mapping. Given that this function is herein always called after passing a vector to the function checkcl, one_to_max relies on the assumption that cluster_run does not contain any NaN entries. Parameters ---------- array_in : a list or ...
entailment
def checks(similarities, verbose=False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-se...
Check that a matrix is a proper similarity matrix and bring appropriate changes if applicable. Parameters ---------- similarities : array of shape (n_samples, n_samples) A matrix of pairwise similarities between (sub)-samples of the data-set. verbose : Boolean, optional (default = Fa...
entailment
def CSPA(hdf5_file_name, cluster_runs, verbose=False, N_clusters_max=None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus
    function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose...
Cluster-based Similarity Partitioning Algorithm for a consensus function. Parameters ---------- hdf5_file_name : file handle or string cluster_runs : array of shape (n_partitions, n_samples) verbose : bool, optional (default = False) N_clusters_max : int, optional (default = ...
entailment
def HGPA(hdf5_file_name, cluster_runs, verbose=False, N_clusters_max=None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, option...
HyperGraph-Partitioning Algorithm for a consensus function. Parameters ---------- hdf5_file_name : string or file handle cluster_runs: array of shape (n_partitions, n_samples) verbose : bool, optional (default = False) N_clusters_max : int, optional (default = None) ...
entailment
def MCLA(hdf5_file_name, cluster_runs, verbose=False, N_clusters_max=None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (def...
Meta-CLustering Algorithm for a consensus function. Parameters ---------- hdf5_file_name : file handle or string cluster_runs : array of shape (n_partitions, n_samples) verbose : bool, optional (default = False) N_clusters_max : int, optional (default = None) Returns...
entailment
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an ensemble
    of clusterings.

    Para...
For a label vector represented by cluster_run, constructs the binary membership indicator matrix. Such matrices, when concatenated, contribute to the adjacency matrix for a hypergraph representation of an ensemble of clusterings. Parameters ---------- cluster_run : array of s...
entailment
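`create_membership_matrix` builds one binary indicator row per cluster ID; a sketch of that construction with NumPy/SciPy (illustrative, not the original code):

    import numpy as np
    import scipy.sparse

    def membership_matrix(cluster_run):
        # Entry (k, i) is 1 iff sample i belongs to the k-th cluster ID.
        cluster_run = np.asarray(cluster_run)
        rows = [(cluster_run == cid).astype(int)
                for cid in np.unique(cluster_run)]
        return scipy.sparse.csr_matrix(np.vstack(rows))

    print(membership_matrix([0, 0, 1, 2, 1]).toarray())
    # [[1 1 0 0 0]
    #  [0 0 1 0 1]
    #  [0 0 0 1 0]]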
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced
    similarity graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_sam...
METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph passed by CSPA. Parameters ---------- hdf5_file_name : string or file handle N_clusters_max : int Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the clu...
entailment
def hmetis(hdf5_file_name, N_clusters_max, w=None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)...
Gives cluster labels ranging from 1 to N_clusters_max for hypergraph partitioning required for HGPA. Parameters ---------- hdf5_file_name : file handle or string N_clusters_max : int w : array, optional (default = None) Returns ------- labels : array of shape (n_...
entailment
def wgraph(hdf5_file_name, w=None, method=0):
    """Write a graph file in a format apposite to later use by METIS
    or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    w : list or array, optional (default = None)

    method : int, optional (default = 0)

    Ret...
Write a graph file in a format apposite to later use by METIS or HMETIS. Parameters ---------- hdf5_file_name : file handle or string w : list or array, optional (default = None) method : int, optional (default = 0) Returns ------- file_name : string
entailment
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int

    file_name : string

    Returns
    -------
    labels : array of shape (n_samples,)
        ...
Runs METIS or hMETIS and returns the labels found by those (hyper-)graph partitioning algorithms. Parameters ---------- N_clusters_max : int file_name : string Returns ------- labels : array of shape (n_samples,) A vector of labels denoting the cluster to ...
entailment
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing for a
    particular run the overlap of cluster ID's that are matching each of
    the cluster ID...
Writes on disk (in an HDF5 file whose handle is provided as the first argument to this function) a stack of matrices, each describing for a particular run the overlap of cluster ID's that are matching each of the cluster ID's stored in 'consensus_labels' (the vector of labels obtained by e...
entailment
def obfuscate(p, action):
    """Obfuscate the auth details to avoid easy snatching.

    It's best to use a throw away account for these alerts to avoid
    having your authentication put at risk by storing it locally.
    """
    key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH"
    s = list()
    ...
Obfuscate the auth details to avoid easy snatching. It's best to use a throw away account for these alerts to avoid having your authentication put at risk by storing it locally.
entailment
def _config_bootstrap(self):
    """Go through and establish the defaults on the file system.

    The approach here was stolen from the CLI tool provided with the
    module. Idea being that the user should not always need to provide
    a username and password in order to run the script. If the config...
Go through and establish the defaults on the file system. The approach here was stolen from the CLI tool provided with the module. Idea being that the user should not always need to provide a username and password in order to run the script. If the configuration file is already present ...
entailment
def _session_check(self):
    """Attempt to authenticate the user through a session file.

    This process is done to avoid having to authenticate the user every
    single time. It uses a session file that is saved when a valid session
    is captured and then reused. Because sessions can expire, we n...
Attempt to authenticate the user through a session file. This process is done to avoid having to authenticate the user every single time. It uses a session file that is saved when a valid session is captured and then reused. Because sessions can expire, we need to test the session prior...
entailment
def _logger(self):
    """Create a logger to be used between processes.

    :returns: Logging instance.
    """
    logger = logging.getLogger(self.NAME)
    logger.setLevel(self.LOG_LEVEL)
    shandler = logging.StreamHandler(sys.stdout)
    fmt = '\033[1;32m%(levelname)-5s %(module)s:%(fu...
Create a logger to be used between processes. :returns: Logging instance.
entailment
def set_log_level(self, level):
    """Override the default log level of the class"""
    if level == 'info':
        level = logging.INFO
    if level == 'debug':
        level = logging.DEBUG
    if level == 'error':
        level = logging.ERROR
    self._log.setLevel(level)
Override the default log level of the class
entailment
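`set_log_level` maps string names to logging constants; a dictionary lookup expresses the same idea more compactly (illustrative, not the library's code):

    import logging

    LEVELS = {'info': logging.INFO, 'debug': logging.DEBUG, 'error': logging.ERROR}

    log = logging.getLogger('example')
    # Fall back to INFO for unknown names instead of silently keeping the old level.
    log.setLevel(LEVELS.get('debug', logging.INFO))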
def _process_state(self):
    """Process the application state configuration.

    Google Alerts manages the account information and alert data through
    some custom state configuration. Not all values have been completely
    enumerated.
    """
    self._log.debug("Capturing state from the r...
Process the application state configuration. Google Alerts manages the account information and alert data through some custom state configuration. Not all values have been completely enumerated.
entailment
def authenticate(self):
    """Authenticate the user and setup our state."""
    valid = self._session_check()
    if self._is_authenticated and valid:
        self._log.debug("[!] User has already authenticated")
        return
    init = self._session.get(url=self.LOGIN_URL, headers=self.HEADE...
Authenticate the user and setup our state.
entailment
def list(self, term=None):
    """List alerts configured for the account."""
    if not self._state:
        raise InvalidState("State was not properly obtained from the app")
    self._process_state()
    if not self._state[1]:
        self._log.info("No monitors have been created yet.")
        ...
List alerts configured for the account.
entailment
def create(self, term, options):
    """Create a monitor using passed configuration."""
    if not self._state:
        raise InvalidState("State was not properly obtained from the app")
    options['action'] = 'CREATE'
    payload = self._build_payload(term, options)
    url = self.ALERTS_CREAT...
Create a monitor using passed configuration.
entailment
def modify(self, monitor_id, options):
    """Modify a monitor using passed configuration."""
    if not self._state:
        raise InvalidState("State was not properly obtained from the app")
    monitors = self.list()  # Get the latest set of monitors
    obj = None
    for monitor in monitors...
Modify a monitor using passed configuration.
entailment
def delete(self, monitor_id):
    """Delete a monitor by ID."""
    if not self._state:
        raise InvalidState("State was not properly obtained from the app")
    monitors = self.list()  # Get the latest set of monitors
    bit = None
    for monitor in monitors:
        if monitor_id !=...
Delete a monitor by ID.
entailment
def main():
    """Run the core."""
    parser = ArgumentParser()
    subs = parser.add_subparsers(dest='cmd')
    setup_parser = subs.add_parser('setup')
    setup_parser.add_argument('-e', '--email', dest='email', required=True,
                              help='Email of the Google user.', type=str)
    setup_parse...
Run the core.
entailment
def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    installed = {}
    for p in pkg_re...
Gather details from installed distributions. Print distribution name, version, location, and installed files. Installed files requires a pip generated 'installed-files.txt' in the distributions '.egg-info' directory.
entailment
def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Collect data on Class-Based Views
    """
    # Purge data in view method cache
    # Python 3's keys() method returns an iterator, so force evaluation
    # before iterating.
    view_keys = list(VIEW_METHOD_DATA.keys())...
Collect data on Class-Based Views
entailment
def process_response(self, request, response):
    """Let's handle old-style response processing here, as usual."""
    # For debug only.
    if not settings.DEBUG:
        return response
    # Check for responses where the data can't be inserted.
    content_encoding = response.get('Content-...
Let's handle old-style response processing here, as usual.
entailment
def get_job_class(klass_str):
    """
    Return the job class
    """
    mod_name, klass_name = klass_str.rsplit('.', 1)
    try:
        mod = importlib.import_module(mod_name)
    except ImportError as e:
        logger.error("Error importing job module %s: '%s'", mod_name, e)
        return
    try:
        klass ...
Return the job class
entailment
def get(self, *raw_args, **raw_kwargs):
    """
    Return the data for this function (using the cache if possible).

    This method is not intended to be overridden.
    """
    # We pass args and kwargs through a filter to allow them to be
    # converted into values that can be pickled.
    ...
Return the data for this function (using the cache if possible). This method is not intended to be overridden.
entailment
def invalidate(self, *raw_args, **raw_kwargs):
    """
    Mark a cached item invalid and trigger an asynchronous
    job to refresh the cache
    """
    args = self.prepare_args(*raw_args)
    kwargs = self.prepare_kwargs(**raw_kwargs)
    key = self.key(*args, **kwargs)
    item = sel...
Mark a cached item invalid and trigger an asynchronous job to refresh the cache
entailment
def delete(self, *raw_args, **raw_kwargs):
    """
    Remove an item from the cache
    """
    args = self.prepare_args(*raw_args)
    kwargs = self.prepare_kwargs(**raw_kwargs)
    key = self.key(*args, **kwargs)
    item = self.cache.get(key)
    if item is not None:
        self...
Remove an item from the cache
entailment
def raw_get(self, *raw_args, **raw_kwargs):
    """
    Retrieve the item (tuple of value and expiry) that is actually in
    the cache, without causing a refresh.
    """
    args = self.prepare_args(*raw_args)
    kwargs = self.prepare_kwargs(**raw_kwargs)
    key = self.key(*args, **kwa...
Retrieve the item (tuple of value and expiry) that is actually in the cache, without causing a refresh.
entailment
def set(self, *raw_args, **raw_kwargs):
    """
    Manually set the cache value with its appropriate expiry.
    """
    if self.set_data_kwarg in raw_kwargs:
        data = raw_kwargs.pop(self.set_data_kwarg)
    else:
        raw_args = list(raw_args)
        data = raw_args.pop()
    ...
Manually set the cache value with its appropriate expiry.
entailment
def store(self, key, expiry, data):
    """
    Add a result to the cache

    :key: Cache key to use
    :expiry: The expiry timestamp after which the result is stale
    :data: The data to cache
    """
    self.cache.set(key, (expiry, data), self.cache_ttl)
    if getattr(settings, '...
Add a result to the cache :key: Cache key to use :expiry: The expiry timestamp after which the result is stale :data: The data to cache
entailment
def refresh(self, *args, **kwargs):
    """
    Fetch the result SYNCHRONOUSLY and populate the cache
    """
    result = self.fetch(*args, **kwargs)
    self.store(self.key(*args, **kwargs),
               self.expiry(*args, **kwargs),
               result)
    return result
Fetch the result SYNCHRONOUSLY and populate the cache
entailment
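The cache methods above share one storage convention: the backend holds (expiry, data) tuples and `refresh` repopulates synchronously. A minimal sketch of that convention with illustrative names (not any particular library's API):

    import time

    cache = {}  # stand-in for the real cache backend

    def store(key, expiry, data):
        # Persist the (expiry, data) pair under the computed key.
        cache[key] = (expiry, data)

    def refresh(key, fetch, lifetime=60):
        # Fetch synchronously, then store with a fresh expiry timestamp.
        data = fetch()
        store(key, time.time() + lifetime, data)
        return data

    refresh('answer', lambda: 42)
    print(cache['answer'][1])  # 42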