text stringlengths 81 112k |
|---|
Return a new path with the file name changed.
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
return self._from_parsed_parts(self._drv, self._root,
... |
Return a new path with the file suffix changed (or added, if none).
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed (or added, if none)."""
# XXX if suffix is None, should the current suffix be removed?
drv, root, parts = self._flavour.parse_parts((suffix,))
... |
Open the file pointed by this path and return a file descriptor,
as os.open() does.
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
return self._accessor.open(self, flags, mode) |
Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
for name in self._accessor.listdir(self):
... |
Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
def absolute(self):
"""Return an absolute version of this pat... |
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
def resolve(self):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes i... |
Open the file pointed by this path and return a file object, as
the built-in open() function does.
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
Open the file pointed by this path and return a file object, as
the built-in open() funct... |
Rename this path to the given path, clobbering the existing
destination if it exists.
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if sys.version_info < (3, 3):
raise NotImplementedEr... |
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is... |
Whether this path is a symbolic link.
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist
... |
Whether this path is a block device.
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist or is a... |
Whether this path is a character device.
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if e.errno != ENOENT:
raise
# Path doesn't exist ... |
Points are the outer edges of the UL and LR pixels. Size is rows, columns.
GC projection type is taken from Points.
def grid_coords_from_corners(upper_left_corner, lower_right_corner, size):
''' Points are the outer edges of the UL and LR pixels. Size is rows, columns.
GC projection type is taken from Poin... |
returns True if the GC's overlap.
def intersects(self, other_grid_coordinates):
""" returns True if the GC's overlap. """
ogc = other_grid_coordinates # alias
# for explanation: http://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
# Note the flipped ... |
A string that (ideally) uniquely represents this GC object. This
helps with naming files for caching. 'Unique' is defined as 'If
GC1 != GC2, then GC1.unique_str() != GC2.unique_str()'; conversely,
'If GC1 == GC2, then GC1.unique_str() == GC2.unique_str()'.
The string should be filename-... |
See http://www.gdal.org/gdal_datamodel.html for details.
def _get_x_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# 0,0 is top/left top top/left pixel. Actual x/y coord of that pixel are (.5,.5).
x_centers = np.linspace(.5, self.x_size - .5, self.x_size)
y_c... |
See http://www.gdal.org/gdal_datamodel.html for details.
def _get_y_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# 0,0 is top/left top top/left pixel. Actual x/y coord of that pixel are (.5,.5).
y_centers = np.linspace(.5, self.y_size - .5, self.y_size)
x_c... |
Use pixel centers when appropriate.
See documentation for the GDAL function GetGeoTransform for details.
def raster_to_projection_coords(self, pixel_x, pixel_y):
""" Use pixel centers when appropriate.
See documentation for the GDAL function GetGeoTransform for details. """
h_px_py = np... |
Returns pixel centers.
See documentation for the GDAL function GetGeoTransform for details.
def projection_to_raster_coords(self, lat, lon):
""" Returns pixel centers.
See documentation for the GDAL function GetGeoTransform for details. """
r_px_py = np.array([1, lon, lat])
tg =... |
Reprojects data in this layer to match that in the GridCoordinates
object.
def reproject_to_grid_coordinates(self, grid_coordinates, interp=gdalconst.GRA_NearestNeighbour):
""" Reprojects data in this layer to match that in the GridCoordinates
object. """
source_dataset = self.grid_coor... |
Replace masked-out elements in an array using an iterative image inpainting algorithm.
def inpaint(self):
""" Replace masked-out elements in an array using an iterative image inpainting algorithm. """
import inpaint
filled = inpaint.replace_nans(np.ma.filled(self.raster_data, np.NAN).astype(np... |
Lookup a pixel value in the raster data, performing linear interpolation
if necessary. Indexed ==> nearest neighbor (*fast*).
def interp_value(self, lat, lon, indexed=False):
""" Lookup a pixel value in the raster data, performing linear interpolation
if necessary. Indexed ==> nearest neighbor ... |
Gets a connected Client object.
If max_size is reached, this method will block until a new client
object is available.
Returns:
A Future object with connected Client instance as a result
(or ClientError if there was a connection problem)
def get_connected_client(se... |
Gets a Client object (not necessarily connected).
If max_size is reached, this method will return None (and won't block).
Returns:
A Client instance (not necessarily connected) as result (or None).
def get_client_nowait(self):
"""Gets a Client object (not necessary connected).
... |
Returns a ContextManagerFuture to be yielded in a with statement.
Returns:
A ContextManagerFuture object.
Examples:
>>> with (yield pool.connected_client()) as client:
# client is a connected tornadis.Client instance
# it will be automati... |
Releases a client object to the pool.
Args:
client: Client object.
def release_client(self, client):
"""Releases a client object to the pool.
Args:
client: Client object.
"""
if isinstance(client, Client):
if not self._is_expired_client(clie... |
Disconnects all pooled client objects.
def destroy(self):
"""Disconnects all pooled client objects."""
while True:
try:
client = self.__pool.popleft()
if isinstance(client, Client):
client.disconnect()
except IndexError:
... |
(pre)Connects some or all redis clients inside the pool.
Args:
size (int): number of redis clients to build and to connect
(-1 means all clients if pool max_size > -1)
Raises:
ClientError: when size == -1 and pool max_size == -1
def preconnect(self, size=-1):
... |
Setup python search and add ``TASKS_VENDOR_DIR`` (if available).
def setup_path(invoke_minversion=None):
"""Setup python search and add ``TASKS_VENDOR_DIR`` (if available)."""
# print("INVOKE.tasks: setup_path")
if not os.path.isdir(TASKS_VENDOR_DIR):
print("SKIP: TASKS_VENDOR_DIR=%s is missing" % ... |
Ensures that :mod:`invoke` has at the least the :param:`min_version`.
Otherwise,
:param min_version: Minimal acceptable invoke version (as string).
:param verbose: Indicates if invoke.version should be shown.
:raises: VersionRequirementError=SystemExit if requirement fails.
def require_invoke_minv... |
Decorator for SectionSchema classes to define the mapping between
a config section schema class and one or more config sections with
matching name(s).
.. sourcecode::
@matches_section("foo")
class FooSchema(SectionSchema):
pass
@matches_section(["bar", "baz.*"])
... |
Class decorator to assign parameter name to instances of :class:`Param`.
.. sourcecode::
@assign_param_names
class ConfigSectionSchema(object):
alice = Param(type=str)
bob = Param(type=str)
assert ConfigSectionSchema.alice.name == "alice"
assert ConfigSec... |
Selects the parameters of a config section schema.
:param section_schema: Configuration file section schema to use.
:return: Generator of params
def select_params_from_section_schema(section_schema, param_class=Param,
deep=False):
"""Selects the parameters of a confi... |
Parse a config file section (INI file) by using its schema/description.
.. sourcecode::
import configparser # -- NOTE: Use backport for Python2
import click
from click_configfile import SectionSchema, Param, parse_config_section
class ConfigSectionSchema(object):
c... |
Generates all configuration file name combinations to read.
.. sourcecode::
# -- ALGORITHM:
# First basenames/directories are prefered and override other files.
for config_path in reversed(config_searchpath):
for config_basename in reversed(config_files):
con... |
Select a subset of the sections in a configuration file by using
a list of section names of list of section name patters
(supporting :mod:`fnmatch` wildcards).
:param configfile_sections: List of config section names (as strings).
:param desired_section_patterns:
:return: List of selected section n... |
Indicates if this schema can be used for a config section
by using the section name.
:param section_name: Config section name to check.
:return: True, if this schema can be applied to the config section.
:return: False, if this schema does not match the config section.
def matches_se... |
Derive support config section names from config section schemas.
If no :param:`config_section_schemas` are provided, the schemas from
this class are used (normally defined in the DerivedClass).
:param config_section_schemas: List of config section schema classes.
:return: List of confi... |
Process the config section and store the extracted data in
the param:`storage` (as outgoing param).
def process_config_section(cls, config_section, storage):
"""Process the config section and store the extracted data in
the param:`storage` (as outgoing param).
"""
# -- CONCEPT:
... |
Select the config schema that matches the config section (by name).
:param section_name: Config section name (as key).
:return: Config section schema to use (subclass of: SectionSchema).
def select_config_schema_for(cls, section_name):
"""Select the config schema that matches the config se... |
Selects the data storage for a config section within the
:param:`storage`. The primary config section is normally merged into
the :param:`storage`.
:param section_name: Config section (name) to process.
:param storage: Data storage to use.
:return: :param:`storage` or... |
Cleanup temporary dirs/files to regain a clean state.
def clean(ctx, dry_run=False):
"""Cleanup temporary dirs/files to regain a clean state."""
# -- VARIATION-POINT 1: Allow user to override in configuration-file
directories = ctx.clean.directories
files = ctx.clean.files
# -- VARIATION-POINT 2: ... |
Clean up everything, even the precious stuff.
NOTE: clean task is executed first.
def clean_all(ctx, dry_run=False):
"""Clean up everything, even the precious stuff.
NOTE: clean task is executed first.
"""
cleanup_dirs(ctx.clean_all.directories or [], dry_run=dry_run)
cleanup_dirs(ctx.clean_all... |
Cleanup python related files/dirs: *.pyc, *.pyo, ...
def clean_python(ctx, dry_run=False):
"""Cleanup python related files/dirs: *.pyc, *.pyo, ..."""
# MAYBE NOT: "**/__pycache__"
cleanup_dirs(["build", "dist", "*.egg-info", "**/__pycache__"],
dry_run=dry_run)
if not dry_run:
c... |
Remove files or files selected by file patterns.
Skips removal if file does not exist.
:param patterns: File patterns, like "**/*.pyc" (as list).
:param dry_run: Dry-run mode indicator (as bool).
:param workdir: Current work directory (default=".")
def cleanup_files(patterns, dry_run=False,... |
Use pathlib for ant-like patterns, like: "**/*.py"
:param pattern: File/directory pattern to use (as string).
:param current_dir: Current working directory (as Path, pathlib.Path, str)
:return Resolved Path (as path.Path).
def path_glob(pattern, current_dir=None):
"""Use pathlib for ant-like pat... |
Stacks a redis command inside the object.
The syntax is the same than the call() method a Client class.
Args:
*args: full redis command as variable length argument list.
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "va... |
Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok.
def connect(self):
"""Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connecti... |
Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors).
def disconnect(self):
"""Disconnects the object.
Safe method (no exception, even if it's already disconnected or if
there are some connection errors).
... |
Buffers some data to be sent to the host:port in a non blocking way.
So the data is always buffered and not sent on the socket in a
synchronous way.
You can give a WriteBuffer as parameter. The internal Connection
WriteBuffer will be extended with this one (without copying).
A... |
Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.
def surrogate_escape(error):
"""
Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.
"""
chars = error.object[error.start:error.end]
assert len(chars) == 1
val = ord(chars)
val += 0xdc00
retu... |
Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
is a proper Unicode string.
def _always_unicode(cls, path):
"""
Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
is a proper Unicode string.
"""
if PY3 or isinstance(pa... |
The same as :meth:`name`, but with one file extension stripped off.
For example,
``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
but
``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``.
def namebase(self):
""" The same as :meth:`name`, but with one fil... |
D.listdir() -> List of items in this directory.
Use :meth:`files` or :meth:`dirs` instead if you want a listing
of just files or just subdirectories.
The elements of the list are Path objects.
With the optional `pattern` argument, this only lists
items whose names match the gi... |
D.dirs() -> List of this directory's subdirectories.
The elements of the list are Path objects.
This does not walk recursively into subdirectories
(but see :meth:`walkdirs`).
With the optional `pattern` argument, this only lists
directories whose names match the given pattern. ... |
D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
With the optional `pattern` argument, this only lists files
whose names match the given pattern. For example,
``d.files(... |
D.walkdirs() -> iterator over subdirs, recursively.
With the optional `pattern` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``'test'``.
The `errors=` keyword a... |
D.walkfiles() -> iterator over files in D, recursively.
The optional argument `pattern` limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
def walkfiles(self, pattern=None, errors='str... |
Open this file and return a corresponding :class:`file` object.
Keyword arguments work as in :func:`io.open`. If the file cannot be
opened, an :class:`~exceptions.OSError` is raised.
def open(self, *args, **kwargs):
""" Open this file and return a corresponding :class:`file` object.
... |
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See be... |
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See `linesep` below.
`lines` - A list of strings.
`encoding` - A Unicode encoding to use. This ap... |
Like :meth:`mkdir`, but does not raise an exception if the
directory already exists.
def mkdir_p(self, mode=0o777):
""" Like :meth:`mkdir`, but does not raise an exception if the
directory already exists. """
try:
self.mkdir(mode)
except OSError:
_, e, _ ... |
Like :meth:`makedirs`, but does not raise an exception if the
directory already exists.
def makedirs_p(self, mode=0o777):
""" Like :meth:`makedirs`, but does not raise an exception if the
directory already exists. """
try:
self.makedirs(mode)
except OSError:
... |
Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist.
def rmdir_p(self):
""" Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist. """
try:
self.rmdir()
except OSError:
... |
Like :meth:`removedirs`, but does not raise an exception if the
directory is not empty or does not exist.
def removedirs_p(self):
""" Like :meth:`removedirs`, but does not raise an exception if the
directory is not empty or does not exist. """
try:
self.removedirs()
... |
Like :meth:`remove`, but does not raise an exception if the
file does not exist.
def remove_p(self):
""" Like :meth:`remove`, but does not raise an exception if the
file does not exist. """
try:
self.unlink()
except OSError:
_, e, _ = sys.exc_info()
... |
Like :meth:`rmtree`, but does not raise an exception if the
directory does not exist.
def rmtree_p(self):
""" Like :meth:`rmtree`, but does not raise an exception if the
directory does not exist. """
try:
self.rmtree()
except OSError:
_, e, _ = sys.exc_in... |
Copy entire contents of self to dst, overwriting existing
contents in dst with those in self.
If the additional keyword `update` is True, each
`src` will only be copied if `dst` does not exist,
or `src` is newer than `dst`.
Note that the technique employed stages the files in a... |
DiskReader implementation.
def __gdal_dataset_default(self):
"""DiskReader implementation."""
if not os.path.exists(self.file_name):
return None
if os.path.splitext(self.file_name)[1].lower() not in self.file_types:
raise RuntimeError('Filename %s does not have extensio... |
Connects the client object to redis.
It's safe to use this method even if you are already connected.
Note: this method is useless with autoconnect mode (default).
Returns:
a Future object with True as result if the connection was ok.
def connect(self):
"""Connects the clie... |
Callback called when redis closed the connection.
The callback queue is emptied and we call each callback found
with None or with an exception object to wake up blocked client.
def _close_callback(self):
"""Callback called when redis closed the connection.
The callback queue is emptie... |
Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply to on the reply queue.
Args:
data (str): string (buffer) read on the socket.
def _read_callback(self, data=None):
"""Callback ... |
Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded... |
Calls a redis command, waits for the reply and call a callback.
Following options are available (not part of the redis command itself):
- callback
Function called (with the result as argument) when the result
is available. If not set, the reply is silently discarded. In
... |
Formats arguments into redis protocol...
This function makes and returns a string/buffer corresponding to
given arguments formated with the redis protocol.
integer, text, string or binary types are automatically converted
(using utf8 if necessary).
More informations about the protocol: http://red... |
Internal "done callback" to set the result of the object.
The result of the object if forced by the wrapped future. So this
internal callback must be called when the wrapped future is ready.
Args:
wrapped (Future): the wrapped Future object
def _done_callback(self, wrapped):
... |
The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager
def result(self):
"""The result method which returns a context manager
Returns:
ContextManager: The corresponding context manager
"""
if self.ex... |
Create the URL in the LyricWikia format
def create_url(artist, song):
"""Create the URL in the LyricWikia format"""
return (__BASE_URL__ +
'/wiki/{artist}:{song}'.format(artist=urlize(artist),
song=urlize(song))) |
Retrieve the lyrics of the song and return the first one in case
multiple versions are available.
def get_lyrics(artist, song, linesep='\n', timeout=None):
"""Retrieve the lyrics of the song and return the first one in case
multiple versions are available."""
return get_all_lyrics(artist, song, linesep... |
Retrieve a list of all the lyrics versions of a song.
def get_all_lyrics(artist, song, linesep='\n', timeout=None):
"""Retrieve a list of all the lyrics versions of a song."""
url = create_url(artist, song)
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.par... |
Open an ARF file, creating as necessary.
Use this instead of h5py.File to ensure that root-level attributes and group
creation property lists are set correctly.
def open_file(name, mode=None, driver=None, libver=None, userblock_size=None, **kwargs):
"""Open an ARF file, creating as necessary.
Use thi... |
Create a new ARF entry under group, setting required attributes.
An entry is an abstract collection of data which all refer to the same time
frame. Data can include physiological recordings, sound recordings, and
derived data such as spike times and labels. See add_data() for information
on how data ar... |
Create an ARF dataset under group, setting required attributes
Required arguments:
name -- the name of dataset in which to store the data
data -- the data to store
Data can be of the following types:
* sampled data: an N-D numerical array of measurements
* "simple" event data: a 1-D array... |
Create a new array dataset under group with compound datatype and maxshape=(None,)
def create_table(group, name, dtype, **attributes):
"""Create a new array dataset under group with compound datatype and maxshape=(None,)"""
dset = group.create_dataset(
name, shape=(0,), dtype=dtype, maxshape=(None,))
... |
Append data to dset along axis 0. Data must be a single element or
a 1D array of the same type as the dataset (including compound datatypes).
def append_data(dset, data):
"""Append data to dset along axis 0. Data must be a single element or
a 1D array of the same type as the dataset (including compound dat... |
Check the ARF version attribute of file for compatibility.
Raises DeprecationWarning for backwards-incompatible files, FutureWarning
for (potentially) forwards-incompatible files, and UserWarning for files
that may not have been created by an ARF library.
Returns the version for the file
def check_fi... |
Set multiple attributes on node.
If overwrite is False, and the attribute already exists, does nothing. If
the value for a key is None, the attribute is deleted.
def set_attributes(node, overwrite=True, **attributes):
"""Set multiple attributes on node.
If overwrite is False, and the attribute alread... |
Returns a sequence of links in group in order of creation.
Raises an error if the group was not set to track creation order.
def keys_by_creation(group):
"""Returns a sequence of links in group in order of creation.
Raises an error if the group was not set to track creation order.
"""
from h5py ... |
Make an ARF timestamp from an object.
Argument can be a datetime.datetime object, a time.struct_time, an integer,
a float, or a tuple of integers. The returned value is a numpy array with
the integer number of seconds since the Epoch and any additional
microseconds.
Note that because floating poin... |
Convert an ARF timestamp to a datetime.datetime object (naive local time)
def timestamp_to_datetime(timestamp):
"""Convert an ARF timestamp to a datetime.datetime object (naive local time)"""
from datetime import datetime, timedelta
obj = datetime.fromtimestamp(timestamp[0])
return obj + timedelta(micr... |
Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype
def set_uuid(obj, uuid=None):
"""Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype """
from uuid import uuid4, UUID
if uuid is None:
uuid = uuid4()
elif isinstance(uuid, bytes):
... |
Return the uuid for obj, or null uuid if none is set
def get_uuid(obj):
"""Return the uuid for obj, or null uuid if none is set"""
# TODO: deprecate null uuid ret val
from uuid import UUID
try:
uuid = obj.attrs['uuid']
except KeyError:
return UUID(int=0)
# convert to unicode for... |
Return the number of children of obj, optionally restricting by class
def count_children(obj, type=None):
"""Return the number of children of obj, optionally restricting by class"""
if type is None:
return len(obj)
else:
# there doesn't appear to be any hdf5 function for getting this
... |
generate a dict keyed by value
def _todict(cls):
""" generate a dict keyed by value """
return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith('_')) |
get_template will return a template in the template folder,
with some substitutions (eg, {'{{ graph | safe }}':"fill this in!"}
def get_template(template_name,fields=None):
'''get_template will return a template in the template folder,
with some substitutions (eg, {'{{ graph | safe }}':"fill this in!"}
... |
container similarity_vector is similar to compare_packages, but intended
to compare a container object (singularity image or singularity hub container)
to a list of packages. If packages_set is not provided, the default used is
'docker-os'. This can be changed to 'docker-library', or if the user wants a cu... |
compare_singularity_images is a wrapper for compare_containers to compare
singularity containers. If image_paths2 is not defined, pairwise comparison is done
with image_paths1
def compare_singularity_images(image_paths1, image_paths2=None):
'''compare_singularity_images is a wrapper for compare_containers ... |
compare_containers will generate a data structure with common and unique files to
two images. If environmental variable SINGULARITY_HUB is set, will use container
database objects.
:param container1: first container for comparison
:param container2: second container for comparison if either not defined ... |
compare lists is the lowest level that drives compare_containers and
compare_packages. It returns a comparison object (dict) with the unique,
total, and intersecting things between two lists
:param list1: the list for container1
:param list2: the list for container2
def compare_lists(list1,list2):
... |
calculate_similarity will calculate similarity of two containers
by files content, default will calculate
2.0*len(intersect) / total package1 + total package2
Parameters
==========
container1: container 1
container2: container 2 must be defined or
metric a function to take a total... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.