Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have a .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes tha... |
Process image files from the dataset.
def _process_image_file(fobj, session, filename):
"""Process image files from the dataset."""
# We need to read the image files and convert them to JPEG, since some files
# actually contain GIF, PNG or BMP data (despite having a .jpg extension) and
# some encoding options ... |
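For context, a minimal sketch of that JPEG normalization in the TF 1.x graph/session style used by these snippets. This is not the library's code: `make_jpeg_normalizer` is a hypothetical helper, and GIF payloads decode to 4-D tensors, so they would need a frame selected before encoding.
```python
import tensorflow as tf

def make_jpeg_normalizer(session):
  """Hypothetical helper: returns fn(bytes) -> JPEG bytes for JPEG/PNG/BMP input."""
  image_bytes = tf.placeholder(dtype=tf.string)
  decoded = tf.image.decode_image(image_bytes, channels=3)
  decoded.set_shape([None, None, 3])  # assert rank 3; GIF frames would be 4-D
  encoded = tf.image.encode_jpeg(decoded)
  return lambda buf: session.run(encoded, feed_dict={image_bytes: buf})
```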
Yields examples.
def _generate_examples(self, archive):
"""Yields examples."""
prefix_len = len("SUN397")
with tf.Graph().as_default():
with utils.nogpu_session() as sess:
for filepath, fobj in archive:
if (filepath.endswith(".jpg") and
filepath not in _SUN397_IGNORE_I... |
Returns examples from parallel SGML or text files, which may be gzipped.
def _parse_parallel_sentences(f1, f2):
"""Returns examples from parallel SGML or text files, which may be gzipped."""
def _parse_text(path):
"""Returns the sentences from a single text file, which may be gzipped."""
split_path = path.... |
Generates examples from TMX file.
def _parse_tmx(path):
"""Generates examples from TMX file."""
def _get_tuv_lang(tuv):
for k, v in tuv.items():
if k.endswith("}lang"):
return v
raise AssertionError("Language not found in `tuv` attributes.")
def _get_tuv_seg(tuv):
segs = tuv.findall("s... |
Generates examples from TSV file.
def _parse_tsv(path, language_pair=None):
"""Generates examples from TSV file."""
if language_pair is None:
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
assert lang_match is not None, "Invalid TSV filename: %s" % path
l1, l2 = lang_match.groups()
... |
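A runnable sketch of the same TSV pattern (illustrative; the real function also handles quoting and the builder's key conventions):
```python
import csv
import re

def parse_tsv_sketch(path, language_pair=None):
  """Yields (key, {l1: sentence, l2: sentence}) pairs from a .l1-l2.tsv file."""
  if language_pair is None:
    lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
    assert lang_match is not None, "Invalid TSV filename: %s" % path
    language_pair = lang_match.groups()
  l1, l2 = language_pair
  with open(path) as f:
    for j, row in enumerate(csv.reader(f, delimiter="\t")):
      if len(row) != 2:  # skip malformed rows
        continue
      yield j, {l1: row[0], l2: row[1]}
```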
Generates examples from Wikiheadlines dataset file.
def _parse_wikiheadlines(path):
"""Generates examples from Wikiheadlines dataset file."""
lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
l1, l2 = lang_match.groups()
... |
Generates examples from CzEng v1.6, with optional filtering for v1.7.
def _parse_czeng(*paths, **kwargs):
"""Generates examples from CzEng v1.6, with optional filtering for v1.7."""
filter_path = kwargs.get("filter_path", None)
if filter_path:
re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
with tf.io.... |
Injects languages into (potentially) template strings.
def _inject_language(self, src, strings):
"""Injects languages into (potentially) template strings."""
if src not in self.sources:
raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
def _format_string(s):
if "{0}" in s... |
Subsets that make up each split of the dataset for the language pair.
def subsets(self):
"""Subsets that make up each split of the dataset for the language pair."""
source, target = self.builder_config.language_pair
filtered_subsets = {}
for split, ss_names in self._subsets.items():
filtered_subs... |
Returns the examples in the raw (text) form.
def _generate_examples(self, split_subsets, extraction_map):
"""Returns the examples in the raw (text) form."""
source, _ = self.builder_config.language_pair
def _get_local_paths(ds, extract_dirs):
rel_paths = ds.get_path(source)
if len(extract_dirs... |
Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `"dataset_name"` or
`"dataset_name/config_name"` for datasets with `BuilderConfig`s.
As a convenience, this string m... |
Loads the named dataset into a `tf.data.Dataset`.
If `split=None` (the default), returns all splits for the dataset. Otherwise,
returns the specified split.
`load` is a convenience method that fetches the `tfds.core.DatasetBuilder` by
string name, optionally calls `DatasetBuilder.download_and_prepare`
(if `... |
Extract kwargs from name str.
def _dataset_name_and_kwargs_from_name_str(name_str):
"""Extract kwargs from name str."""
res = _NAME_REG.match(name_str)
if not res:
raise ValueError(_NAME_STR_ERR.format(name_str))
name = res.group("dataset_name")
kwargs = _kwargs_str_to_kwargs(res.group("kwargs"))
try:
... |
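A sketch of the name-string convention ("name", "name/config", with an optional ":version"); the regex below is a simplified stand-in for the real `_NAME_REG`, which also parses builder kwargs.
```python
import re

_NAME_RE = re.compile(
    r"^(?P<dataset_name>\w+)"
    r"(/(?P<config>[\w\-\.]+))?"
    r"(:(?P<version>[\d\.\*]+))?$")

def parse_builder_name(name_str):
  res = _NAME_RE.match(name_str)
  if not res:
    raise ValueError("Invalid dataset name: %s" % name_str)
  return res.group("dataset_name"), res.group("config"), res.group("version")

parse_builder_name("mnist")                      # ('mnist', None, None)
parse_builder_name("wmt_translate/de-en:1.*.*")  # ('wmt_translate', 'de-en', '1.*.*')
```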
Try cast to int, float, bool, str, in that order.
def _cast_to_pod(val):
"""Try cast to int, float, bool, str, in that order."""
bools = {"True": True, "False": False}
if val in bools:
return bools[val]
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
... |
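The snippet is cut off inside the nested `except`; presumably it falls back to the raw string. A complete sketch of the same cast chain:
```python
def cast_to_pod(val):
  """Try to cast a string to bool, int, float, in that order; else keep the str."""
  bools = {"True": True, "False": False}
  if val in bools:
    return bools[val]
  for cast in (int, float):
    try:
      return cast(val)
    except ValueError:
      continue
  return val  # plain-string fallback

[cast_to_pod(s) for s in ("True", "3", "3.5", "abc")]  # [True, 3, 3.5, 'abc']
```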
Try importing a module, with an informative error message on failure.
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError:
err_msg = ("Tried importing %s but failed. See... |
Returns list from list, tuple or ndarray.
def np_to_list(elem):
"""Returns list from list, tuple or ndarray."""
if isinstance(elem, list):
return elem
elif isinstance(elem, tuple):
return list(elem)
elif isinstance(elem, np.ndarray):
return list(elem)
else:
raise ValueError(
'Input el... |
Transpose a nested dict[list] into a list[nested dict].
def _transpose_dict_list(dict_list):
"""Transpose a nested dict[list] into a list[nested dict]."""
# 1. Unstack numpy arrays into list
dict_list = utils.map_nested(np_to_list, dict_list, dict_only=True)
# 2. Extract the sequence length (and ensure the le... |
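The transpose step itself, reduced to a flat (non-nested) dict for illustration:
```python
def transpose_dict_list(dict_list):
  """{'a': [1, 2], 'b': [3, 4]} -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]."""
  length = len(next(iter(dict_list.values())))
  assert all(len(v) == length for v in dict_list.values()), "Length mismatch"
  return [{k: v[i] for k, v in dict_list.items()} for i in range(length)]
```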
See base class for details.
def get_tensor_info(self):
"""See base class for details."""
# Add the additional length dimension to every shape
def add_length_dim(tensor_info):
return feature_lib.TensorInfo(
shape=(self._length,) + tensor_info.shape,
dtype=tensor_info.dtype,
... |
See base class for details.
def get_serialized_info(self):
"""See base class for details."""
# Add the additional length dimension to every serialized features
def add_length_dim(serialized_info):
"""Add the length dimension to the serialized_info.
Args:
serialized_info: One of tf.io.... |
Returns SplitGenerators.
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download the full MNIST Database
filenames = {
"train_data": _MNIST_TRAIN_DATA_FILENAME,
"train_labels": _MNIST_TRAIN_LABELS_FILENAME,
"test_data": _MNIST_TEST_DATA_FILENAME,
"... |
Generate MNIST examples as dicts.
Args:
num_examples (int): The number of examples.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
def _generate_examples(self, num_examples, data_path, label_path):
"""Genera... |
Returns SplitGenerators.
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download images and annotations that come in separate archives.
# Note that the archives have a .tar.gz extension even though they are
# actually uncompressed tar archives.
dl_paths = dl_manager.do... |
Yields examples.
def _generate_examples(self, images_dir_path, labels_path, setid_path,
split_name):
"""Yields examples."""
with tf.io.gfile.GFile(labels_path, "rb") as f:
labels = tfds.core.lazy_imports.scipy.io.loadmat(f)["labels"][0]
with tf.io.gfile.GFile(setid_path, "rb"... |
Calculate statistics for the specified split.
def get_dataset_feature_statistics(builder, split):
"""Calculate statistics for the specified split."""
statistics = statistics_pb2.DatasetFeatureStatistics()
# Fill in the statistics and schema to the best of our ability.
schema = schema_pb2.Schema()
dataset = builder.as_dataset(sp... |
Read JSON-formatted proto into DatasetInfo proto.
def read_from_json(json_filename):
"""Read JSON-formatted proto into DatasetInfo proto."""
with tf.io.gfile.GFile(json_filename) as f:
dataset_info_json_str = f.read()
# Parse it back into a proto.
parsed_proto = json_format.Parse(dataset_info_json_str,
... |
Full canonical name: (<dataset_name>/<config_name>/<version>).
def full_name(self):
"""Full canonical name: (<dataset_name>/<config_name>/<version>)."""
names = [self._builder.name]
if self._builder.builder_config:
names.append(self._builder.builder_config.name)
names.append(str(self.version))
... |
Overwrite the splits if they are different from the current ones.
* If splits aren't already defined, or differ (e.g. a different number of
shards), then the new split dict is used. This will trigger stats
computation during download_and_prepare.
* If splits are already defined in DatasetInfo and si... |
Split setter (private method).
def _set_splits(self, split_dict):
"""Split setter (private method)."""
# Update the dictionary representation.
# Use from/to proto for a clean copy
self._splits = split_dict.copy()
# Update the proto
del self.as_proto.splits[:] # Clear previous
for split_in... |
Update from the DatasetBuilder.
def _compute_dynamic_properties(self, builder):
"""Update from the DatasetBuilder."""
# Fill other things by going over the dataset.
splits = self.splits
for split_info in utils.tqdm(
splits.values(), desc="Computing statistics...", unit=" split"):
try:
... |
Write `DatasetInfo` as JSON to `dataset_info_dir`.
def write_to_directory(self, dataset_info_dir):
"""Write `DatasetInfo` as JSON to `dataset_info_dir`."""
# Save the metadata from the features (vocabulary, labels,...)
if self.features:
self.features.save_metadata(dataset_info_dir)
if self.redis... |
Update DatasetInfo from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the DatasetInfo.
This will overwrite all previous metadata.
Args:
dataset_info_dir: `str` The directory containing the metadata f... |
Initialize DatasetInfo from GCS bucket info files.
def initialize_from_bucket(self):
"""Initialize DatasetInfo from GCS bucket info files."""
# In order to support Colab, we use the HTTP GCS API to access the metadata
# files. They are copied locally and then loaded.
tmp_dir = tempfile.mkdtemp("tfds")
... |
Returns SplitGenerators.
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
url = _DL_URLS[self.builder_config.name]
data_dirs = dl_manager.download_and_extract(url)
path_to_dataset = os.path.join(data_dirs, tf.io.gfile.listdir(data_dirs)[0])
train_a_path = os.path.join(path_... |
Map the function over each element and resolve the promises.
def _map_promise(map_fn, all_inputs):
"""Map the function over each element and resolve the promises."""
all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
res = utils.map_nested(_wait_on_promise, all_promises)
return res |
Store the downloaded file in its final location, write the INFO file, and return the path.
def _handle_download_result(self, resource, tmp_dir_path, sha256, dl_size):
"""Store the downloaded file in its final location, write the INFO file, and return the path."""
fnames = tf.io.gfile.listdir(tmp_dir_path)
if len(fnames) > 1:
raise AssertionError('... |
Download resource, returns Promise->path to downloaded file.
def _download(self, resource):
"""Download resource, returns Promise->path to downloaded file."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
url = resource.url
if url in self._sizes_checksu... |
Extract a single archive, returns Promise->path to extraction result.
def _extract(self, resource):
"""Extract a single archive, returns Promise->path to extraction result."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(path=resource)
path = resource.path
extract_m... |
Download-extract `Resource` or url, returns Promise->path.
def _download_extract(self, resource):
"""Download-extract `Resource` or url, returns Promise->path."""
if isinstance(resource, six.string_types):
resource = resource_lib.Resource(url=resource)
def callback(path):
resource.path = path
... |
Download data for a given Kaggle competition.
def download_kaggle_data(self, competition_name):
"""Download data for a given Kaggle competition."""
with self._downloader.tqdm():
kaggle_downloader = self._downloader.kaggle_downloader(competition_name)
urls = kaggle_downloader.competition_urls
... |
Download given url(s).
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
url_or_urls.
def download(self, url_or_urls):
... |
Returns iterator over files within archive.
**Important Note**: caller should read files as they are yielded.
Reading out of order is slow.
Args:
resource: path to archive or `tfds.download.Resource`.
Returns:
Generator yielding tuple (path_within_archive, file_obj).
def iter_archive(sel... |
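The in-order constraint mirrors tarfile's streaming mode; a minimal sketch for plain tar archives (the real method also dispatches on archive type):
```python
import tarfile

def iter_tar(path):
  """Yields (path_within_archive, file_obj) tuples in archive order."""
  # 'r|*' reads the archive as a non-seekable stream, hence the in-order rule.
  with tarfile.open(path, mode="r|*") as tar:
    for member in tar:
      if member.isfile():
        yield member.name, tar.extractfile(member)
```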
Extract given path(s).
Args:
path_or_paths: path or `list`/`dict` of path of file to extract. Each
path can be a `str` or `tfds.download.Resource`.
If not explicitly specified in `Resource`, the extraction method is deduced
from downloaded file name.
Returns:
extracted_path(s): `s... |
Download and extract given url_or_urls.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls: url or `list`/`dict` of urls to download and extract. Each
url can be a `str` or `tfds.download.Resource`.
If not ... |
Returns the directory containing the manually extracted data.
def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
... |
Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10CorruptedConfig objects, corresponding to
the 15 corruption types and 5 severities.
Returns:
A list of 75 Cifar10CorruptedConfig objects.
def _make_builder_configs():
"""Construct a list of BuilderConfigs.
Construct a list of 75 Cifar10... |
Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
def _split_generators(self, dl_manager):
"""Return the test split of Cifar10.
Args:
dl_manager: download manager object.
Returns:
test split.
"""
path = dl_manager.d... |
Generate corrupted Cifar10 test data.
Apply corruptions to the raw images according to self.corruption_type.
Args:
data_dir: root directory of downloaded dataset
Yields:
dictionary with image file and label.
def _generate_examples(self, data_dir):
"""Generate corrupted Cifar10 test data.... |
Doc string for a single builder, with or without configs.
def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
mod_name = builder.__class__.__module__
cls_name = builder.__class__.__name__
mod_file = sys.modules[mod_name].__file__
if mod_file.endswith("pyc"):
... |
Get all builders organized by module in nested dicts.
def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.... |
Pretty-print tfds.features.FeaturesDict.
def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
"""Pretty-print tfds.features.FeaturesDict."""
first_last_indent_str = " " * indent
indent_str = " " * (indent + 4)
first_line = "%s%s({" % (
first_last_indent_str if add_prefix else "",
ty... |
Make statistics information table.
def make_statistics_information(info):
"""Make statistics information table."""
if not info.splits.total_num_examples:
# That means that we have yet to calculate the statistics for this.
return "None computed"
stats = [(info.splits.total_num_examples, "ALL")]
for spl... |
Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
string describing the datasets (in the MarkDown format).
def dataset_docs_str(datasets=None):
"""Create d... |
Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was cho... |
Generates a Gaussian blurring kernel with disk shape.
Generates a Gaussian blurring kernel with disk shape using the cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussia... |
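A simplified sketch of such a kernel: a binary disk, normalized, then softened with a small Gaussian blur (the real function also adapts the grid and kernel size to the radius):
```python
import cv2
import numpy as np

def disk_kernel(radius, alias_blur=0.1, dtype=np.float32):
  """Disk-shaped blurring kernel, anti-aliased with a small Gaussian blur."""
  coords = np.arange(-radius, radius + 1)
  x, y = np.meshgrid(coords, coords)
  aliased_disk = np.array((x ** 2 + y ** 2) <= radius ** 2, dtype=dtype)
  aliased_disk /= aliased_disk.sum()  # normalize so the kernel sums to 1
  return cv2.GaussianBlur(aliased_disk, ksize=(3, 3), sigmaX=alias_blur)
```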
Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
def clipped_zoom(img, zoom_factor):
"""Zoom ima... |
Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsi... |
Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
def gaussian_noise(x, severity=1):
"""Gaussian noise corru... |
Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
def shot_noise(x, severity=1):
"""Shot noise corruption to images.... |
Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
def impulse_noise(x, severity=1):
"""Impulse noise corruptio... |
Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
def defocu... |
Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted... |
Zoom blurring to images.
Apply zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blu... |
Fog corruption to images.
Add fog to images. Fog is generated by the diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
def fog(x, ... |
Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
def brightness(x, severity=1):
"""Change brightness of images.
Ar... |
Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
def contrast(x, severity=1):
"""Change contrast of images.
Args:
... |
Apply elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic t... |
Pixelate images.
Apply pixelation by first shrinking the images and then resizing them to
the original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in ... |
Apply JPEG compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0, 255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
def jpeg_compression(x, severity=1):
"""Conduct jpeg co... |
Temporarily assign obj.attr to value.
@contextlib.contextmanager  # needed for the `yield` below to act as a context manager
def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
yield
setattr(obj, attr, original) |
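Usage sketch with a hypothetical Config class. Note that the restore step above does not run if the body raises; wrapping the yield in try/finally would make it exception-safe.
```python
class Config(object):
  verbose = False

cfg = Config()
with temporary_assignment(cfg, "verbose", True):
  assert cfg.verbose        # True only inside the block
assert not cfg.verbose      # original value restored on exit
```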
Iterate over items of dictionaries grouped by their keys.
def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in set(itertools.chain(*dicts)):  # the set merges all keys
# Will raise KeyError if the dicts don't have the same keys
yield key, tuple(d[key] for d in dicts) |
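Usage, assuming the function above (plus `import itertools`):
```python
d1 = {"a": 1, "b": 2}
d2 = {"a": 10, "b": 20}
dict(zip_dict(d1, d2))  # {'a': (1, 10), 'b': (2, 20)}
```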
Apply a function recursively to each element of a nested data struct.
def map_nested(function, data_struct, dict_only=False, map_tuple=False):
"""Apply a function recursively to each element of a nested data struct."""
# Could add support for more exotic data_struct, like OrderedDict
if isinstance(data_struct, ... |
Zip data struct together and return a data struct with the same shape.
def zip_nested(arg0, *args, **kwargs):
"""Zip data struct together and return a data struct with the same shape."""
# Python 2 does not support keyword-only arguments
dict_only = kwargs.pop("dict_only", False)
assert not kwargs
# Could add ... |
Simulate proto inheritance.
By default, protocol buffers do not support direct inheritance, so this decorator
simulates it for the class to which it is applied.
Example:
```
@as_proto_class(proto.MyProto)
class A(object):
def custom_method(self):
return self.proto_field * 10
p = proto.MyProt... |
Path to tensorflow_datasets directory.
def tfds_dir():
"""Path to tensorflow_datasets directory."""
return os.path.dirname(os.path.dirname(os.path.dirname(__file__))) |
Writes to path atomically, by writing to temp file and renaming it.
def atomic_write(path, mode):
"""Writes to path atomically, by writing to temp file and renaming it."""
tmp_path = "%s%s_%s" % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)
with tf.io.gfile.GFile(tmp_path, mode) as file_:
yield file_... |
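The truncated tail presumably renames the temp file into place; a complete sketch of the pattern, with `INCOMPLETE_SUFFIX` standing in for `constants.INCOMPLETE_SUFFIX`:
```python
import contextlib
import uuid

import tensorflow as tf

INCOMPLETE_SUFFIX = ".incomplete"  # stand-in for constants.INCOMPLETE_SUFFIX

@contextlib.contextmanager
def atomic_write_sketch(path, mode):
  """Writes to a temp file, then renames it over the target path on success."""
  tmp_path = "%s%s_%s" % (path, INCOMPLETE_SUFFIX, uuid.uuid4().hex)
  with tf.io.gfile.GFile(tmp_path, mode) as file_:
    yield file_
  tf.io.gfile.rename(tmp_path, path, overwrite=True)
```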
Given a hash constructor, returns checksum digest and size of file.
def read_checksum_digest(path, checksum_cls=hashlib.sha256):
"""Given a hash constructor, returns checksum digest and size of file."""
checksum = checksum_cls()
size = 0
with tf.io.gfile.GFile(path, "rb") as f:
while True:
block = f.... |
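Completing the truncated read loop (the block size is an assumption):
```python
import hashlib

def read_checksum_digest_sketch(path, checksum_cls=hashlib.sha256):
  """Returns (hex_digest, file_size), reading the file in fixed-size blocks."""
  checksum = checksum_cls()
  size = 0
  with open(path, "rb") as f:
    while True:
      block = f.read(8192)  # block size is an assumption
      if not block:
        break
      size += len(block)
      checksum.update(block)
  return checksum.hexdigest(), size
```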
Reraise an exception with an additional message.
def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback) |
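Usage sketch: `reraise` must be called from inside an `except` block, where `sys.exc_info()` is populated.
```python
try:
  int("not a number")
except ValueError:
  reraise("while parsing the 'count' field")
# ValueError: invalid literal for int() with base 10: 'not a number'
# while parsing the 'count' field
```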
Get attr that handles dots in attr name.
def rgetattr(obj, attr, *args):
"""Get attr that handles dots in attr name."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split(".")) |
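Usage sketch showing the dotted lookup and the optional default:
```python
class Inner(object):
  value = 42

class Outer(object):
  inner = Inner()

rgetattr(Outer(), "inner.value")       # 42
rgetattr(Outer(), "inner.missing", 0)  # 0, the default (as with getattr)
```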
Returns SplitGenerators.
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
image_tar_file = os.path.join(dl_manager.manual_dir,
self.builder_config.file_name)
if not tf.io.gfile.exists(image_tar_file):
# The current celebahq generation code depe... |
This function returns the examples in the raw (text) form.
def _generate_examples(self, source_file, target_file):
"""This function returns the examples in the raw (text) form."""
with tf.io.gfile.GFile(source_file) as f:
source_sentences = f.read().split("\n")
with tf.io.gfile.GFile(target_file) as ... |
This function returns the examples in the raw (text) form.
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
rows_per_pair_id = collections.defaultdict(list)
with tf.io.gfile.GFile(filepath) as f:
reader = csv.DictReader(f, delimiter='\t', quoti... |
Yields examples.
def _generate_example(self, data_path, image_id):
"""Yields examples."""
image_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/JPEGImages", "{}.jpg".format(image_id))
annon_filepath = os.path.join(
data_path, "VOCdevkit/VOC2007/Annotations", "{}.xml".format(image_id)... |
Update the encoding format.
def set_encoding_format(self, encoding_format):
"""Update the encoding format."""
supported = ENCODE_FN.keys()
if encoding_format not in supported:
raise ValueError('`encoding_format` must be one of %s.' % supported)
self._encoding_format = encoding_format |
Update the shape.
def set_shape(self, shape):
"""Update the shape."""
channels = shape[-1]
acceptable_channels = ACCEPTABLE_CHANNELS[self._encoding_format]
if channels not in acceptable_channels:
raise ValueError('Acceptable `channels` for %s: %s (was %s)' % (
self._encoding_format, acc... |
Returns np_image encoded as jpeg or png.
def _encode_image(self, np_image):
"""Returns np_image encoded as jpeg or png."""
if np_image.dtype != np.uint8:
raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
utils.assert_shape_match(np_image.shape, self._shape)
return self._ru... |
Convert the given image into a dict convertible to tf example.
def encode_example(self, image_or_path_or_fobj):
"""Convert the given image into a dict convertible to tf example."""
if isinstance(image_or_path_or_fobj, np.ndarray):
encoded_image = self._encode_image(image_or_path_or_fobj)
elif isinsta... |
Reconstruct the image from the tf example.
def decode_example(self, example):
"""Reconstruct the image from the tf example."""
img = tf.image.decode_image(
example, channels=self._shape[-1], dtype=tf.uint8)
img.set_shape(self._shape)
return img |
See base class for details.
def save_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
filepath = _get_metadata_filepath(data_dir, feature_name)
with tf.io.gfile.GFile(filepath, 'w') as f:
json.dump({
'shape': [-1 if d is None else d for d in self._shape],
... |
See base class for details.
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data =... |
Create a moving image sequence from the given image and left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channel... |
Construct a linear trajectory from x0.
Args:
x0: N-D float tensor.
velocity: N-D float tensor
t: [sequence_length]-length float tensor
Returns:
x: [sequence_length, ndims] float tensor.
def _get_linear_trajectory(x0, velocity, t):
"""Construct a linear trajectory from x0.
Args:
x0: N-D f... |
Turn simple static images into sequences of the originals bouncing around.
Adapted from Srivastava et al.
http://www.cs.toronto.edu/~nitish/unsupervised_video/
Example usage:
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.video import moving_sequence
tf.c... |
Returns splits.
def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord" % split for split in _SPLITS
}
dl_urls["instrument_labels"] = (_BASE_DOWNLOAD_PATH +
"instrument_labels.txt")
dl_paths = ... |
Return the tuple (major, minor, patch) version extracted from the str.
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if no... |
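A complete sketch of the version parsing (the two regexes are simplified stand-ins for `_VERSION_RESOLVED_REG` and `_VERSION_WILDCARD_REG`):
```python
import re

_RESOLVED_RE = re.compile(r"^(\d+)\.(\d+)\.(\d+)$")
_WILDCARD_RE = re.compile(r"^(\d+|\*)\.(\d+|\*)\.(\d+|\*)$")

def str_to_version(version_str, allow_wildcard=False):
  reg = _WILDCARD_RE if allow_wildcard else _RESOLVED_RE
  res = reg.match(version_str)
  if not res:
    raise ValueError("Invalid version string: %s" % version_str)
  return tuple(v if v == "*" else int(v) for v in res.groups())

str_to_version("1.0.2")                       # (1, 0, 2)
str_to_version("2.*.*", allow_wildcard=True)  # (2, '*', '*')
```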
Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
def match(self, other_version):
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be ... |
Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
def _get_validation_labels(val_path):
"""Returns labels fo... |
Yields examples.
def _generate_examples(self, archive, validation_labels=None):
"""Yields examples."""
if validation_labels: # Validation split
for example in self._generate_examples_validation(archive,
validation_labels):
yield example
... |
Whether any of the filenames exist.
def do_files_exist(filenames):
"""Whether any of the filenames exist."""
preexisting = [tf.io.gfile.exists(f) for f in filenames]
return any(preexisting) |
Returns a temporary filename based on filename.
def get_incomplete_path(filename):
"""Returns a temporary filename based on filename."""
random_suffix = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
return filename + ".incomplete" + random_suffix |