Columns:
  text          string, lengths 1 to 1.02k
  class_index   int64, values 0 to 271
  source        string, 76 distinct values

Each row below is a code excerpt (text), followed by its class_index and its source file path.
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
    default_dtype = {"dtype": torch.int64}

    # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
    # np.uint64 is excluded from this conversion as there is no com...
256
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/torch_formatter.py
    value = value.transpose((2, 0, 1))
if config.DECORD_AVAILABLE and "decord" in sys.modules:
    from decord import VideoReader
    from decord.bridge import to_torch

    if isinstance(value, VideoReader):
        value._hf_bridge_out = to_torch
        return value
...
256
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/torch_formatter.py
# support for torch, tf, jax etc.
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
    data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
    if data_struct.dtyp...
256
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/torch_formatter.py
def format_row(self, pa_table: pa.Table) -> Mapping:
    row = self.numpy_arrow_extractor().extract_row(pa_table)
    row = self.python_features_decoder.decode_row(row)
    return self.recursive_tensorize(row)

def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
    column = self.numpy_ar...
256
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/torch_formatter.py
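The TorchFormatter excerpts above are what runs when a dataset is put in PyTorch format. A minimal sketch of triggering that path through the standard `datasets` API (the toy columns are hypothetical):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "class_index": [0, 1]})
ds = ds.with_format("torch")
# integer columns come back as torch.int64, per the default_dtype logic above
print(ds[0]["class_index"].dtype)  # torch.int64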
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, token_per_repo_id=None, **jnp_array_kwargs):
        super().__init__(features=features, token_per_repo_id=token_per_repo_id)
        import jax
        from jaxlib.xla_client import Device
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
if isinstance(device, Device):
    raise ValueError(
        f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
        "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
        "the device with `str()` to get its...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
f"Device with string identifier {self.device} not listed among the available " f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default " f"device: {str(jax.devices()[0])}." ) self.device = str(jax.devices()[0]) self.jnp_array_kwargs = jnp_...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
@staticmethod
def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
    import jax

    return {str(device): device for device in jax.devices()}

def _consolidate(self, column):
    import jax
    import jax.numpy as jnp

    if isinstance(column, list) and column:
...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
    # the default int precision depends on the jax config
    # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
    if jax.config.jax_enable_x64:
...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
if isinstance(value, PIL.Image.Image):
    value = np.asarray(value)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
    # We need to import torch first, otherwise later it can cause issues
    # e.g. "RuntimeError: random_device could not be read"
    # when running ...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
with jax.default_device(DEVICE_MAPPING[self.device]):
    # calling jnp.array on a np.ndarray does copy the data
    # see https://github.com/google/jax/issues/4486
    return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

def _recursive_tensorize(self, data_struct):
...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
if isinstance(data_struct, torch.Tensor):
    return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
    data_struct = data_struct.__array__()
# support for nested types like struct of list of st...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
def format_row(self, pa_table: pa.Table) -> Mapping:
    row = self.numpy_arrow_extractor().extract_row(pa_table)
    row = self.python_features_decoder.decode_row(row)
    return self.recursive_tensorize(row)

def format_column(self, pa_table: pa.Table) -> "jax.Array":
    column = self.numpy_arrow...
257
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py
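JaxFormatter is reached the same way. Note from the excerpts that `device` must be a plain string (a `jaxlib` Device object raises the ValueError quoted above) and that integer precision follows `jax_enable_x64`. A hedged sketch, assuming format kwargs are forwarded to JaxFormatter.__init__:

import jax
from datasets import Dataset

ds = Dataset.from_dict({"class_index": [0, 1]})
# pass the device as str(...), e.g. the default device
ds = ds.with_format("jax", device=str(jax.devices()[0]))
print(ds[0]["class_index"].dtype)  # int32, or int64 if jax_enable_x64 is set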
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
    def __init__(self, features=None, token_per_repo_id=None, **tf_tensor_kwargs):
        super().__init__(features=features, token_per_repo_id=token_per_repo_id)
        self.tf_tensor_kwargs = tf_tensor_kwargs
        import tensorflow as tf  # noqa:...
258
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/tf_formatter.py
    return column

def _tensorize(self, value):
    import tensorflow as tf

    if value is None:
        return value

    default_dtype = {}
    if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
        default_dtype = {"dtype": tf.int64}
    elif ...
258
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/tf_formatter.py
if isinstance(value, PIL.Image.Image):
    value = np.asarray(value)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
    # We need to import torch first, otherwise later it can cause issues
    # e.g. "RuntimeError: random_device could not be read"
    # when running ...
258
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/tf_formatter.py
if isinstance(data_struct, torch.Tensor):
    return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
    data_struct = data_struct.__array__()
# support for nested types like struct of list of st...
258
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/tf_formatter.py
def format_row(self, pa_table: pa.Table) -> Mapping:
    row = self.numpy_arrow_extractor().extract_row(pa_table)
    row = self.python_features_decoder.decode_row(row)
    return self.recursive_tensorize(row)

def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
    column = self.numpy_arrow...
258
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/tf_formatter.py
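The TensorFlow path mirrors the two above; per the _tensorize excerpt, integers default to tf.int64. A minimal sketch:

from datasets import Dataset

ds = Dataset.from_dict({"class_index": [0, 1]})
ds = ds.with_format("tf")
print(ds[0]["class_index"].dtype)  # tf.int64, per the default_dtype above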
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
    def __init__(self, features=None, token_per_repo_id=None, **np_array_kwargs):
        super().__init__(features=features, token_per_repo_id=token_per_repo_id)
        self.np_array_kwargs = np_array_kwargs

    def _consolidate(self, column):
...
259
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/np_formatter.py
def _tensorize(self, value):
    if isinstance(value, (str, bytes, type(None))):
        return value
    elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
        return value
    elif isinstance(value, np.number):
        return value

def...
259
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/np_formatter.py
if isinstance(value, PIL.Image.Image):
    return np.asarray(value, **self.np_array_kwargs)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
    # We need to import torch first, otherwise later it can cause issues
    # e.g. "RuntimeError: random_device could not be read"
    ...
259
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/np_formatter.py
if isinstance(data_struct, torch.Tensor):
    return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
    data_struct = data_struct.__array__()
# support for nested typ...
259
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/np_formatter.py
def format_row(self, pa_table: pa.Table) -> Mapping:
    row = self.numpy_arrow_extractor().extract_row(pa_table)
    row = self.python_features_decoder.decode_row(row)
    return self.recursive_tensorize(row)

def format_column(self, pa_table: pa.Table) -> np.ndarray:
    column = self.numpy_arrow_...
259
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/np_formatter.py
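NumpyFormatter is the simplest of the four: strings, bytes and None pass through _tensorize unchanged and numbers come back as NumPy scalars. A minimal sketch:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "class_index": [0, 1]})
ds = ds.with_format("numpy")
row = ds[0]
print(type(row["text"]), type(row["class_index"]))  # str and np.int64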
class DeleteFromHubCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        parser: ArgumentParser = parser.add_parser("delete_from_hub", help="Delete dataset config from the Hub")
        parser.add_argument(
            "dataset_id", help="source dataset ID, e.g. USERNAME/DATASE...
260
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/delete_from_hub.py
def run(self) -> None:
    _ = delete_from_hub(self._dataset_id, self._config_name, revision=self._revision, token=self._token)
260
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/delete_from_hub.py
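run() is a thin wrapper around a library function, so the same deletion can be scripted directly. A sketch, assuming `delete_from_hub` is importable from `datasets.hub` and using placeholder identifiers:

from datasets.hub import delete_from_hub

# placeholder repo and config; removes the config's files and metadata on the Hub
delete_from_hub("USERNAME/DATASET_NAME", "CONFIG_NAME", revision="main", token=None)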
class EnvironmentCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env", help="Print relevant system environment info.")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        info = {...
261
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/env.py
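set_defaults(func=info_command_factory) is the standard argparse dispatch pattern used across these commands: the root CLI parses the arguments and then calls args.func(args) to build and run the command. A self-contained sketch of the pattern (names hypothetical):

from argparse import ArgumentParser

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers()
env_parser = subparsers.add_parser("env", help="Print environment info.")
env_parser.set_defaults(func=lambda args: print("environment info goes here"))

args = parser.parse_args(["env"])
args.func(args)  # dispatches to whichever subcommand was registered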
class ConvertToParquetCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet")
        parser.add_argument(
            "dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_...
262
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert_to_parquet.py
def __init__(
    self,
    dataset_id: str,
    token: Optional[str],
    revision: Optional[str],
    trust_remote_code: bool,
):
    self._dataset_id = dataset_id
    self._token = token
    self._revision = revision
    self._trust_remote_code = trust_remote_code

def run...
262
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert_to_parquet.py
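As with delete_from_hub, the command stores its arguments and run() presumably forwards them to a library function. A hedged sketch of the direct call, assuming `convert_to_parquet` also lives in `datasets.hub` and takes the four arguments the command collects:

from datasets.hub import convert_to_parquet

# placeholder ID; converts a script-based dataset to Parquet files on the Hub
convert_to_parquet("USERNAME/DATASET_NAME", revision=None, token=None, trust_remote_code=False)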
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        trai...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
def __init__(self, tfds_path: str, datasets_directory: str, *args):
    self._logger = get_logger("datasets-cli/converting")
    self._tfds_path = tfds_path
    self._datasets_directory = datasets_directory

def run(self):
    if os.path.isdir(self._tfds_path):
        abs_tfds_path = os.path.a...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
for f_name in file_names:
    self._logger.info(f"Looking at file {f_name}")
    input_file = os.path.join(abs_tfds_path, f_name)
    output_file = os.path.join(abs_datasets_path, f_name)

    if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in ...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
    continue
elif "@tfds.core" in out_line:
    continue
elif "builder=self" in out_line:
    continue
elif "import tensorflow_datasets.pu...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
    out_lines.append(out_line)
    out_lines.append(HIGHLIGHT_MESSAGE_POST)
    continue
else:
...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
    tfds_imports.extend(imp.strip() for imp in match.group(1).split(",")...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
if is_builder or "wmt" in f_name:
    # We create a new directory for each dataset
    dir_name = f_name.replace(".py", "")
    output_dir = os.path.join(abs_datasets_path, dir_name)
    output_file = os.path.join(output_dir, f_name)
    os.makedirs(output_dir...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
for utils_file in utils_files:
    try:
        f_name = os.path.basename(utils_file)
        dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
        self._logger.info(f"Moving {utils_file} to {dest_folder}")
        shutil.copy(utils_file, dest_folder)
    ...
263
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py
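The import-extraction regex in the excerpt above is worth checking in isolation; this snippet reproduces it on a representative line (the input line is made up):

import re

out_line = "from tensorflow_datasets.core import features, lazy_imports_lib\n"
match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
# the capture group grabs everything after the last real "import ", dots excluded
print([imp.strip() for imp in match.group(1).split(",")])  # ['features', 'lazy_imports_lib']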
class BaseDatasetsCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
264
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/__init__.py
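Every CLI class in this section implements exactly this two-method contract. A minimal hypothetical subclass, assuming `BaseDatasetsCLICommand` is importable from `datasets.commands` as the source path suggests:

from argparse import ArgumentParser
from datasets.commands import BaseDatasetsCLICommand

class HelloCommand(BaseDatasetsCLICommand):  # hypothetical example command
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Say hello.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from datasets-cli")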
class TestCommand(BaseDatasetsCLICommand):
    __test__ = False  # to tell pytest it's not a test class
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
@staticmethod
def register_subcommand(parser: ArgumentParser):
    test_parser = parser.add_parser("test", help="Test dataset implementation.")
    test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
    test_parser.add_argument(
        "--cache_dir",
        ...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
help="Run the test without checksums and splits checks.", ) test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") test_parser.add_argument( "--clear_cache", action="store_true", help="Remove downloaded files and c...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
def __init__(
    self,
    dataset: str,
    name: str,
    cache_dir: str,
    data_dir: str,
    all_configs: bool,
    save_infos: bool,
    ignore_verifications: bool,
    force_redownload: bool,
    clear_cache: bool,
    num_proc: int,
    trust_remote_code: Option...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
"The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" "Please provide a --cache_dir that will be used to test the dataset script." ) exit(1) if save_infos: self._ignore_verifications = Tru...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
def run(self):
    logging.getLogger("filelock").setLevel(ERROR)
    if self._name is not None and self._all_configs:
        print("Both parameters `config` and `all_configs` can't be used at once.")
        exit(1)
    path, config_name = self._dataset, self._name
    module = dataset_module_f...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
def get_builders() -> Generator[DatasetBuilder, None, None]:
    if self._all_configs and builder_cls.BUILDER_CONFIGS:
        for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
            if "config_name" in module.builder_kwargs:
                yield builder_cls(
                ...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
else:
    yield builder_cls(
        config_name=config_name,
        cache_dir=self._cache_dir,
        data_dir=self._data_dir,
        **module.builder_kwargs,
    )
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
for j, builder in enumerate(get_builders()):
    print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
    builder._record_infos = os.path.exists(
        os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
    )  # record checks...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
# If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
# The dataset_infos are saved in the YAML part of the README.md
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
    dataset_readme_path = os.path.join(
        builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILE...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
# Move dataset_info back to the user
if dataset_dir is not None:
    user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
    copyfile(dataset_readme_path, user_dataset_readme_path)
    print(f"Dataset card saved at {user...
265
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py
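Per configuration, the test command essentially instantiates the builder and runs download-and-prepare with verification. Roughly the same check can be scripted directly; a sketch with a hypothetical script path and config name:

from datasets import load_dataset_builder

builder = load_dataset_builder("path/to/dataset_script.py", "default")
builder.download_and_prepare()  # downloads, generates splits, runs verifications
print(builder.info.splits)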
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compress...
266
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
Args:
    fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
    mode (:obj:``str``): Currently, only 'rb' is accepted
    target_protocol (:obj:``str``, optional): To override the FS protocol inferred from a URL.
    target_options (:obj:``dict``, optional):...
266
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
        **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
    },
    **(target_options or {}),
)
self.compressed_name = os.path.basename(self.fo.split("::")[0])
self.uncompressed_name = (
    self.compressed_name[: self.compressed_nam...
266
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
@classmethod
def _strip_protocol(cls, path):
    # compressed file paths are always relative to the archive root
    return super()._strip_protocol(path).lstrip("/")

def _get_dirs(self):
    if self.dir_cache is None:
        f = {**self._open_with_fsspec().fs.info(self.fo), "name": self.uncomp...
266
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
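Putting the base class to use: the chained-URL example from the protocol comment above can be opened through fsspec once the compression filesystems are registered (importing `datasets` does this), and per the docstring only 'rb' mode is supported. The URL below is the docstring's own placeholder:

import fsspec

import datasets  # registers the compression filesystems with fsspec

# the gzip:// layer wraps the http:// download of the placeholder URL
with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
    data = f.read()  # decompressed bytes of the single inner file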
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
267
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
268
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
269
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"
270
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
271
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py
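The five concrete subclasses differ only in the three class attributes, so supporting another codec follows the same pattern. A hypothetical example (the protocol name is made up, and `compression` must be a codec name fsspec actually knows, which for snappy requires the optional python-snappy support):

from datasets.filesystems.compression import BaseCompressedFileFileSystem

class SnappyFileSystem(BaseCompressedFileFileSystem):  # hypothetical
    """Read contents of a snappy file as a filesystem with one file inside."""

    protocol = "snappy"
    compression = "snappy"
    extension = ".snappy"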