diff --git a/testbed/graphql-python__graphene/setup.py b/testbed/graphql-python__graphene/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..6c1f29c952d460f8f9bc765f7393a9992a3589d6 --- /dev/null +++ b/testbed/graphql-python__graphene/setup.py @@ -0,0 +1,93 @@ +import ast +import codecs +import re +import sys + +from setuptools import find_packages, setup +from setuptools.command.test import test as TestCommand + +_version_re = re.compile(r"VERSION\s+=\s+(.*)") + +with open("graphene/__init__.py", "rb") as f: + version = ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)) + +path_copy = sys.path[:] + +sys.path.append("graphene") +try: + from pyutils.version import get_version + + version = get_version(version) +except Exception: + version = ".".join([str(v) for v in version]) + +sys.path[:] = path_copy + + +class PyTest(TestCommand): + user_options = [("pytest-args=", "a", "Arguments to pass to py.test")] + + def initialize_options(self): + TestCommand.initialize_options(self) + self.pytest_args = [] + + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + # import here, because outside the eggs aren't loaded + import pytest + + errno = pytest.main(self.pytest_args) + sys.exit(errno) + + +tests_require = [ + "pytest>=6,<7", + "pytest-benchmark>=3.4,<4", + "pytest-cov>=3,<4", + "pytest-mock>=3,<4", + "pytest-asyncio>=0.16,<2", + "snapshottest>=0.6,<1", + "coveralls>=3.3,<4", + "mock>=4,<5", + "pytz==2022.1", + "iso8601>=1,<2", +] + +dev_requires = ["black==22.3.0", "flake8>=4,<5"] + tests_require + +setup( + name="graphene", + version=version, + description="GraphQL Framework for Python", + long_description=codecs.open( + "README.rst", "r", encoding="ascii", errors="replace" + ).read(), + url="https://github.com/graphql-python/graphene", + author="Syrus Akbary", + author_email="me@syrusakbary.com", + license="MIT", + classifiers=[ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Topic :: Software Development :: Libraries", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + ], + keywords="api graphql protocol rest relay graphene", + packages=find_packages(exclude=["examples*"]), + install_requires=[ + "graphql-core>=3.1,<3.3", + "graphql-relay>=3.1,<3.3", + "aniso8601>=8,<10", + ], + tests_require=tests_require, + extras_require={"test": tests_require, "dev": dev_requires}, + cmdclass={"test": PyTest}, +) diff --git a/testbed/huggingface__accelerate/.devcontainer/devcontainer.json b/testbed/huggingface__accelerate/.devcontainer/devcontainer.json new file mode 100644 index 0000000000000000000000000000000000000000..9d44afde7995e5506d4f0052cb8d85a35eae5f1b --- /dev/null +++ b/testbed/huggingface__accelerate/.devcontainer/devcontainer.json @@ -0,0 +1,25 @@ +// File only needed for VSCode users to have proper Docker based interpreters +{ + "name": "accelerate_dev_environment", + "build": { + // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment + "dockerfile": "../docker/accelerate-cpu/Dockerfile" +// "dockerfile": "../docker/accelerate-gpu/Dockerfile" + }, + "runArgs": [ + // ACTION NEEDED: uncomment the next line if your local machine has GPUs available +// "--gpus", "all", + // Enable the docker container to access
system resources + "--ipc", "host" + ], + "remoteEnv": { + "PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}" + }, + "extensions": [ + // Ensure we have IntelliSense in VSCode when running inside container + "ms-python.python" + ], + "workspaceFolder": "/workspaces/accelerate", + // Need git for VSCode to color code modifications. Only runs when building environment. + "onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'" +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.github/ISSUE_TEMPLATE/bug-report.yml b/testbed/huggingface__accelerate/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000000000000000000000000000000000000..3ed39ac4bb75eb9a6209513c9de318369fa7c2db --- /dev/null +++ b/testbed/huggingface__accelerate/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,58 @@ +name: "\U0001F41B Bug Report" +description: Submit a bug report to help us improve Accelerate +body: + - type: textarea + id: system-info + attributes: + label: System Info + description: Please share your accelerate configuration with us. You can run the command `accelerate env` and copy-paste its outputs below + render: Shell + placeholder: accelerate version, OS, python version, numpy version, torch version, and accelerate's configuration + validations: + required: true + + - type: checkboxes + id: information-scripts-examples + attributes: + label: Information + description: 'The problem arises when using:' + options: + - label: "The official example scripts" + - label: "My own modified scripts" + + - type: checkboxes + id: information-tasks + attributes: + label: Tasks + description: "The tasks I am working on are:" + options: + - label: "One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)" + - label: "My own task or dataset (give details below)" + + - type: textarea + id: reproduction + validations: + required: true + attributes: + label: Reproduction + description: | + Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet. + If you have code snippets, error messages, stack traces please provide them here as well. + Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting + Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code. + + placeholder: | + Steps to reproduce the behavior: + + 1. + 2. + 3. + + - type: textarea + id: expected-behavior + validations: + required: true + attributes: + label: Expected behavior + description: "A clear and concise description of what you would expect to happen." 
+ render: Shell diff --git a/testbed/huggingface__accelerate/.github/workflows/build-docker-images-release.yml b/testbed/huggingface__accelerate/.github/workflows/build-docker-images-release.yml new file mode 100644 index 0000000000000000000000000000000000000000..be0c76c48bab5d67b67d8b5aa93dafd2f5d322e5 --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/build-docker-images-release.yml @@ -0,0 +1,64 @@ +name: Build Docker images (releases) + +on: + workflow_dispatch: + release: + types: [published] + +concurrency: + group: docker-image-builds + cancel-in-progress: false + +jobs: + get-version: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.step1.outputs.version }} + steps: + - uses: actions/checkout@v3 + - id: step1 + run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT + + version-cpu: + name: "Latest Accelerate CPU [version]" + runs-on: ubuntu-latest + needs: get-version + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Check out code + uses: actions/checkout@v2 + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build and Push CPU + uses: docker/build-push-action@v2 + with: + context: ./docker/accelerate-cpu + push: true + tags: huggingface/accelerate-cpu:${{needs.get-version.outputs.version}} + + version-cuda: + name: "Latest Accelerate GPU [version]" + runs-on: ubuntu-latest + needs: get-version + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Check out code + uses: actions/checkout@v2 + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Build and Push GPU + uses: docker/build-push-action@v2 + with: + context: ./docker/accelerate-gpu + push: true + tags: huggingface/accelerate-gpu:${{needs.get-version.outputs.version}} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.github/workflows/build_and_run_tests.yml b/testbed/huggingface__accelerate/.github/workflows/build_and_run_tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..68e11c44adb5d6ff6b4dcb536139e3b026ddf877 --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/build_and_run_tests.yml @@ -0,0 +1,45 @@ +name: Trigger docker images and run tests + +on: + push: + branches: + - main + workflow_dispatch: + +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + +jobs: + check-for-source: + runs-on: ubuntu-latest + name: Check if setup was changed + outputs: + changed: ${{ steps.was_changed.outputs.changed }} + steps: + - uses: actions/checkout@v3.1.0 + with: + fetch-depth: "2" + + - name: Get changed files + id: changed-files + uses: tj-actions/changed-files@v22.2 + + - name: Was setup changed + id: was_changed + run: | + for file in ${{ steps.changed-files.outputs.all_changed_files }}; do + if [ `basename "${file}"` == "setup.py" ]; then + echo "changed=1" >> $GITHUB_OUTPUT + fi + done + + build-docker-containers: + needs: check-for-source + if: (github.event_name == 'push') && (needs.check-for-source.outputs.changed == '1') + uses: ./.github/workflows/build_docker_images.yml + secrets: inherit + + run-merge-tests: + needs: build-docker-containers + if: always() + uses: ./.github/workflows/run_merge_tests.yml \ No newline at end of file diff --git 
a/testbed/huggingface__accelerate/.github/workflows/build_documentation.yml b/testbed/huggingface__accelerate/.github/workflows/build_documentation.yml new file mode 100644 index 0000000000000000000000000000000000000000..082ece25ed7ce7d91b9982364b54c0e2da910e56 --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/build_documentation.yml @@ -0,0 +1,17 @@ +name: Build documentation + +on: + push: + branches: + - main + - doc-builder* + - v*-release + +jobs: + build: + uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main + with: + commit_sha: ${{ github.sha }} + package: accelerate + secrets: + token: ${{ secrets.HUGGINGFACE_PUSH }} diff --git a/testbed/huggingface__accelerate/.github/workflows/build_pr_documentation.yml b/testbed/huggingface__accelerate/.github/workflows/build_pr_documentation.yml new file mode 100644 index 0000000000000000000000000000000000000000..dc56751c6ab1dda67e8feca76c71187f92cfa2db --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/build_pr_documentation.yml @@ -0,0 +1,16 @@ +name: Build PR Documentation + +on: + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + build: + uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main + with: + commit_sha: ${{ github.event.pull_request.head.sha }} + pr_number: ${{ github.event.number }} + package: accelerate diff --git a/testbed/huggingface__accelerate/.github/workflows/delete_doc_comment.yml b/testbed/huggingface__accelerate/.github/workflows/delete_doc_comment.yml new file mode 100644 index 0000000000000000000000000000000000000000..da61d21dff3236200ebe16ad1522d950028d6ea6 --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/delete_doc_comment.yml @@ -0,0 +1,13 @@ +name: Delete dev documentation + +on: + pull_request: + types: [ closed ] + + +jobs: + delete: + uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main + with: + pr_number: ${{ github.event.number }} + package: accelerate diff --git a/testbed/huggingface__accelerate/.github/workflows/nightly.yml b/testbed/huggingface__accelerate/.github/workflows/nightly.yml new file mode 100644 index 0000000000000000000000000000000000000000..277a81df9dc5bbe3fc74b2e6e1dabf0152d0b67f --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/nightly.yml @@ -0,0 +1,88 @@ +name: Self-hosted runner with slow tests (scheduled) + +on: + workflow_dispatch: + schedule: + - cron: "0 2 * * *" + +env: + RUN_SLOW: "yes" + IS_GITHUB_CI: "1" + +jobs: + run_all_tests_single_gpu: + runs-on: [self-hosted, docker-gpu, multi-gpu] + env: + CUDA_VISIBLE_DEVICES: "0" + container: + image: huggingface/accelerate-gpu:latest + options: --gpus all --shm-size "16gb" + defaults: + run: + working-directory: accelerate/ + shell: bash + steps: + - name: Update clone & pip install + run: | + source activate accelerate + git config --global --add safe.directory '*' + git fetch && git checkout ${{ github.sha }} + pip install -e . 
--no-deps + pip install pytest-reportlog + + - name: Run test on GPUs + run: | + source activate accelerate + make test + - name: Run examples on GPUs + run: | + source activate accelerate + pip uninstall comet_ml -y + make test_examples + + - name: Generate Report + if: always() + run: | + python utils/log_reports.py >> $GITHUB_STEP_SUMMARY + + run_all_tests_multi_gpu: + runs-on: [self-hosted, docker-gpu, multi-gpu] + env: + CUDA_VISIBLE_DEVICES: "0,1" + container: + image: huggingface/accelerate-gpu:latest + options: --gpus all --shm-size "16gb" + defaults: + run: + working-directory: accelerate/ + shell: bash + steps: + - name: Update clone + run: | + source activate accelerate + git config --global --add safe.directory '*' + git fetch && git checkout ${{ github.sha }} + pip install -e . --no-deps + pip install pytest-reportlog + + - name: Run core and big modeling tests on GPUs + run: | + source activate accelerate + make test_big_modeling + make test_core + + - name: Run Integration tests on GPUs + run: | + source activate accelerate + make test_integrations + + - name: Run examples on GPUs + run: | + source activate accelerate + pip uninstall comet_ml -y + make test_examples + + - name: Generate Report + if: always() + run: | + python utils/log_reports.py >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.github/workflows/quality.yml b/testbed/huggingface__accelerate/.github/workflows/quality.yml new file mode 100644 index 0000000000000000000000000000000000000000..5d4707b4e6651b19bb0892367f7a5d94740eca3a --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/quality.yml @@ -0,0 +1,17 @@ +name: Quality Check + +on: [pull_request] + +jobs: + quality: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v3 + with: + python-version: 3.7 + - name: Install Python dependencies + run: pip install -e .[quality] + - name: Run Quality check + run: make quality \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.github/workflows/run_merge_tests.yml b/testbed/huggingface__accelerate/.github/workflows/run_merge_tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..fcbb62f53807c1dcdfcef9d1d60dca34bdcf5c7a --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/run_merge_tests.yml @@ -0,0 +1,89 @@ +name: Self-hosted runner tests (push to "main") + +on: + workflow_call: + workflow_dispatch: + +env: + TESTING_MOCKED_DATALOADERS: "1" + IS_GITHUB_CI: "1" + +jobs: + run_all_tests_single_gpu: + runs-on: [self-hosted, docker-gpu, multi-gpu] + env: + CUDA_VISIBLE_DEVICES: "0" + container: + image: huggingface/accelerate-gpu:latest + options: --gpus all --shm-size "16gb" + defaults: + run: + working-directory: accelerate/ + shell: bash + steps: + - name: Update clone & pip install + run: | + source activate accelerate + git config --global --add safe.directory '*' + git fetch && git checkout ${{ github.sha }} + pip install -e .[testing,test_trackers] + pip install pytest-reportlog + + - name: Run CLI tests + run: | + source activate accelerate + make test_cli + + - name: Run test on GPUs + run: | + source activate accelerate + make test + - name: Run examples on GPUs + run: | + source activate accelerate + pip uninstall comet_ml -y + make test_examples + + - name: Generate Report + if: always() + run: | + python utils/log_reports.py >> $GITHUB_STEP_SUMMARY + + run_all_tests_multi_gpu: + runs-on: [self-hosted, docker-gpu, 
multi-gpu] + container: + image: huggingface/accelerate-gpu:latest + options: --gpus all --shm-size "16gb" + defaults: + run: + working-directory: accelerate/ + shell: bash + steps: + - name: Update clone + run: | + source activate accelerate + git config --global --add safe.directory '*' + git fetch && git checkout ${{ github.sha }} + pip install -e .[testing,test_trackers] + pip install pytest-reportlog + + - name: Run CLI tests + run: | + source activate accelerate + make test_cli + + - name: Run test on GPUs + run: | + source activate accelerate + make test + + - name: Run examples on GPUs + run: | + source activate accelerate + pip uninstall comet_ml -y + make test_examples + + - name: Generate Report + if: always() + run: | + python utils/log_reports.py >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.github/workflows/stale.yml b/testbed/huggingface__accelerate/.github/workflows/stale.yml new file mode 100644 index 0000000000000000000000000000000000000000..c98ce0bb16fcfd8024862df839ad4e7750d91257 --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/stale.yml @@ -0,0 +1,28 @@ +name: Stale Bot + +on: + schedule: + - cron: "0 15 * * *" + workflow_dispatch: + +jobs: + close_stale_issues: + name: Close Stale Issues + if: github.repository == 'huggingface/accelerate' + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v2 + + - name: Setup Python + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Install requirements + run: | + pip install PyGithub + - name: Close stale issues + run: | + python utils/stale.py \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.github/workflows/test.yml b/testbed/huggingface__accelerate/.github/workflows/test.yml new file mode 100644 index 0000000000000000000000000000000000000000..691397b0d75543603699a7e015cf0277c904ea89 --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/test.yml @@ -0,0 +1,73 @@ +name: Run Tests + +on: + pull_request: + paths: + - "src/**" + - "tests/**" + - ".github/**" + - "examples/**" + - "setup.py" + types: [opened, synchronize, reopened] + +env: + HF_HOME: ~/hf_cache + TESTING_MOCKED_DATALOADERS: "1" + IS_GITHUB_CI: "1" + +jobs: + run-tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + pytorch-version: [ + latest, + minimum + ] + test-kind: [ + test_prod, + test_core, + test_cli, + test_big_modeling, + test_deepspeed, + test_fsdp, + test_example_differences, + test_checkpoint_step, + test_checkpoint_epoch, + test_rest + ] + steps: + - uses: actions/checkout@v3.1.0 + - name: Set up python 3.7 + uses: actions/setup-python@v3 + with: + python-version: 3.7 + + - name: Activate python cache + uses: actions/cache@v3 + with: + path: | + ${{ env.pythonLocation }} + ${{ env.HF_HOME }} + key: ${{ env.pythonLocation }}-${{ matrix.pytorch-version }}-${{ matrix.test-kind }}-${{ hashFiles('setup.py') }} + + - name: Install the library + run: | + pip install --upgrade pip + if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi + if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi + if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi + if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torch==1.6.0; fi + pip install pytest-reportlog + + - name: Run Tests + env: + PYTORCH_VERSION: ${{ matrix.pytorch-version }} + run: | + make 
${{ matrix.test-kind }} + + - name: Generate Report + if: always() + run: | + python utils/log_reports.py >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/testbed/huggingface__accelerate/.gitignore b/testbed/huggingface__accelerate/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..da99824aa34e856e9aeb2e74bbfc324e7acc6d36 --- /dev/null +++ b/testbed/huggingface__accelerate/.gitignore @@ -0,0 +1,141 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# VSCode +.vscode + +# IntelliJ +.idea + +# Mac .DS_Store +.DS_Store + +# More test things +wandb \ No newline at end of file diff --git a/testbed/huggingface__accelerate/CODE_OF_CONDUCT.md b/testbed/huggingface__accelerate/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..c8ad966288a9faeeb71b2fad3ba12f6048e1a03f --- /dev/null +++ b/testbed/huggingface__accelerate/CODE_OF_CONDUCT.md @@ -0,0 +1,129 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +feedback@huggingface.co. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/testbed/huggingface__accelerate/CONTRIBUTING.md b/testbed/huggingface__accelerate/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..d0e142b16e614e66df6fef90185f6c5996428464 --- /dev/null +++ b/testbed/huggingface__accelerate/CONTRIBUTING.md @@ -0,0 +1,238 @@ + + +# How to contribute to 🤗 Accelerate? + +Everyone is welcome to contribute, and we value everybody's contribution. Code +is thus not the only way to help the community. Answering questions, helping +others, reaching out and improving the documentation are immensely valuable to +the community. + +It also helps us if you spread the word: reference the library from blog posts +on the awesome projects it made possible, shout out on Twitter every time it has +helped you, or simply star the repo to say "thank you". + +Whichever way you choose to contribute, please be mindful to respect our +[code of conduct](https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md). + +## You can contribute in so many ways! + +Some of the ways you can contribute to Accelerate: +* Fixing outstanding issues with the existing code; +* Contributing to the examples or to the documentation; +* Submitting issues related to bugs or desired new features. + +## Submitting a new issue or feature request + +Do your best to follow these guidelines when submitting an issue or a feature +request. It will make it easier for us to come back to you quickly and with good +feedback. + +### Did you find a bug? + +The 🤗 Accelerate library is robust and reliable thanks to the users who notify us of +the problems they encounter. So thank you for reporting an issue. + +First, we would really appreciate it if you could **make sure the bug was not +already reported** (use the search bar on GitHub under Issues). + +Did not find it? :( So we can act quickly on it, please follow these steps: + +* Include your **OS type and version**, the versions of **Python** and **PyTorch**. +* A short, self-contained code snippet that allows us to reproduce the bug in + less than 30s; +* Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yaml`) + +### Do you want a new feature?
+ +A good feature request addresses the following points: + +1. Motivation first: +* Is it related to a problem/frustration with the library? If so, please explain + why. Providing a code snippet that demonstrates the problem is best. +* Is it related to something you would need for a project? We'd love to hear + about it! +* Is it something you worked on and think could benefit the community? + Awesome! Tell us what problem it solved for you. +2. Write a *full paragraph* describing the feature; +3. Provide a **code snippet** that demonstrates its future use; +4. In case this is related to a paper, please attach a link; +5. Attach any additional information (drawings, screenshots, etc.) you think may help. + +If your issue is well written, we're already 80% of the way there by the time you +post it. + +## Submitting a pull request (PR) + +Before writing code, we strongly advise you to search through the existing PRs or +issues to make sure that nobody is already working on the same thing. If you are +unsure, it is always a good idea to open an issue to get some feedback. + +You will need basic `git` proficiency to be able to contribute to +🤗 Accelerate. `git` is not the easiest tool to use but it has the greatest +manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro +Git](https://git-scm.com/book/en/v2) is a very good reference. + +Follow these steps to start contributing: + +1. Fork the [repository](https://github.com/huggingface/accelerate) by + clicking on the 'Fork' button on the repository's page. This creates a copy of the code + under your GitHub user account. + +2. Clone your fork to your local disk, and add the base repository as a remote. The following command + assumes you have your public SSH key uploaded to GitHub. See the following guide for more + [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). + + ```bash + $ git clone git@github.com:<your GitHub handle>/accelerate.git + $ cd accelerate + $ git remote add upstream https://github.com/huggingface/accelerate.git + ``` + +3. Create a new branch to hold your development changes, and do this for every new PR you work on. + + Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)): + + ```bash + $ git checkout main + $ git fetch upstream + $ git merge upstream/main + ``` + + Once your `main` branch is synchronized, create a new branch from it: + + ```bash + $ git checkout -b a-descriptive-name-for-my-changes + ``` + + **Do not** work on the `main` branch. + +4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library: + + ```bash + $ pip install -e ".[quality]" + ``` + + (If accelerate was already installed in the virtual environment, remove + it with `pip uninstall accelerate` before reinstalling it in editable + mode with the `-e` flag.) + + Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using + the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers). + +5. Develop the features on your branch. + + As you work on the features, you should make sure that the test suite + passes.
You should run the tests impacted by your changes like this: + + ```bash + $ pytest tests/<TEST_TO_RUN>.py + ``` + + > For the following commands leveraging the `make` utility, we recommend using the WSL system when running on + > Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about). + + You can also run the full suite with the following command. + + ```bash + $ make test + ``` + + `accelerate` relies on `black` and `isort` to format its source code + consistently. After you make changes, apply automatic style corrections and code verifications + that can't be automated in one go; the corresponding `make` target is also optimized to only work with files modified by the PR you're working on. + + If you prefer to run the checks one after the other, the following command applies the + style corrections: + + ```bash + $ make style + ``` + + `accelerate` also uses `flake8` and a few custom scripts to check for coding mistakes. Quality + control runs in CI; however, you can also run the same checks with: + + ```bash + $ make quality + ``` + + Once you're happy with your changes, add changed files using `git add` and + make a commit with `git commit` to record your changes locally: + + ```bash + $ git add modified_file.py + $ git commit + ``` + + Please write [good commit messages](https://chris.beams.io/posts/git-commit/). + + It is a good idea to sync your copy of the code with the original + repository regularly. This way you can quickly account for changes: + + ```bash + $ git fetch upstream + $ git rebase upstream/main + ``` + + Push the changes to your account using: + + ```bash + $ git push -u origin a-descriptive-name-for-my-changes + ``` + +6. Once you are satisfied (**and the checklist below is happy too**), go to the + webpage of your fork on GitHub. Click on 'Pull request' to send your changes + to the project maintainers for review. + +7. It's ok if maintainers ask you for changes. It happens to core contributors + too! So everyone can see the changes in the Pull request, work in your local + branch and push the changes to your fork. They will automatically appear in + the pull request. + + +### Checklist + +1. The title of your pull request should be a summary of its contribution; +2. If your pull request addresses an issue, please mention the issue number in + the pull request description to make sure they are linked (and people + consulting the issue know you are working on it); +3. To indicate a work in progress, please prefix the title with `[WIP]`, or mark + the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate + it from PRs ready to be merged; +4. Make sure existing tests pass; +5. Add high-coverage tests. No quality testing = no merge. + +See an example of a good PR here: https://github.com/huggingface/accelerate/pull/255 + +### Tests + +An extensive test suite is included to test the library behavior and several examples. Library tests can be found in +the [tests folder](https://github.com/huggingface/accelerate/tree/main/tests). + +We use `pytest` in order to run the tests. From the root of the +repository, here's how to run tests with `pytest` for the library: + +```bash +$ python -m pytest -sv ./tests +``` + +In fact, that's how `make test` is implemented (sans the `pip install` line)! + +You can specify a smaller set of tests in order to test only the feature +you're working on.
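+
+For example, here is a minimal sketch of narrowing a run with `pytest`'s keyword filter; the `-k` expression below is illustrative, not the name of a real test:
+
+```bash
+$ python -m pytest -sv ./tests/test_big_modeling.py -k "cpu"
+```
+
+Only tests whose names match the `-k` expression are collected, so you can iterate on a single test function without paying for the whole file.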
\ No newline at end of file diff --git a/testbed/huggingface__accelerate/LICENSE b/testbed/huggingface__accelerate/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/testbed/huggingface__accelerate/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the same + "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/testbed/huggingface__accelerate/Makefile b/testbed/huggingface__accelerate/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..57ecab5d7b08b6ed4244810c52fd861206427999 --- /dev/null +++ b/testbed/huggingface__accelerate/Makefile @@ -0,0 +1,67 @@ +.PHONY: quality style test docs + +check_dirs := tests src examples benchmarks + +# Check that source code meets quality standards + +extra_quality_checks: + python utils/check_copies.py + python utils/check_dummies.py + python utils/check_repo.py + doc-builder style src/accelerate docs/source --max_len 119 + +# this target runs checks on all files +quality: + black --check $(check_dirs) + isort --check-only $(check_dirs) + flake8 $(check_dirs) + doc-builder style src/accelerate docs/source --max_len 119 --check_only + +# Format source code automatically and check if there are any problems left that need manual fixing +style: + black $(check_dirs) + isort $(check_dirs) + doc-builder style src/accelerate docs/source --max_len 119 + +# Run tests for the library +test: + python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_all.log",) + +test_big_modeling: + python -m pytest -s -v ./tests/test_big_modeling.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",) + +test_core: + python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \ + --ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",) + +test_cli: + python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",) + +test_deepspeed: + python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_deepspeed.log",) + +test_fsdp: + python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",) + +test_examples: + python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",) + +# Broken down example tests for the CI runners +test_integrations: + python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",) + +test_example_differences: + python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",) + +test_checkpoint_epoch: + python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_epoch.log",) + +test_checkpoint_step: + python -m pytest -s -v
./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_step.log",) + +# Same as test but used to install only the base dependencies +test_prod: + $(MAKE) test_core + +test_rest: + python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",) diff --git a/testbed/huggingface__accelerate/README.md b/testbed/huggingface__accelerate/README.md new file mode 100644 index 0000000000000000000000000000000000000000..52cd4135823168ff93b2e6a137ef6c3f5546e629 --- /dev/null +++ b/testbed/huggingface__accelerate/README.md @@ -0,0 +1,259 @@ +<!-- Project logo and badges: License, Documentation, GitHub release, Contributor Covenant --> + +<h3 align="center"> +<p>Run your *raw* PyTorch training script on any kind of device</p> +</h3>
+ +## Easy to integrate + +🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16. + +🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged. + +Here is an example: + +```diff + import torch + import torch.nn.functional as F + from datasets import load_dataset ++ from accelerate import Accelerator + ++ accelerator = Accelerator() +- device = 'cpu' ++ device = accelerator.device + + model = torch.nn.Transformer().to(device) + optimizer = torch.optim.Adam(model.parameters()) + + dataset = load_dataset('my_dataset') + data = torch.utils.data.DataLoader(dataset, shuffle=True) + ++ model, optimizer, data = accelerator.prepare(model, optimizer, data) + + model.train() + for epoch in range(10): + for source, targets in data: + source = source.to(device) + targets = targets.to(device) + + optimizer.zero_grad() + + output = model(source) + loss = F.cross_entropy(output, targets) + +- loss.backward() ++ accelerator.backward(loss) + + optimizer.step() +``` + +As you can see in this example, by adding 5 lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp16). + +In particular, the same code can then be run without modification on your local machine for debugging or in your training environment. + +🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further: + +```diff + import torch + import torch.nn.functional as F + from datasets import load_dataset ++ from accelerate import Accelerator + +- device = 'cpu' ++ accelerator = Accelerator() + +- model = torch.nn.Transformer().to(device) ++ model = torch.nn.Transformer() + optimizer = torch.optim.Adam(model.parameters()) + + dataset = load_dataset('my_dataset') + data = torch.utils.data.DataLoader(dataset, shuffle=True) + ++ model, optimizer, data = accelerator.prepare(model, optimizer, data) + + model.train() + for epoch in range(10): + for source, targets in data: +- source = source.to(device) +- targets = targets.to(device) + + optimizer.zero_grad() + + output = model(source) + loss = F.cross_entropy(output, targets) + +- loss.backward() ++ accelerator.backward(loss) + + optimizer.step() +``` + +Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples). + +## Launching script + +🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.launch` or to write a specific launcher for TPU training! +On your machine(s) just run: + +```bash +accelerate config +``` + +and answer the questions asked.
This will generate a config file that will be used automatically to properly set the default options when doing + +```bash +accelerate launch my_script.py --args_to_my_script +``` + +For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo): + +```bash +accelerate launch examples/nlp_example.py +``` + +This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torch.distributed.launch my_script.py` at your convenience. + +## Launching multi-CPU run using MPI + +🤗 Here is another way to launch a multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well. +Once you have MPI set up on your cluster, just run: + +```bash +mpirun -np 2 python examples/nlp_example.py +``` + +## Launching training using DeepSpeed + +🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed-related args from your Python script, we provide the `DeepSpeedPlugin`. + +```python +from accelerate import Accelerator, DeepSpeedPlugin + +# deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it +# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed (see the sketch further below) +deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2) +accelerator = Accelerator(fp16=True, deepspeed_plugin=deepspeed_plugin) + +# How to save your 🤗 Transformer? +accelerator.wait_for_everyone() +unwrapped_model = accelerator.unwrap_model(model) +unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model)) +``` + +Note: DeepSpeed support is experimental for now. If you run into a problem, please open an issue. + +## Launching your training from a notebook + +🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then, in your last cell, add: + +```python +from accelerate import notebook_launcher + +notebook_launcher(training_function) +``` + +An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) + +## Why should I use 🤗 Accelerate? + +You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object. + +## Why shouldn't I use 🤗 Accelerate? + +You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that; 🤗 Accelerate is not one of them.
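+
+If you do write your own loop, the manual gradient accumulation mentioned in the DeepSpeed section above is a good example of how little extra machinery the `Accelerator` object asks of you. Here is a minimal sketch, reusing the model/optimizer/dataloader setup from the examples above; the accumulation factor of 2 is illustrative and must match whatever you pass to `DeepSpeedPlugin`:
+
+```python
+import torch
+import torch.nn.functional as F
+from accelerate import Accelerator
+
+accelerator = Accelerator()
+
+model = torch.nn.Transformer()
+optimizer = torch.optim.Adam(model.parameters())
+data = torch.utils.data.DataLoader(dataset, shuffle=True)  # `dataset` as in the examples above
+
+model, optimizer, data = accelerator.prepare(model, optimizer, data)
+
+gradient_accumulation_steps = 2  # illustrative value; keep it in sync with your DeepSpeedPlugin
+
+model.train()
+for step, (source, targets) in enumerate(data):
+    output = model(source)
+    # Scale the loss so the gradients summed over the window average out correctly
+    loss = F.cross_entropy(output, targets) / gradient_accumulation_steps
+    accelerator.backward(loss)
+    # Step and zero the gradients only once per accumulation window
+    if (step + 1) % gradient_accumulation_steps == 0:
+        optimizer.step()
+        optimizer.zero_grad()
+```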
+ +## Frameworks using 🤗 Accelerate + +If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around your training loop, some frameworks that are built on top of 🤗 Accelerate are listed below: + +* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76). +* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model train, and inference logic. +* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms. +* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library. +* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centred around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves! + + +## Installation + +This repository is tested on Python 3.6+ and PyTorch 1.4.0+ + +You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). + +First, create a virtual environment with the version of Python you're going to use and activate it. + +Then, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows: + +```bash +pip install accelerate +``` + +## Supported integrations + +- CPU only +- multi-CPU on one node (machine) +- multi-CPU on several nodes (machines) +- single GPU +- multi-GPU on one node (machine) +- multi-GPU on several nodes (machines) +- TPU +- FP16 with native AMP (apex on the roadmap) +- DeepSpeed support (Experimental) +- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental) +- Megatron-LM support (Experimental) + +## Citing 🤗 Accelerate + +If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry. 
+
+```bibtex
+@Misc{accelerate,
+  title =        {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
+  author =       {Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar},
+  howpublished = {\url{https://github.com/huggingface/accelerate}},
+  year =         {2022}
+}
+```
diff --git a/testbed/huggingface__accelerate/benchmarks/README.md b/testbed/huggingface__accelerate/benchmarks/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..243e9df58ea92bc4148101a5fcf399978cbf07f3
--- /dev/null
+++ b/testbed/huggingface__accelerate/benchmarks/README.md
@@ -0,0 +1,46 @@
+# Big model inference benchmarks
+
+Running inference with Accelerate on big models.
+
+## Setup
+
+These benchmarks use the `transformers` library:
+
+```bash
+pip install transformers
+```
+
+To reproduce or test a new setup, run
+
+```py
+python big_model_inference.py model_name
+```
+
+This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.
+
+To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`.
+
+If you get an error linked to disk offload, you need to add the option `--disk_offload`.
+
+## Results
+
+On a setup with two Titan RTX GPUs (24GB of RAM each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).
+
+| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
+|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
+| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
+| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
+| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
+| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7GB | 24.4GB | yes |
+| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
+| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
+| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |
+
+Note on the results:
+- using two GPUs instead of one does not slow down generation
+- using CPU offload slows down a bit (see OPT-30B)
+- using disk offload slows down a lot (prefetching still needs to be implemented)
+
+You will also note that Accelerate does not use any more GPU and CPU RAM than necessary:
+- peak GPU memory is exactly the size of the model put on a given GPU
+- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded to CPU, whichever is bigger.
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/benchmarks/big_model_inference.py b/testbed/huggingface__accelerate/benchmarks/big_model_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb832d1287bd3920228a37299886faa244116113
--- /dev/null
+++ b/testbed/huggingface__accelerate/benchmarks/big_model_inference.py
@@ -0,0 +1,143 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import time
+
+import torch
+
+import transformers
+from accelerate.utils import compute_module_sizes
+from measures_util import end_measure, log_measures, start_measure
+from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
+
+
+DEFAULT_MODELS = {
+    "gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"},
+    "gpt-neox": {"is_causal": True, "model": "EleutherAI/gpt-neox-20b"},
+    "opt": {"is_causal": True, "model": "facebook/opt-30b"},
+    "T0pp": {"is_causal": False, "model": "bigscience/T0pp", "model_revision": "sharded"},
+}
+
+PROMPTS = [
+    "Hello, my name is",
+    "Are unicorns real? Unicorns are",
+    "For the first time in several years,",
+    "My name is Julien and I am",
+    "The goal of life is",
+    "Whenever I'm sad, I like to",
+]
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Run and time generations on a big model using Accelerate.")
+    parser.add_argument("model_name", type=str, default=None, help="The name of the model to try.")
+    parser.add_argument(
+        "--tokenizer_name", type=str, default=None, help="The name of the tokenizer (if different from the model)."
+    )
+    parser.add_argument(
+        "--is_causal",
+        # argparse's `type=bool` treats any non-empty string (even "False") as True, so parse the flag explicitly
+        type=lambda x: str(x).lower() in ("1", "true", "yes"),
+        default=None,
+        help="Whether or not the model is causal.",
+    )
+    parser.add_argument(
+        "--model_revision", type=str, default=None, help="The revision to use for the model checkpoint."
+ ) + parser.add_argument("--torch_dtype", type=str, default=None, help="The dtype for the model.") + parser.add_argument("--disk_offload", action="store_true") + + args = parser.parse_args() + + # Sanitize args + if args.model_name in DEFAULT_MODELS: + defaults = DEFAULT_MODELS[args.model_name] + args.model_name = defaults["model"] + if args.tokenizer_name is None: + args.tokenizer_name = defaults.get("tokenizer", args.model_name) + if args.is_causal is None: + args.is_causal = defaults["is_causal"] + if args.model_revision is None: + args.model_revision = defaults.get("model_revision", "main") + + if args.is_causal is None: + raise ValueError("Could not infer the default for `--is_causal`, pass either True or False for it.") + if args.tokenizer_name is None: + args.tokenizer_name = args.model_name + if args.model_revision is None: + args.model_revision = "main" + + return args + + +def main(): + transformers.utils.logging.set_verbosity_error() + args = parse_args() + + if args.torch_dtype is None: + config = AutoConfig.from_pretrained(args.model_name) + torch_dtype = getattr(config, "torch_dtype", torch.float32) + else: + torch_dtype = getattr(torch, args.torch_dtype) + model_cls = AutoModelForCausalLM if args.is_causal else AutoModelForSeq2SeqLM + kwargs = { + "torch_dtype": torch_dtype, + "revision": args.model_revision, + } + if args.disk_offload: + kwargs["offload_folder"] = "tmp_offload" + kwargs["offload_state_dict"] = True + + start_measures = start_measure() + model = model_cls.from_pretrained(args.model_name, device_map="auto", **kwargs) + end_measures = end_measure(start_measures) + log_measures(end_measures, "Model loading") + + module_sizes = compute_module_sizes(model) + device_size = {v: 0 for v in model.hf_device_map.values()} + for module, device in model.hf_device_map.items(): + device_size[device] += module_sizes[module] + message = "\n".join([f"- {device}: {size // 2**20}MiB" for device, size in device_size.items()]) + print(f"\nTheoretical use:\n{message}") + + tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name) + + start_measures = start_measure() + generation_times = [] + gen_tokens = [] + texts_outs = [] + for prompt in PROMPTS: + inputs = tokenizer(prompt, return_tensors="pt").to(0) + tokens = inputs["input_ids"][0].tolist() + before_generate = time.time() + outputs = model.generate(inputs["input_ids"]) + after_generate = time.time() + outputs = outputs[0].tolist() + num_gen_tokens = len(outputs) if outputs[: len(tokens)] != tokens else len(outputs) - len(tokens) + generation_time = after_generate - before_generate + + text_out = tokenizer.decode(outputs, skip_special_tokens=True) + texts_outs.append(text_out) + generation_times.append(generation_time) + gen_tokens.append(num_gen_tokens) + print(f"Prompt: {prompt}\nGeneration {text_out}\nIn {generation_time:.2f}s for {num_gen_tokens} tokens\n") + + end_measures = end_measure(start_measures) + log_measures(end_measures, "Model generation") + + generation_times_per_token = [gen / tok for gen, tok in zip(generation_times, gen_tokens)] + avg_gen = sum(generation_times_per_token) / len(generation_times) + print(f"Average time of generation per token: {avg_gen:.2f}s") + print(f"First generation (avg time per token): {generation_times_per_token[0]:.2f}s") + avg_gen = sum(generation_times_per_token[1:]) / (len(generation_times_per_token) - 1) + print(f"Average time of generation per token (excluding the first): {avg_gen:.2f}s") + + +if __name__ == "__main__": + main() diff --git 
a/testbed/huggingface__accelerate/benchmarks/measures_util.py b/testbed/huggingface__accelerate/benchmarks/measures_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b6ac76b5455593d2251afa6a3664ed66b99e9151 --- /dev/null +++ b/testbed/huggingface__accelerate/benchmarks/measures_util.py @@ -0,0 +1,86 @@ +import gc +import threading +import time + +import torch + +import psutil + + +class PeakCPUMemory: + def __init__(self): + self.process = psutil.Process() + self.peak_monitoring = False + + def peak_monitor(self): + self.cpu_memory_peak = -1 + + while True: + self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak) + + # can't sleep or will not catch the peak right (this comment is here on purpose) + if not self.peak_monitoring: + break + + def start(self): + self.peak_monitoring = True + self.thread = threading.Thread(target=self.peak_monitor) + self.thread.daemon = True + self.thread.start() + + def stop(self): + self.peak_monitoring = False + self.thread.join() + return self.cpu_memory_peak + + +cpu_peak_tracker = PeakCPUMemory() + + +def start_measure(): + # Time + measures = {"time": time.time()} + + gc.collect() + torch.cuda.empty_cache() + + # CPU mem + measures["cpu"] = psutil.Process().memory_info().rss + cpu_peak_tracker.start() + + # GPU mem + for i in range(torch.cuda.device_count()): + measures[str(i)] = torch.cuda.memory_allocated(i) + torch.cuda.reset_peak_memory_stats() + + return measures + + +def end_measure(start_measures): + # Time + measures = {"time": time.time() - start_measures["time"]} + + gc.collect() + torch.cuda.empty_cache() + + # CPU mem + measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20 + measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20 + + # GPU mem + for i in range(torch.cuda.device_count()): + measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20 + measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20 + + return measures + + +def log_measures(measures, description): + print(f"{description}:") + print(f"- Time: {measures['time']:.2f}s") + for i in range(torch.cuda.device_count()): + print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB") + peak = measures[f"{i}-peak"] + print(f"- GPU {i} peak: {peak:.2f}MiB") + print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB") + print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB") diff --git a/testbed/huggingface__accelerate/docker/accelerate-cpu/Dockerfile b/testbed/huggingface__accelerate/docker/accelerate-cpu/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..a872e6fb2258583e49e9b21a3719cb78ac7ca19b --- /dev/null +++ b/testbed/huggingface__accelerate/docker/accelerate-cpu/Dockerfile @@ -0,0 +1,35 @@ +# Builds CPU-only Docker image of PyTorch +# Uses multi-staged approach to reduce size +# Stage 1 +FROM python:3.7-slim as compile-image + +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt update +RUN apt-get install -y --no-install-recommends \ + build-essential \ + git \ + gcc + +# Setup virtual environment for Docker +ENV VIRTUAL_ENV=/opt/venv +RUN python3 -m venv ${VIRTUAL_ENV} +# Make sure we use the virtualenv +ENV PATH="${VIRTUAL_ENV}/bin:$PATH" +WORKDIR /workspace +# Install specific CPU torch wheel to save on space +RUN python3 -m pip install --upgrade --no-cache-dir pip +RUN python3 -m pip install --no-cache-dir \ + jupyter \ + 
git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ + --extra-index-url https://download.pytorch.org/whl/cpu + +# Stage 2 +FROM python:3.7-slim AS build-image +COPY --from=compile-image /opt/venv /opt/venv +RUN useradd -ms /bin/bash user +USER user + +# Make sure we use the virtualenv +ENV PATH="/opt/venv/bin:$PATH" +CMD ["/bin/bash"] \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docker/accelerate-gpu/Dockerfile b/testbed/huggingface__accelerate/docker/accelerate-gpu/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..44aa94b29fb924c5b4d9f25e5fe6dffde1e21989 --- /dev/null +++ b/testbed/huggingface__accelerate/docker/accelerate-gpu/Dockerfile @@ -0,0 +1,42 @@ +# Builds GPU docker image of PyTorch +# Uses multi-staged approach to reduce size +# Stage 1 +# Use base conda image to reduce time +FROM continuumio/miniconda3:latest AS compile-image +# Specify py version +ENV PYTHON_VERSION=3.7.3 +# Install apt libs +RUN apt-get update && \ + apt-get install -y curl git wget && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists* + +# Create our conda env +RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip +# We don't install pytorch here yet since CUDA isn't available +# instead we use the direct torch wheel +ENV PATH /opt/conda/envs/accelerate/bin:$PATH +# Activate our bash shell +RUN chsh -s /bin/bash +SHELL ["/bin/bash", "-c"] +# Activate the conda env and install torch + accelerate +RUN source activate accelerate && \ + python3 -m pip install --no-cache-dir \ + git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ + --extra-index-url https://download.pytorch.org/whl/cu113 + +# Stage 2 +FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image +COPY --from=compile-image /opt/conda /opt/conda +ENV PATH /opt/conda/bin:$PATH + +# Install apt libs +RUN apt-get update && \ + apt-get install -y curl git wget && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists* + +RUN echo "source activate accelerate" >> ~/.profile + +# Activate the virtualenv +CMD ["/bin/bash"] \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/Makefile b/testbed/huggingface__accelerate/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8879933e6cda150267451c9e7d07dd22b7b0d3f1 --- /dev/null +++ b/testbed/huggingface__accelerate/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = source +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/README.md b/testbed/huggingface__accelerate/docs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..32e51f103ecc0751f04f3c75d616c147b4462aa4 --- /dev/null +++ b/testbed/huggingface__accelerate/docs/README.md @@ -0,0 +1,267 @@ + + +# Generating the documentation + +To generate the documentation, you first have to build it. 
Several packages are necessary to build the doc,
+you can install them with the following command, at the root of the code repository:
+
+```bash
+pip install -e ".[docs]"
+```
+
+Then you need to install our special tool that builds the documentation:
+
+```bash
+pip install git+https://github.com/huggingface/doc-builder
+```
+
+---
+**NOTE**
+
+You only need to generate the documentation to inspect it locally (if you're planning changes and want to
+check how they look before committing for instance). You don't have to commit the built documentation.
+
+---
+
+## Building the documentation
+
+Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
+typing the following command:
+
+```bash
+doc-builder build accelerate docs/source/ --build_dir ~/tmp/test-build
+```
+
+You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
+the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
+Markdown editor.
+
+## Previewing the documentation
+
+To preview the docs, first install the `watchdog` module with:
+
+```bash
+pip install watchdog
+```
+
+Then run the following command:
+
+```bash
+doc-builder preview {package_name} {path_to_docs}
+```
+
+For example:
+
+```bash
+doc-builder preview transformers docs/source/en/
+```
+
+The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR: a bot will add a comment with a link to where the documentation with your changes lives.
+
+---
+**NOTE**
+
+The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart the `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
+
+---
+
+## Adding a new element to the navigation bar
+
+Accepted files are Markdown (.md or .mdx).
+
+Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
+the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/accelerate/blob/main/docs/source/_toctree.yml) file.
+
+## Renaming section headers and moving sections
+
+It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it would make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.
+
+Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
+
+So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
+
+```
+Sections that were moved:
+
+[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
+```
+and of course, if you moved it to another file, then:
+
+```
+Sections that were moved:
+
+[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
+```
+
+Use the relative style to link to the new file so that the versioned docs continue to work.
+
+
+## Writing Documentation - Specification
+
+The `huggingface/accelerate` documentation follows the
+[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
+although we can write them directly in Markdown.
+
+### Adding a new tutorial
+
+Adding a new tutorial or section is done in two steps:
+
+- Add a new file under `./source`. This file should be in Markdown (.md or .mdx).
+- Link that file in `./source/_toctree.yml` on the correct toc-tree.
+
+Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
+depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or
+four.
+
+### Writing source documentation
+
+Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
+and objects like True, None, or any strings should usually be put in `code`.
+
+When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
+adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
+function to be in the main package.
+
+If you want to create a link to some internal class or function, you need to
+provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with
+`utils.gather` in the description. To get rid of the path and only keep the name of the object you are
+linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description.
+
+The same works for methods, so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
+
+#### Defining arguments in a method
+
+Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
+an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
+description:
+
+```
+    Args:
+        n_layers (`int`): The number of layers of the model.
+```
+
+If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary
+before writing the description after the argument.
+
+Finally, to maintain uniformity, if any *one* description is too long to fit on one line, the
+rest of the parameters should follow suit and have an indentation before their description.
+
+Here's an example showcasing everything so far:
+
+```
+    Args:
+        gradient_accumulation_steps (`int`, *optional*, defaults to 1):
+            The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`.
+        cpu (`bool`, *optional*):
+            Whether or not to force the script to execute on CPU. Will ignore any available GPU if set to `True` and force the execution on one process only.
+```
+
+For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
+following signature:
+
+```
+def my_function(x: str = None, a: float = 1):
+```
+
+then its documentation should look like this:
+
+```
+    Args:
+        x (`str`, *optional*):
+            This argument controls ... and has a description longer than 119 chars.
+        a (`float`, *optional*, defaults to 1):
+            This argument is used to ... and has a description longer than 119 chars.
+```
+
+Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
+if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
+however write as many lines as you want in the indented description (see the example above with `gradient_accumulation_steps`).
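+
+Putting these conventions together, a complete docstring in this style could look like the following (a sketch only: `clip_gradients` and its arguments are illustrative, not a real API in the library):
+
+```
+def clip_gradients(parameters, max_norm: float = 1.0):
+    """
+    Clips the gradients of all `parameters` in place. (Illustrative example only.)
+
+    Args:
+        parameters (`Iterable[torch.nn.Parameter]`):
+            The parameters whose gradients will be clipped.
+        max_norm (`float`, *optional*, defaults to 1.0):
+            The maximum norm the gradients are scaled down to before the update.
+
+    Returns:
+        `float`: The total norm of the gradients before clipping.
+    """
+```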
+
+#### Writing a multi-line code block
+
+Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
+
+
+````
+```python
+# first line of code
+# second line
+# etc
+```
+````
+
+#### Writing a return block
+
+The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
+The first line should be the type of the return, followed by a line return. No need to indent further for the elements
+building the return.
+
+Here's an example of a single value return:
+
+```
+    Returns:
+        `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
+```
+
+Here's an example of a tuple return, comprising several objects:
+
+```
+    Returns:
+        `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
+        - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
+          Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
+        - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
+          Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+```
+
+## Styling the docstring
+
+We have an automatic script running with the `make style` command that will make sure that:
+- the docstrings fully take advantage of the line width
+- all code examples are formatted using black, like the code of the Transformers library
+
+This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
+recommended to commit your changes before running `make style`, so you can revert the changes done by that script
+easily.
+
+## Writing documentation examples
+
+The syntax for Example docstrings can look as follows:
+
+````
+    Example:
+
+    ```python
+    >>> import time
+    >>> from accelerate import Accelerator
+    >>> accelerator = Accelerator()
+    >>> if accelerator.is_main_process:
+    ...     time.sleep(2)
+    ... else:
+    ...     print("I'm waiting for the main process to finish its sleep...")
+    >>> accelerator.wait_for_everyone()
+    >>> # Should print on every process at the same time
+    >>> print("Everyone is here")
+    ```
+````
+
+The docstring should give a minimal, clear example of how the respective function
+is to be used in inference and also include the expected (ideally sensible)
+output.
+Often, readers will try out the example before even going through the function
+or class definitions. Therefore, it is of utmost importance that the example
+works as expected.
\ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/_toctree.yml b/testbed/huggingface__accelerate/docs/source/_toctree.yml new file mode 100644 index 0000000000000000000000000000000000000000..fc3a352a8d3607eb8469b9d5ef90634c43cd976f --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/_toctree.yml @@ -0,0 +1,78 @@ +- sections: + - local: index + title: 🤗 Accelerate + - local: basic_tutorials/install + title: Installation + - local: quicktour + title: Quicktour + title: Getting started +- sections: + - local: basic_tutorials/overview + title: Overview + - local: basic_tutorials/migration + title: Migrating to 🤗 Accelerate + - local: basic_tutorials/launch + title: Launching distributed code + - local: basic_tutorials/notebook + title: Launching distributed training from Jupyter Notebooks + title: Tutorials +- sections: + - local: usage_guides/training_zoo + title: Example Zoo + - local: usage_guides/big_modeling + title: How perform inference on large models with small resources + - local: usage_guides/gradient_accumulation + title: Performing gradient accumulation + - local: usage_guides/checkpoint + title: Saving and loading training states + - local: usage_guides/tracking + title: Using experiment trackers + - local: usage_guides/memory + title: How to avoid CUDA Out-of-Memory + - local: usage_guides/mps + title: How to use Apple Silicon M1 GPUs + - local: usage_guides/deepspeed + title: How to use DeepSpeed + - local: usage_guides/fsdp + title: How to use Fully Sharded Data Parallelism + - local: usage_guides/megatron_lm + title: How to use Megatron-LM + - local: usage_guides/sagemaker + title: How to use 🤗 Accelerate with SageMaker + title: How-To Guides +- sections: + - local: concept_guides/performance + title: Comparing performance across distributed setups + - local: concept_guides/deferring_execution + title: Executing and deferring jobs + - local: concept_guides/gradient_synchronization + title: Gradient synchronization + - local: concept_guides/training_tpu + title: TPU best practices + title: Concepts and fundamentals +- sections: + - local: package_reference/accelerator + title: Main Accelerator class + - local: package_reference/state + title: Stateful configuration classes + - local: package_reference/cli + title: The Command Line + - local: package_reference/torch_wrappers + title: Torch wrapper classes + - local: package_reference/tracking + title: Experiment trackers + - local: package_reference/launchers + title: Distributed launchers + - local: package_reference/deepspeed + title: DeepSpeed utilities + - local: package_reference/logging + title: Logging + - local: package_reference/big_modeling + title: Working with large models + - local: package_reference/kwargs + title: Kwargs handlers + - local: package_reference/utilities + title: Utility functions and classes + - local: package_reference/megatron_lm + title: Megatron-LM Utilities + title: "Reference" \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/basic_tutorials/migration.mdx b/testbed/huggingface__accelerate/docs/source/basic_tutorials/migration.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ab703c96047b45240b858573eb4b16cc3e7bdb6b --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/basic_tutorials/migration.mdx @@ -0,0 +1,123 @@ + + +# Migrating your code to 🤗 Accelerate + +This tutorial will detail how to easily convert existing PyTorch code to use 🤗 Accelerate! 
+You'll see that by just changing a few lines of code, 🤗 Accelerate can perform its magic and get you on
+your way towards running your code on distributed systems with ease!
+
+## The base training loop
+
+To begin, write out a very basic PyTorch training loop.
+
+<Tip>
+
+    We are under the presumption that `training_dataloader`, `model`, `optimizer`, `scheduler`, and `loss_function` have been defined beforehand.
+
+</Tip>
+
+```python
+device = "cuda"
+model.to(device)
+
+for batch in training_dataloader:
+    optimizer.zero_grad()
+    inputs, targets = batch
+    inputs = inputs.to(device)
+    targets = targets.to(device)
+    outputs = model(inputs)
+    loss = loss_function(outputs, targets)
+    loss.backward()
+    optimizer.step()
+    scheduler.step()
+```
+
+## Add in 🤗 Accelerate
+
+To start using 🤗 Accelerate, first import and create an [`Accelerator`] instance:
+```python
+from accelerate import Accelerator
+
+accelerator = Accelerator()
+```
+[`Accelerator`] is the main force behind utilizing all the possible options for distributed training!
+
+### Setting the right device
+
+The [`Accelerator`] class knows the right device to move any PyTorch object to at any time, so you should
+change the definition of `device` to come from [`Accelerator`]:
+
+```diff
+- device = 'cuda'
++ device = accelerator.device
+  model.to(device)
+```
+
+### Preparing your objects
+
+Next you need to pass all of the important objects related to training into [`~Accelerator.prepare`]. 🤗 Accelerate will
+make sure everything is set up in the current environment for you to start training:
+
+```python
+model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+    model, optimizer, training_dataloader, scheduler
+)
+```
+These objects are returned in the same order they were sent in. By default, when using `device_placement=True`, all of the objects that can be sent to the right device will be.
+If you need to work with data that isn't passed to [`~Accelerator.prepare`] but should be on the active device, you should pass in the `device` you made earlier.
+
+<Tip>
+
+    Accelerate will only prepare objects that inherit from their respective PyTorch classes (such as `torch.optim.Optimizer`).
+
+</Tip>
+
+### Modifying the training loop
+
+Finally, three lines of code need to be changed in the training loop. 🤗 Accelerate's DataLoader classes will automatically handle the device placement by default,
+and [`~Accelerator.backward`] should be used for performing the backward pass:
+
+```diff
+-   inputs = inputs.to(device)
+-   targets = targets.to(device)
+    outputs = model(inputs)
+    loss = loss_function(outputs, targets)
+-   loss.backward()
++   accelerator.backward(loss)
+```
+
+With that, your training loop is now ready to use 🤗 Accelerate!
+
+## The finished code
+
+Below is the final version of the converted code:
+
+```python
+from accelerate import Accelerator
+
+accelerator = Accelerator()
+
+model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+    model, optimizer, training_dataloader, scheduler
+)
+
+for batch in training_dataloader:
+    optimizer.zero_grad()
+    inputs, targets = batch
+    outputs = model(inputs)
+    loss = loss_function(outputs, targets)
+    accelerator.backward(loss)
+    optimizer.step()
+    scheduler.step()
+```
+
diff --git a/testbed/huggingface__accelerate/docs/source/basic_tutorials/notebook.mdx b/testbed/huggingface__accelerate/docs/source/basic_tutorials/notebook.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..903a992d3332c236f9b93d0acfce6cb426d7211b
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/basic_tutorials/notebook.mdx
@@ -0,0 +1,429 @@
+
+# Launching Multi-Node Training from a Jupyter Environment
+
+This tutorial teaches you how to fine-tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system.
+You will also learn how to set up a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training.
+
+<Tip>
+
+    This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)
+
+</Tip>
+
+## Configuring the Environment
+
+Before any training can be performed, a 🤗 Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:
+
+```bash
+accelerate config
+```
+
+However, if general defaults are fine and you are *not* running on a TPU, 🤗 Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].
+
+The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this.
+
+<Tip>
+
+    CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train, a full cleanup and restart will need to be performed.
+
+</Tip>
+
+```python
+import os
+from accelerate.utils import write_basic_config
+
+write_basic_config()  # Write a config file
+os._exit(00)  # Restart the notebook
+```
+
+## Preparing the Dataset and Model
+
+Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.
+
+If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.
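+
+For instance, the distinction looks roughly like this (a minimal sketch; the tensor and function names are illustrative):
+
+```python
+import torch
+
+# Problematic at the top level of a notebook: this initializes CUDA in the parent process
+# weights = torch.randn(2, 2).to("cuda")
+
+
+def training_function():
+    # Safe: CUDA is only touched inside the function handed to the launcher,
+    # so it is initialized in each launched process instead
+    weights = torch.randn(2, 2).to("cuda")
+```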
+ +Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example) + +```python +import os, re, torch, PIL +import numpy as np + +from torch.optim.lr_scheduler import OneCycleLR +from torch.utils.data import DataLoader, Dataset +from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor + +from accelerate import Accelerator +from accelerate.utils import set_seed +from timm import create_model +``` + +First you need to create a function to extract the class name based on a filename: + +```python +import os + +data_dir = "../../images" +fnames = os.listdir(data_dir) +fname = fnames[0] +print(fname) +``` + +```python out +beagle_32.jpg +``` + +In the case here, the label is `beagle`. Using regex you can extract the label from the filename: + +```python +import re + + +def extract_label(fname): + stem = fname.split(os.path.sep)[-1] + return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] +``` + +```python +extract_label(fname) +``` + +And you can see it properly returned the right name for our file: + +```python out +"beagle" +``` + +Next a `Dataset` class should be made to handle grabbing the image and the label: + +```python +class PetsDataset(Dataset): + def __init__(self, file_names, image_transform=None, label_to_id=None): + self.file_names = file_names + self.image_transform = image_transform + self.label_to_id = label_to_id + + def __len__(self): + return len(self.file_names) + + def __getitem__(self, idx): + fname = self.file_names[idx] + raw_image = PIL.Image.open(fname) + image = raw_image.convert("RGB") + if self.image_transform is not None: + image = self.image_transform(image) + label = extract_label(fname) + if self.label_to_id is not None: + label = self.label_to_id[label] + return {"image": image, "label": label} +``` + +Now to build the dataset. Outside the training function you can find and declare all the filenames and labels and use them as references inside the +launched function: + +```python +fnames = [os.path.join("../../images", fname) for fname in fnames if fname.endswith(".jpg")] +``` + +Next gather all the labels: + +```python +all_labels = [extract_label(fname) for fname in fnames] +id_to_label = list(set(all_labels)) +id_to_label.sort() +label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} +``` + +Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically +sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method. 
+
+```python
+def get_dataloaders(batch_size: int = 64):
+    "Builds a set of dataloaders with a batch_size"
+    random_perm = np.random.permutation(len(fnames))
+    cut = int(0.8 * len(fnames))
+    train_split = random_perm[:cut]
+    # The remaining 20% of the shuffled indices form the evaluation split
+    eval_split = random_perm[cut:]
+
+    # For training a simple RandomResizedCrop will be used
+    train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()])
+    train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
+
+    # For evaluation a deterministic Resize will be used
+    eval_tfm = Compose([Resize((224, 224)), ToTensor()])
+    eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
+
+    # Instantiate dataloaders
+    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
+    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4)
+    return train_dataloader, eval_dataloader
+```
+
+Finally, you should import the scheduler to be used later:
+
+```python
+from torch.optim.lr_scheduler import OneCycleLR
+```
+
+## Writing the Training Function
+
+Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be run across the distributed system.
+
+Here is a basic training loop for the animal classification problem:
+
+<Tip>
+
+    The code has been split up to allow for explanations on each section. A full version that can be copied and pasted will be available at the end.
+
+</Tip>
+
+```python
+def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
+    set_seed(seed)
+    accelerator = Accelerator(mixed_precision=mixed_precision)
+```
+
+First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible.
+
+<Tip>
+
+    If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated
+    outside of the training loop function. See the [TPU best practices](../concept_guides/training_tpu)
+    to learn why.
+
+</Tip>
+
+Next you should build your dataloaders and create your model:
+
+```python
+    train_dataloader, eval_dataloader = get_dataloaders(batch_size)
+    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
+```
+
+<Tip>
+
+    You build the model here so that the seed also controls the new weight initialization.
+
+</Tip>
+
+As you are performing transfer learning in this example, the encoder of the model starts out frozen so that only the head of the model is trained initially:
+
+```python
+    for param in model.parameters():
+        param.requires_grad = False
+    for param in model.get_classifier().parameters():
+        param.requires_grad = True
+```
+
+Normalizing the batches of images will make training a little faster:
+
+```python
+    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None]
+    std = torch.tensor(model.default_cfg["std"])[None, :, None, None]
+```
+
+To make these constants available on the active device, you should set them to the Accelerator's device:
+
+```python
+    mean = mean.to(accelerator.device)
+    std = std.to(accelerator.device)
+```
+
+Next instantiate the rest of the PyTorch classes used for training:
+
+```python
+    optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)
+    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader))
+```
+
+Everything is then ready to be passed to [`~Accelerator.prepare`].
+
+<Tip>
+
+    There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method.
+
+</Tip>
+
+```python
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+```
+
+Now train the model:
+
+```python
+    for epoch in range(5):
+        model.train()
+        for batch in train_dataloader:
+            inputs = (batch["image"] - mean) / std
+            outputs = model(inputs)
+            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
+            accelerator.backward(loss)
+            optimizer.step()
+            lr_scheduler.step()
+            optimizer.zero_grad()
+```
+
+The evaluation loop will look slightly different compared to the training loop. The number of elements evaluated, as well as the number of correct predictions in each batch, will be accumulated in two counters:
+
+```python
+        model.eval()
+        accurate = 0
+        num_elems = 0
+```
+
+Next you have the rest of your standard PyTorch loop:
+
+```python
+        for batch in eval_dataloader:
+            inputs = (batch["image"] - mean) / std
+            with torch.no_grad():
+                outputs = model(inputs)
+            predictions = outputs.argmax(dim=-1)
+```
+
+Finally comes the last major difference.
+
+When performing distributed evaluation, the predictions and labels need to be passed through
+[`~Accelerator.gather`] so that all of the data is available on the current device and the metric can be calculated properly:
+
+```python
+            accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"])
+            num_elems += accurate_preds.shape[0]
+            accurate += accurate_preds.long().sum()
+```
+
+Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]:
+
+```python
+        eval_metric = accurate.item() / num_elems
+        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
+```
+
+A full version of this training loop is available below:
+
+```python
+def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
+    set_seed(seed)
+    # Initialize accelerator
+    accelerator = Accelerator(mixed_precision=mixed_precision)
+    # Build dataloaders
+    train_dataloader, eval_dataloader = get_dataloaders(batch_size)
+
+    # Instantiate the model (you build the model here so that the seed also controls new weight initializations)
+    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
+
+    # Freeze the base model
+    for param in model.parameters():
+        param.requires_grad = False
+    for param in model.get_classifier().parameters():
+        param.requires_grad = True
+
+    # You can normalize the batches of images to be a bit faster
+    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None]
+    std = torch.tensor(model.default_cfg["std"])[None, :, None, None]
+
+    # To make these constants available on the active device, set them to the accelerator device
+    mean = mean.to(accelerator.device)
+    std = std.to(accelerator.device)
+
+    # Instantiate the optimizer
+    optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25)
+
+    # Instantiate the learning rate scheduler
+    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader))
+
+    # Prepare everything
+    # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the
+    # prepare method.
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    # Now you train the model
+    for epoch in range(5):
+        model.train()
+        for batch in train_dataloader:
+            inputs = (batch["image"] - mean) / std
+            outputs = model(inputs)
+            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
+            accelerator.backward(loss)
+            optimizer.step()
+            lr_scheduler.step()
+            optimizer.zero_grad()
+
+        model.eval()
+        accurate = 0
+        num_elems = 0
+        for batch in eval_dataloader:
+            inputs = (batch["image"] - mean) / std
+            with torch.no_grad():
+                outputs = model(inputs)
+            predictions = outputs.argmax(dim=-1)
+            accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"])
+            num_elems += accurate_preds.shape[0]
+            accurate += accurate_preds.long().sum()
+
+        eval_metric = accurate.item() / num_elems
+        # Use accelerator.print to print only on the main process.
+        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
+```
+
+## Using the notebook_launcher
+
+All that's left is to use the [`notebook_launcher`].
+
+You pass in the function, the arguments (as a tuple), and the number of processes to train on. (See the [documentation](../package_reference/launchers) for more information.)
+
+```python
+from accelerate import notebook_launcher
+```
+
+```python
+args = ("fp16", 42, 64)
+notebook_launcher(training_loop, args, num_processes=2)
+```
+
+In the case of running on the TPU, it would look like so:
+
+```python
+model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
+
+args = (model, "fp16", 42, 64)
+notebook_launcher(training_loop, args, num_processes=8)
+```
+
+As it's running, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:
+
+```python out
+Launching training on 2 GPUs.
+epoch 0: 88.12
+epoch 1: 91.73
+epoch 2: 92.58
+epoch 3: 93.90
+epoch 4: 94.71
+```
+
+And that's it!
+
+## Conclusion
+
+This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:
+
+- Make sure any code that uses CUDA (or imports CUDA) lives inside the function passed to [`notebook_launcher`]
+- Set `num_processes` to the number of devices used for training (such as the number of GPUs, CPUs, or TPUs)
+- If using the TPU, declare your model outside the training loop function
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/basic_tutorials/overview.mdx b/testbed/huggingface__accelerate/docs/source/basic_tutorials/overview.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..59ff9cbae8cdfef581a569533fb44ba03389191f
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/basic_tutorials/overview.mdx
@@ -0,0 +1,21 @@
+
+# Overview
+
+Welcome to the 🤗 Accelerate tutorials! These introductory guides will help you get up to speed on working with 🤗 Accelerate.
+You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly,
+and more!
+
+These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework.
+
+If you have any questions about 🤗 Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/concept_guides/deferring_execution.mdx b/testbed/huggingface__accelerate/docs/source/concept_guides/deferring_execution.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..cb80ee0dab5d6c2c013b9a54eed4ee1a6759ad94
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/concept_guides/deferring_execution.mdx
@@ -0,0 +1,107 @@
+
+# Deferring Executions
+
+When you run your usual script, instructions are executed in order. Using 🤗 Accelerate to deploy your script on several
+GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be
+faster than others.
+
+You might need to wait for all processes to have reached a certain point before executing a given instruction. For
+instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to
+continue training before all the model weights have been loaded in. To do this, just write the following line in your code:
+
+```python
+accelerator.wait_for_everyone()
+```
+
+This instruction will block all the processes that arrive first until all the other processes have reached that
+point (if you run your script on just one GPU or CPU, this won't do anything).
+
+A few example cases for when to use this utility are listed below:
+
+<Tip>
+
+    Some of these are utilized with the [`~Accelerator.main_process_first`] context manager, which utilizes [`~Accelerator.wait_for_everyone`] to
+    run a particular set of code on the main process before triggering and launching the other processes.
+
+</Tip>
+
+## Downloading a Dataset
+
+When downloading a dataset, you should download it first on the main process and then load the cached dataset afterwards.
+
+<Tip>
+
+    `load_dataset` will perform a lock under the hood to stop multiple downloads from happening at once, but if you are downloading something
+    not using this library you should use this method.
+
+</Tip>
+
+```python
+with accelerator.main_process_first():
+    datasets = load_dataset("glue", "mrpc")
+```
+
+Under the hood this is the same as calling:
+
+```python
+# First do something on the main process
+if accelerator.is_main_process:
+    datasets = load_dataset("glue", "mrpc")
+else:
+    accelerator.wait_for_everyone()
+
+# And then send it to the rest of them
+if not accelerator.is_main_process:
+    datasets = load_dataset("glue", "mrpc")
+else:
+    accelerator.wait_for_everyone()
+```
+
+## Saving the `state_dict`
+
+When saving the `state_dict` of the model, you would normally save only one copy of it, on the main process, so you should specify that:
+
+```python
+if accelerator.is_main_process:
+    model = accelerator.unwrap_model(model)
+    torch.save(model.state_dict(), "weights.pth")
+```
+
+## Loading in the `state_dict`
+
+When loading in the `state_dict` to a model, optimizer, or scheduler, you should wait
+for all workers to have the weights loaded in before moving on to training:
+
+```python
+with accelerator.main_process_first():
+    state = torch.load("weights.pth")
+    model.load_state_dict(state)
+```
+
+## Applying a multi-worker CPU operation
+
+Applying a `map()` operation on multiple workers, such as tokenizing, should be done on the
+main process first and then propagated to each one.
+
+```python
+datasets = load_dataset("glue", "mrpc")
+
+with accelerator.main_process_first():
+    tokenized_datasets = datasets.map(
+        tokenize_function,
+        batched=True,
+        remove_columns=["idx", "sentence1", "sentence2"],
+    )
+```
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/concept_guides/gradient_synchronization.mdx b/testbed/huggingface__accelerate/docs/source/concept_guides/gradient_synchronization.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..ea4de3d72308f1088c876882d65d0c323da77f1d
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/concept_guides/gradient_synchronization.mdx
@@ -0,0 +1,119 @@
+
+# Gradient Synchronization
+
+PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system.
+This communication takes time, and ensuring all processes know the states of each other happens at particular trigger points
+when using the `ddp` module.
+
+These trigger points are added to the PyTorch model, specifically its `forward()` and `backward()` methods.
+This happens when the model is wrapped with `DistributedDataParallel`:
+```python
+import torch.nn as nn
+from torch.nn.parallel import DistributedDataParallel
+
+model = nn.Linear(10, 10)
+ddp_model = DistributedDataParallel(model)
+```
+In 🤗 Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model.
+
+```diff
++ from accelerate import Accelerator
++ accelerator = Accelerator()
+  import torch.nn as nn
+- from torch.nn.parallel import DistributedDataParallel
+
+  model = nn.Linear(10,10)
++ model = accelerator.prepare(model)
+```
+
+## The slowdown in gradient accumulation
+
+You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when
+training in a distributed setup. But how does this risk slowing down your code?
+
+In DDP (distributed data parallel), processes are expected to reach specific points in a specific order, and these
+must also occur at roughly the same time before moving on.
+
+The most direct example is when you update all of the parameters in a model through `.backward()`. All instances of the model
+need to have their gradients computed, collated, and the parameters updated before moving on to the next batch of data. But when performing
+gradient accumulation, you accumulate `n` losses and skip `.backward()` until `n` batches have been reached. This
+can cause a significant slowdown since all the processes need to communicate with each other more times than needed. How
+can you avoid this overhead?
+
+## Solving the slowdown problem
+
+Since you are skipping these batches, their gradients do not need to be synchronized until the point where `.backward()` is actually called.
+PyTorch cannot automagically tell when you need to do this, but it does provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager
+that is added to your model after converting it to DDP.
+
+Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this
+context manager will trigger the synchronization.
See an example below:
+```python
+ddp_model, dataloader = accelerator.prepare(model, dataloader)
+
+for index, batch in enumerate(dataloader):
+    inputs, targets = batch
+    # Trigger gradient synchronization on the last batch
+    if index != (len(dataloader) - 1):
+        with ddp_model.no_sync():
+            # Gradients only accumulate
+            outputs = ddp_model(inputs)
+            loss = loss_func(outputs, targets)
+            accelerator.backward(loss)
+    else:
+        # Gradients finally sync
+        outputs = ddp_model(inputs)
+        loss = loss_func(outputs, targets)
+        accelerator.backward(loss)
+```
+
+In 🤗 Accelerate, to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!),
+`ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way:
+
+```diff
+ ddp_model, dataloader = accelerator.prepare(model, dataloader)
+
+ for index, batch in enumerate(dataloader):
+     inputs, targets = batch
+     # Trigger gradient synchronization on the last batch
+     if index != (len(dataloader)-1):
+-        with ddp_model.no_sync():
++        with accelerator.no_sync(model):
+             # Gradients only accumulate
+             outputs = ddp_model(inputs)
+             loss = loss_func(outputs, targets)
+             accelerator.backward(loss)
+     else:
+         # Gradients finally sync
+         outputs = ddp_model(inputs)
+         loss = loss_func(outputs, targets)
+         accelerator.backward(loss)
+```
+
+As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final
+gradient accumulation API:
+
+```python
+ddp_model, dataloader = accelerator.prepare(model, dataloader)
+
+for batch in dataloader:
+    with accelerator.accumulate(model):
+        optimizer.zero_grad()
+        inputs, targets = batch
+        outputs = model(inputs)
+        loss = loss_function(outputs, targets)
+        accelerator.backward(loss)
+```
+
+As a result, when it comes to API choice you should use either `accelerator.accumulate` or `accelerator.no_sync`.
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/concept_guides/performance.mdx b/testbed/huggingface__accelerate/docs/source/concept_guides/performance.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c974b322fec28c8e2c24b2f71747d845262f7a93
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/concept_guides/performance.mdx
@@ -0,0 +1,91 @@
+
+
+# Comparing performance between different device setups
+
+Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for.
+For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate
+and expect your results to line up.
+
+But why?
+
+There are three reasons for this, which this tutorial will cover:
+
+1. **Setting the right seeds**
+2. **Observed Batch Sizes**
+3. **Learning Rates**
+
+## Setting the Seed
+
+While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducible:
+
+```python
+from accelerate.utils import set_seed
+
+set_seed(42)
+```
+
+Why is this important?
Under the hood this will set **5** different seed settings:
+
+```python
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    # ^^ safe to call this function even if cuda is not available
+    if is_tpu_available():
+        xm.set_rng_state(seed)
+```
+
+These are the Python `random` state, numpy's state, torch's state, torch's cuda state, and, if TPUs are available, torch_xla's state.
+
+## Observed Batch Sizes
+
+When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. This means that
+a batch size of 64 on two GPUs is effectively a batch size of 128. As a result, this needs to be accounted for when testing
+on a single GPU, and similarly for TPUs.
+
+The below table can be used as a quick reference to try out different batch sizes:
+
+
+
+    In this example there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers
+
+
+
+| Single GPU Batch Size | Multi-GPU Equivalent Batch Size | TPU Equivalent Batch Size |
+|-----------------------|---------------------------------|---------------------------|
+| 256                   | 128                             | 32                        |
+| 128                   | 64                              | 16                        |
+| 64                    | 32                              | 8                         |
+| 32                    | 16                              | 4                         |
+
+## Learning Rates
+
+As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/tlt-mi_archive/clara-train-sdk-v2.0/nvmidl/appendix/training_with_multiple_gpus.html)], the learning rate should be scaled *linearly* based on the number of devices present. The below
+snippet shows doing so with Accelerate:
+
+
+
+    Since users can have their own learning rate schedulers defined, we leave this up to the user to decide if they wish to scale their
+    learning rate or not.
+
+
+
+```python
+from accelerate import Accelerator
+from torch.optim import AdamW
+
+learning_rate = 1e-3
+accelerator = Accelerator()
+learning_rate *= accelerator.num_processes
+
+optimizer = AdamW(params=model.parameters(), lr=learning_rate)
+```
+
diff --git a/testbed/huggingface__accelerate/docs/source/concept_guides/training_tpu.mdx b/testbed/huggingface__accelerate/docs/source/concept_guides/training_tpu.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7fe54b14af07babc71fed6060091e636d16da1f9
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/concept_guides/training_tpu.mdx
@@ -0,0 +1,164 @@
+
+
+# Training on TPUs with 🤗 Accelerate
+
+Training on TPUs can be slightly different from training on multi-gpu, even with 🤗 Accelerate. This guide aims to show you
+where you should be careful and why, as well as the best practices in general.
+
+## Training in a Notebook
+
+The main point to be careful about when training on TPUs comes from the [`notebook_launcher`]. As mentioned in the [notebook tutorial](../usage_guides/notebook), you need to
+restructure your training code into a function that can get passed to the [`notebook_launcher`] function and be careful about not declaring any tensors on the GPU.
+
+While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**.
+When launching from the command line, you perform **spawning**, where a Python process is not currently running and you *spawn* a new process in. Since your Jupyter notebook is already
+utilizing a Python process, you need to *fork* a new process from it to launch your code.
+
+Where this becomes important is in regard to declaring your model.
On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your +training function. This is different than training on GPUs where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead one +model instance is shared between all the nodes and it is passed back and forth. This is important especially when training on low-resource TPUs such as those provided in Kaggle kernels or +on Google Colaboratory. + +Below is an example of a training function passed to the [`notebook_launcher`] if training on CPUs or GPUs: + + + + This code snippet is based off the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate/simple_nlp_example.ipynb) with slight + modifications for the sake of simplicity + + + +```python +def training_function(): + # Initialize accelerator + accelerator = Accelerator() + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2) + train_dataloader, eval_dataloader = create_dataloaders( + train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"] + ) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=hyperparameters["learning_rate"]) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader + ) + + num_epochs = hyperparameters["num_epochs"] + # Now we train the model + for epoch in range(num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + accelerator.backward(loss) + + optimizer.step() + optimizer.zero_grad() +``` + +```python +from accelerate import notebook_launcher + +notebook_launcher(training_function) +``` + + + + The `notebook_launcher` will default to 8 processes if 🤗 Accelerate has been configured for a TPU + + + +If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error +like: + +``` +ProcessExitedException: process 0 terminated with signal SIGSEGV +``` + +This error is *extremely* cryptic but the basic explanation is you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to +accept a single `model` argument, and declare it in an outside cell: + +```python +# In another Jupyter cell +model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2) +``` + +```diff ++ def training_function(model): + # Initialize accelerator + accelerator = Accelerator() +- model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2) + train_dataloader, eval_dataloader = create_dataloaders( + train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"] + ) + ... +``` + +And finally calling the training function with: + +```diff + from accelerate import notebook_launcher +- notebook_launcher(training_function) ++ notebook_launcher(training_function, (model,)) +``` + + + + The above workaround is only needed when launching a TPU instance from a Jupyter Notebook on a low-resource server such as Google Colaboratory or Kaggle. 
If
+    using a script or launching on a much beefier server, declaring the model beforehand is not needed.
+
+
+
+## Mixed Precision and Global Variables
+
+As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), 🤗 Accelerate supports fp16 and bf16, both of which can be used on TPUs.
+That being said, ideally `bf16` should be utilized as it is extremely efficient to use.
+
+There are two "layers" when using `bf16` and 🤗 Accelerate on TPUs, at the base level and at the operation level.
+
+At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as:
+```python
+accelerator = Accelerator(mixed_precision="bf16")
+```
+By default this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs.
+Specifically, this sets the `XLA_USE_BF16` environment variable to `1`.
+
+A further configuration you can perform is setting the `XLA_DOWNCAST_BF16` environment variable. If set to `1`, then
+`torch.float` is `bfloat16` and `torch.double` is `float32`.
+
+This is performed in the `Accelerator` object when passing `downcast_bf16=True`:
+```python
+accelerator = Accelerator(mixed_precision="bf16", downcast_bf16=True)
+```
+
+Using downcasting instead of bf16 everywhere is useful when you are trying to calculate metrics, log values, and so on, where raw bf16 tensors would be unusable.
+
+## Training Times on TPUs
+
+As you launch your script, you may notice that training seems exceptionally slow at first. This is because TPUs
+first run through a few batches of data to see how much memory to allocate before finally utilizing this configured
+memory allocation extremely efficiently.
+
+If you notice that your evaluation code to calculate the metrics of your model takes longer due to a larger batch size being used,
+it is recommended to keep the batch size the same as the training data if it is too slow. Otherwise the memory will reallocate to this
+new batch size after the first few iterations.
+
+
+
+    Just because the memory is allocated does not mean it will be used or that the batch size will increase when going back to your training dataloader.
+
+
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/index.mdx b/testbed/huggingface__accelerate/docs/source/index.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a87bd6e32574ed03a036dde4e5d2be9e43b1e3a9
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/index.mdx
@@ -0,0 +1,71 @@
+
+
+# Accelerate
+
+🤗 Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.
+
+```diff
++ from accelerate import Accelerator
++ accelerator = Accelerator()
+
++ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
++     model, optimizer, training_dataloader, scheduler
++ )
+
+  for batch in training_dataloader:
+      optimizer.zero_grad()
+      inputs, targets = batch
+-     inputs = inputs.to(device)
+-     targets = targets.to(device)
+      outputs = model(inputs)
+      loss = loss_function(outputs, targets)
+-     loss.backward()
++     accelerator.backward(loss)
+      optimizer.step()
+      scheduler.step()
+```
+
+Built on `torch_xla` and `torch.distributed`, 🤗 Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms.
+
+Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training!
+
+
+
+    To get a better idea of this process, make sure to check out the [Tutorials](basic_tutorials/overview)!
+
+
+
+
+This code can then be launched on any system through Accelerate's CLI interface:
+```bash
+accelerate launch {my_script.py}
+```
+
diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/accelerator.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/accelerator.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..fb20f1a66638d944f7c6cb8283ffc3209ecbbb08
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/package_reference/accelerator.mdx
@@ -0,0 +1,163 @@
+
+
+# Accelerator
+
+The [`Accelerator`] is the main class provided by 🤗 Accelerate.
+It serves as the main entry point for the API.
+
+## Quick adaptation of your code
+
+To quickly adapt your script to work on any kind of setup with 🤗 Accelerate just:
+
+1. Initialize an [`Accelerator`] object (that we will call `accelerator` throughout this page) as early as possible in your script.
+2. Pass your dataloader(s), model(s), optimizer(s), and scheduler(s) to the [`~Accelerator.prepare`] method.
+3. Remove all the `.cuda()` or `.to(device)` calls from your code and let the `accelerator` handle the device placement for you.
+
+
+
+    Step three is optional, but considered a best practice.
+
+
+
+4. Replace `loss.backward()` in your code with `accelerator.backward(loss)`.
+5. Gather your predictions and labels before storing them or using them for metric computation using [`~Accelerator.gather`].
+
+
+
+    Step five is mandatory when using distributed evaluation.
+
+
+
+In most cases this is all that is needed.
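+
+For illustration, here is a minimal sketch of these five steps put together; the model, dataset, and loss function below are hypothetical stand-ins for your own objects:
+
+```python
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+from accelerate import Accelerator
+
+# Hypothetical model and data -- swap in your own
+model = torch.nn.Linear(10, 2)
+optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
+dataloader = DataLoader(TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,))), batch_size=8)
+loss_function = torch.nn.CrossEntropyLoss()
+
+accelerator = Accelerator()  # step 1
+model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)  # step 2
+
+for inputs, targets in dataloader:  # step 3: no manual `.to(device)` needed
+    optimizer.zero_grad()
+    outputs = model(inputs)
+    loss = loss_function(outputs, targets)
+    accelerator.backward(loss)  # step 4
+    optimizer.step()
+```
+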
The next section lists a few more advanced use cases and nice features
+you should search for and replace by the corresponding methods of your `accelerator`:
+
+## Advanced recommendations
+
+### Printing
+
+`print` statements should be replaced by [`~Accelerator.print`] to be printed once per process
+
+```diff
+- print("My thing I want to print!")
++ accelerator.print("My thing I want to print!")
+```
+
+### Executing processes
+
+#### Once on a single server
+
+For statements that should be executed once per server, use [`~Accelerator.is_local_main_process`]:
+
+```python
+if accelerator.is_local_main_process:
+    do_thing_once_per_server()
+```
+
+A function can be wrapped using the [`~Accelerator.on_local_main_process`] function to achieve the same
+behavior on a function's execution:
+
+```python
+@accelerator.on_local_main_process
+def do_my_thing():
+    "Something done once per server"
+    do_thing_once_per_server()
+```
+
+#### Only ever once across all servers
+
+For statements that should only ever be executed once, use [`~Accelerator.is_main_process`]:
+
+```python
+if accelerator.is_main_process:
+    do_thing_once()
+```
+
+A function can be wrapped using the [`~Accelerator.on_main_process`] function to achieve the same
+behavior on a function's execution:
+
+```python
+@accelerator.on_main_process
+def do_my_thing():
+    "Something done once across all servers"
+    do_thing_once()
+```
+
+#### On specific processes
+
+If a function should be run on a specific overall or local process index, there are similar decorators
+to achieve this:
+
+```python
+@accelerator.on_local_process(local_process_idx=0)
+def do_my_thing():
+    "Something done on process index 0 on each server"
+    do_thing_on_index_zero_on_each_server()
+```
+
+```python
+@accelerator.on_process(process_index=0)
+def do_my_thing():
+    "Something done on process index 0"
+    do_thing_on_index_zero()
+```
+
+### Synchronicity control
+
+Use [`~Accelerator.wait_for_everyone`] to make sure all processes join that point before continuing (useful before a model save, for instance).
+
+### Saving and loading
+
+Use [`~Accelerator.unwrap_model`] before saving to remove all special model wrappers added during the distributed process.
+
+```python
+model = MyModel()
+model = accelerator.prepare(model)
+# Unwrap
+model = accelerator.unwrap_model(model)
+```
+
+Use [`~Accelerator.save`] instead of `torch.save`:
+
+```diff
+  state_dict = model.state_dict()
+- torch.save(state_dict, "my_state.pkl")
++ accelerator.save(state_dict, "my_state.pkl")
+```
+
+### Operations
+
+Use [`~Accelerator.clip_grad_norm_`] instead of ``torch.nn.utils.clip_grad_norm_`` and [`~Accelerator.clip_grad_value_`] instead of ``torch.nn.utils.clip_grad_value_``
+
+### Gradient Accumulation
+
+To perform gradient accumulation use [`~Accelerator.accumulate`] and specify `gradient_accumulation_steps`.
+
+This will also automatically ensure the gradients are synced or unsynced when on
+multi-device training, check if the step should actually be performed, and auto-scale the loss:
+
+```diff
+- accelerator = Accelerator()
++ accelerator = Accelerator(gradient_accumulation_steps=2)
+
+  for (input, label) in training_dataloader:
++     with accelerator.accumulate(model):
+          predictions = model(input)
+          loss = loss_function(predictions, label)
+          accelerator.backward(loss)
+          optimizer.step()
+          scheduler.step()
+          optimizer.zero_grad()
+```
+
+## Overall API documentation:
+
+[[autodoc]] Accelerator
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/cli.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/cli.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..402d5778142c65915167f6e699cc3ca0b70eb9f3
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/package_reference/cli.mdx
@@ -0,0 +1,273 @@
+
+
+# The Command Line
+
+Below is a list of all the available commands in 🤗 Accelerate, with their parameters
+
+## accelerate config
+
+**Command**:
+
+`accelerate config` or `accelerate-config`
+
+Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should
+always be run first on your machine.
+
+**Usage**:
+
+```bash
+accelerate config [arguments]
+```
+
+**Optional Arguments**:
+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
+    of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
+    (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
+* `-h`, `--help` (`bool`) -- Show a help message and exit
+
+## accelerate config default
+
+**Command**:
+
+`accelerate config default` or `accelerate-config default`
+
+Create a default config file for Accelerate with only a few flags set.
+
+**Usage**:
+
+```bash
+accelerate config default [arguments]
+```
+
+**Optional Arguments**:
+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file.
Will default to a file named default_config.yaml in the cache location, which is the content + of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory + (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. + +* `-h`, `--help` (`bool`) -- Show a help message and exit +* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later. + +## accelerate config update + +**Command**: + +`accelerate config update` or `accelerate-config update` + +Update an existing config file with the latest defaults while maintaining the old configuration. + +**Usage**: + +```bash +accelerate config update [arguments] +``` + +**Optional Arguments**: +* `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content + of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory + (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. + +* `-h`, `--help` (`bool`) -- Show a help message and exit + + +## accelerate env + +**Command**: + +`accelerate env` or `accelerate-env` + +Lists the contents of the passed 🤗 Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate). + +**Usage**: + +```bash +accelerate env [arguments] +``` + +**Optional Arguments**: +* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content + of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory + (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. +* `-h`, `--help` (`bool`) -- Show a help message and exit + +## accelerate launch + +**Command**: + +`accelerate launch` or `accelerate-launch` + +Launches a specified script on a distributed system with the right parameters. + +**Usage**: + +```bash +accelerate launch [arguments] {training_script} --{training_script-argument-1} --{training_script-argument-2} ... +``` + +**Positional Arguments**: + +- `{training_script}` -- The full path to the script to be launched in parallel +- `--{training_script-argument-1}` -- Arguments of the training script + +**Optional Arguments**: + +* `-h`, `--help` (`bool`) -- Show a help message and exit +* `--config_file CONFIG_FILE` (`str`)-- The config file to use for the default values in the launching script. +* `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'. +* `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script. +* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails. +* `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations). 
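+
+For example, a hypothetical `train.py` script that takes its own `--batch_size` argument could be launched on two processes with mixed precision like so (the script name and its argument are placeholders):
+
+```bash
+accelerate launch --num_processes 2 --mixed_precision fp16 train.py --batch_size 16
+```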
+
+
+The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their
+values. They can also be passed in manually.
+
+**Hardware Selection Arguments**:
+
+* `--cpu` (`bool`) -- Whether or not to force the training on the CPU.
+* `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training.
+* `--mps` (`bool`) -- Whether or not this should use MPS-enabled GPU device on MacOS machines.
+* `--tpu` (`bool`) -- Whether or not this should launch a TPU training.
+
+**Resource Selection Arguments**:
+
+The following arguments are useful for fine-tuning how available hardware should be used
+
+* `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.
+* `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel.
+* `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training.
+* `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance.
+
+**Training Paradigm Arguments**:
+
+The following arguments are useful for selecting which training paradigm to use.
+
+* `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training.
+* `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training.
+* `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training.
+
+**Distributed GPU Arguments**:
+
+The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`:
+
+* `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list.
+* `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network.
+* `--machine_rank MACHINE_RANK` (`int`) -- The rank of the machine on which this script is launched.
+* `--main_process_ip MAIN_PROCESS_IP` (`str`) -- The IP address of the machine of rank 0.
+* `--main_process_port MAIN_PROCESS_PORT` (`int`) -- The port to use to communicate with the machine of rank 0.
+* `--rdzv_conf` (`str`) -- Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).
+* `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing.
+* `--monitor_interval` (`float`) -- Interval, in seconds, to monitor the state of workers.
+
+**TPU Arguments**:
+
+The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`:
+
+* `--main_training_function MAIN_TRAINING_FUNCTION` (`str`) -- The name of the main function to be executed in your script.
+* `--downcast_bf16` (`bool`) -- Whether, when using bf16 precision on TPUs, both float and double tensors should be cast to bfloat16, or double tensors should remain as float32.
+
+**DeepSpeed Arguments**:
+
+The following arguments are only useful when `use_deepspeed` is passed or `deepspeed` is configured through `accelerate config`:
+
+* `--deepspeed_config_file` (`str`) -- DeepSpeed config file.
+* `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage.
+* `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states.
+* `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters.
+
+* `--gradient_accumulation_steps` (`int`) -- Number of gradient accumulation steps used in your training script.
+* `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script.
+* `--zero3_init_flag` (`str`) -- Decides whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3.
+* `--zero3_save_16bit_model` (`str`) -- Decides whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3.
+* `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources.
+* `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup.
+* `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup.
+* `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use.
+
+**Fully Sharded Data Parallelism Arguments**:
+
+The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`:
+
+* `--fsdp_offload_params` (`str`) -- Decides whether (true|false) to offload parameters and gradients to CPU.
+* `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping.
+* `--fsdp_sharding_strategy` (`int`) -- FSDP's Sharding Strategy.
+* `--fsdp_auto_wrap_policy` (`str`) -- FSDP's auto wrap policy.
+* `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block` ...
+* `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy.
+* `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type.
+
+**Megatron-LM Arguments**:
+
+The following arguments are only useful when `use_megatron_lm` is passed or Megatron-LM is configured through `accelerate config`:
+
+* `--megatron_lm_tp_degree` (``) -- Megatron-LM's Tensor Parallelism (TP) degree.
+* `--megatron_lm_pp_degree` (``) -- Megatron-LM's Pipeline Parallelism (PP) degree.
+* `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1.
+* `--megatron_lm_sequence_parallelism` (``) -- Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1.
+* `--megatron_lm_recompute_activations` (``) -- Decides whether (true|false) to enable Selective Activation Recomputation.
+* `--megatron_lm_use_distributed_optimizer` (``) -- Decides whether (true|false) to use a distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks.
+* `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable).
+
+**AWS SageMaker Arguments**:
+
+The following arguments are only useful when training in SageMaker
+
+* `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job.
+* `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.
+
+## accelerate tpu-config
+
+`accelerate tpu-config`
+
+**Usage**:
+
+```bash
+accelerate tpu-config [arguments]
+```
+
+**Optional Arguments**:
+* `-h`, `--help` (`bool`) -- Show a help message and exit
+
+**Config Arguments**:
+
+Arguments that can be configured through `accelerate config`.
+
+* `--config_file` (`str`) -- Path to the config file to use for accelerate.
+
+* `--tpu_name` (`str`) -- The name of the TPU to use. If not specified, will use the TPU specified in the config file.
+* `--tpu_zone` (`str`) -- The zone of the TPU to use. If not specified, will use the zone specified in the config file.
+
+**TPU Arguments**:
+
+Arguments for options run inside the TPU.
+
+* `--command_file` (`str`) -- The path to the file containing the commands to run on the pod on startup.
+* `--command` (`str`) -- A command to run on the pod. Can be passed multiple times.
+* `--install_accelerate` (`bool`) -- Whether to install accelerate on the pod. Defaults to False.
+* `--accelerate_version` (`str`) -- The version of accelerate to install on the pod. If not specified, will use the latest PyPI version. Specify 'dev' to install from GitHub.
+* `--debug` (`bool`) -- If set, will print the command that would be run instead of running it.
+
+## accelerate test
+
+`accelerate test` or `accelerate-test`
+
+Runs `accelerate/test_utils/test_script.py` to verify that 🤗 Accelerate has been properly configured on your system and runs.
+
+**Usage**:
+
+```bash
+accelerate test [arguments]
+```
+
+**Optional Arguments**:
+* `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content
+    of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory
+    (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`.
+* `-h`, `--help` (`bool`) -- Show a help message and exit
diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/deepspeed.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/deepspeed.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..fee886f4ed2127ad5e85448dab5b4526958f2052
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/package_reference/deepspeed.mdx
@@ -0,0 +1,25 @@
+
+
+# Utilities for DeepSpeed
+
+[[autodoc]] utils.DeepSpeedPlugin
+
+[[autodoc]] utils.DummyOptim
+
+[[autodoc]] utils.DummyScheduler
+
+[[autodoc]] utils.DeepSpeedEngineWrapper
+
+[[autodoc]] utils.DeepSpeedOptimizerWrapper
+
+[[autodoc]] utils.DeepSpeedSchedulerWrapper
diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/kwargs.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/kwargs.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..04f58af37580bddc498b50a618c041c4c5581b2e
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/package_reference/kwargs.mdx
@@ -0,0 +1,29 @@
+
+
+# Kwargs Handlers
+
+The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects
+related to distributed training or mixed precision are created.
+
+
+## DistributedDataParallelKwargs
+
+[[autodoc]] DistributedDataParallelKwargs
+
+## GradScalerKwargs
+
+[[autodoc]] GradScalerKwargs
+
+## InitProcessGroupKwargs
+
+[[autodoc]] InitProcessGroupKwargs
diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/launchers.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/launchers.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..6f37f0af01896f2c313cd9b34ab03fee9d98f906
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/package_reference/launchers.mdx
@@ -0,0 +1,19 @@
+
+
+# Launchers
+
+Functions for launching training on distributed processes.
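+
+As a quick sketch of how these are used from a notebook (the `training_function` here is a trivial placeholder for your own training code):
+
+```python
+from accelerate import notebook_launcher
+
+def training_function():
+    print("Hello from one process")
+
+notebook_launcher(training_function, num_processes=2)
+```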
+ + +[[autodoc]] accelerate.notebook_launcher +[[autodoc]] accelerate.debug_launcher \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/logging.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/logging.mdx new file mode 100644 index 0000000000000000000000000000000000000000..85e844690d7fde51d8394961958c16807f2c43ff --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/package_reference/logging.mdx @@ -0,0 +1,34 @@ + + +# Logging with Accelerate + +Accelerate has its own logging utility to handle logging while in a distributed system. +To utilize this replace cases of `logging` with `accelerate.logging`: +```diff +- import logging ++ from accelerate.logging import get_logger +- logger = logging.getLogger(__name__) ++ logger = get_logger(__name__) +``` + +## Setting the log level + +The log level can be set with the `ACCELERATE_LOG_LEVEL` environment variable or by passing +`log_level` to `get_logger`: +```python +from accelerate.logging import get_logger + +logger = get_logger(__name__, log_level="INFO") +``` + +[[autodoc]] logging.get_logger \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/megatron_lm.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/megatron_lm.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b59768bec91e5b9c42574552d78d4cd273619a21 --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/package_reference/megatron_lm.mdx @@ -0,0 +1,29 @@ + + +# Utilities for Megatron-LM + +[[autodoc]] utils.MegatronLMPlugin + +[[autodoc]] utils.MegatronLMDummyScheduler + +[[autodoc]] utils.MegatronLMDummyDataLoader + +[[autodoc]] utils.AbstractTrainStep + +[[autodoc]] utils.GPTTrainStep + +[[autodoc]] utils.BertTrainStep + +[[autodoc]] utils.T5TrainStep + +[[autodoc]] utils.avg_losses_across_data_parallel_group diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/state.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/state.mdx new file mode 100644 index 0000000000000000000000000000000000000000..f1f5ef9743477e76889ae2d328a45fed9ff6e950 --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/package_reference/state.mdx @@ -0,0 +1,23 @@ + + +# Stateful Classes + +Below are variations of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all +instances share the same state, which is initialized on the first instantiation. + +These classes are immutable and store information about certain configurations or +states. + +[[autodoc]] state.AcceleratorState + +[[autodoc]] state.GradientState \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/torch_wrappers.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/torch_wrappers.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4ac8ae572e1b54b57b3b4d92e893596392c8d43b --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/package_reference/torch_wrappers.mdx @@ -0,0 +1,33 @@ + + +# Wrapper classes for torch Dataloaders, Optimizers, and Schedulers + +The internal classes Accelerate uses to prepare objects for distributed training +when calling [`~Accelerator.prepare`]. 
+ +## Datasets and DataLoaders + +[[autodoc]] data_loader.prepare_data_loader + +[[autodoc]] data_loader.BatchSamplerShard +[[autodoc]] data_loader.IterableDatasetShard +[[autodoc]] data_loader.DataLoaderShard +[[autodoc]] data_loader.DataLoaderDispatcher + +## Optimizers + +[[autodoc]] optimizer.AcceleratedOptimizer + +## Schedulers + +[[autodoc]] scheduler.AcceleratedScheduler \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/tracking.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/tracking.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5e7a97f24cd5d370a135f3108ca66340e7dbf469 --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/package_reference/tracking.mdx @@ -0,0 +1,26 @@ + + +# Experiment Tracking + +## The Base Tracker Class + +[[autodoc]] tracking.GeneralTracker + +## Integrated Trackers + +[[autodoc]] tracking.TensorBoardTracker + - __init__ +[[autodoc]] tracking.WandBTracker + - __init__ +[[autodoc]] tracking.CometMLTracker + - __init__ diff --git a/testbed/huggingface__accelerate/docs/source/package_reference/utilities.mdx b/testbed/huggingface__accelerate/docs/source/package_reference/utilities.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9ebb60d346c31eac71aaf1b92b5889684ac4e25a --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/package_reference/utilities.mdx @@ -0,0 +1,104 @@ + + +# Helpful Utilities + +Below are a variety of utility functions that 🤗 Accelerate provides, broken down by use-case. + +## Data Classes + +These are basic dataclasses used throughout 🤗 Accelerate and they can be passed in as parameters. + +[[autodoc]] utils.DistributedType + +[[autodoc]] utils.LoggerType + +[[autodoc]] utils.PrecisionType + +[[autodoc]] utils.ProjectConfiguration + +## Data Manipulation and Operations + +These include data operations that mimic the same `torch` ops but can be used on distributed processes. + +[[autodoc]] utils.broadcast + +[[autodoc]] utils.concatenate + +[[autodoc]] utils.gather + +[[autodoc]] utils.pad_across_processes + +[[autodoc]] utils.reduce + +[[autodoc]] utils.send_to_device + +## Environment Checks + +These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed. + +[[autodoc]] utils.is_bf16_available + +[[autodoc]] utils.is_torch_version + +[[autodoc]] utils.is_tpu_available + +## Environment Configuration + +[[autodoc]] utils.write_basic_config + +When setting up 🤗 Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as an alternative for quick configuration. + +## Memory + +[[autodoc]] utils.get_max_memory + +[[autodoc]] utils.find_executable_batch_size + +## Modeling + +These utilities relate to interacting with PyTorch models + +[[autodoc]] utils.extract_model_from_parallel + +[[autodoc]] utils.get_max_layer_size + +[[autodoc]] utils.offload_state_dict + + +## Parallel + +These include general utilities that should be used when working in parallel. + +[[autodoc]] utils.extract_model_from_parallel + +[[autodoc]] utils.save + +[[autodoc]] utils.wait_for_everyone + + +## Random + +These utilities relate to setting and synchronizing of all the random states. 
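+
+As a short illustrative sketch of how these fit together (the `rng_types` value shown is just an example; see the references below for the accepted options):
+
+```python
+from accelerate.utils import set_seed, synchronize_rng_states
+
+# Seed python, numpy and torch (and, on TPU, torch_xla) identically on every process
+set_seed(42)
+
+# Re-align the chosen generators across processes in a distributed run, e.g. before an epoch
+synchronize_rng_states(["torch"])
+```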
+
+[[autodoc]] utils.set_seed
+
+[[autodoc]] utils.synchronize_rng_state
+
+[[autodoc]] utils.synchronize_rng_states
+
+
+## PyTorch XLA
+
+These include utilities that are useful while using PyTorch with XLA.
+
+[[autodoc]] utils.install_xla
diff --git a/testbed/huggingface__accelerate/docs/source/quicktour.mdx b/testbed/huggingface__accelerate/docs/source/quicktour.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..13a8d7afa887ff4fda505be41c4fea79068ff981
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/quicktour.mdx
@@ -0,0 +1,505 @@
+
+
+# Quick tour
+
+Let's have a look at the main features of 🤗 Accelerate and the traps to avoid.
+
+## Main use
+
+To use 🤗 Accelerate in your own script, you have to change four things:
+
+1. Import the [`Accelerator`] main class and instantiate one in an `accelerator` object:
+
+```python
+from accelerate import Accelerator
+
+accelerator = Accelerator()
+```
+
+This should happen as early as possible in your training script as it will initialize everything necessary for
+distributed training. You don't need to indicate the kind of environment you are in (just one machine with a GPU, one
+machine with several GPUs, several machines with multiple GPUs or a TPU), the library will detect this automatically.
+
+2. Remove the `.to(device)` or `.cuda()` calls for your model and input data. The `accelerator` object
+will handle this for you and place all those objects on the right device for you. If you know what you're doing, you
+can leave those `.to(device)` calls but you should use the device provided by the `accelerator` object:
+`accelerator.device`.
+
+To fully deactivate the automatic device placement, pass along `device_placement=False` when initializing your
+[`Accelerator`].
+
+
+
+    If you place your objects manually on the proper device, be careful to create your optimizer after putting your
+    model on `accelerator.device` or your training will fail on TPU.
+
+
+
+3. Pass all objects relevant to training (optimizer, model, training dataloader, learning rate scheduler) to the
+[`~Accelerator.prepare`] method. This will make sure everything is ready for training.
+
+```python
+model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+    model, optimizer, train_dataloader, lr_scheduler
+)
+```
+
+In particular, your training dataloader will be sharded across all GPUs/TPU cores available so that each one sees a
+different portion of the training dataset. Also, the random states of all processes will be synchronized at the
+beginning of each iteration through your dataloader, to make sure the data is shuffled the same way (if you decided to
+use `shuffle=True` or any kind of random sampler).
+
+
+
+    The actual batch size for your training will be the number of devices used multiplied by the batch size you set in
+    your script: for instance, training on 4 GPUs with a batch size of 16 set when creating the training dataloader will
+    train at an actual batch size of 64.
+
+
+
+Alternatively, you can use the option `split_batches=True` when creating and initializing your
+[`Accelerator`], in which case the batch size will always stay the same, whether you run your
+script on 1, 2, 4 or 64 GPUs.
+
+You should execute this instruction as soon as all objects for training are created, before starting your actual
+training loop.
+
+
+
+    You should only pass the learning rate scheduler to [`~Accelerator.prepare`] when the scheduler needs to be stepped
+    at each optimizer step.
+
+
+
+
+    Your training dataloader may change length when going through this method: if you run on X GPUs, it will have its
+    length divided by X (since your actual batch size will be multiplied by X), unless you set
+    `split_batches=True`.
+
+
+
+Any instruction using your training dataloader length (for instance if you want to log the number of total training
+steps) should go after the call to [`~Accelerator.prepare`].
+
+You can send your dataloader to [`~Accelerator.prepare`] on its own just fine, but it's best to send the
+model and optimizer to [`~Accelerator.prepare`] together.
+
+You may or may not want to send your validation dataloader to [`~Accelerator.prepare`], depending on
+whether you want to run distributed evaluation or not (see below).
+
+4. Replace the line `loss.backward()` with `accelerator.backward(loss)`.
+
+And you're all set! With all these changes, your script will run on your local machine as well as on multiple GPUs or a
+TPU! You can either use your favorite tool to launch the distributed training, or you can use the 🤗 Accelerate
+launcher.
+
+
+## Distributed evaluation
+
+You can perform regular evaluation in your training script, if you leave your validation dataloader out of the
+[`~Accelerator.prepare`] method. In this case, you will need to put the input data on the
+`accelerator.device` manually.
+
+To perform distributed evaluation, send along your validation dataloader to the [`~Accelerator.prepare`]
+method:
+
+```python
+validation_dataloader = accelerator.prepare(validation_dataloader)
+```
+
+As with your training dataloader, this means that (should you run your script on multiple devices) each device will
+only see part of the evaluation data. This means you will need to group your predictions together. This is very easy to
+do with the [`~Accelerator.gather_for_metrics`] method.
+
+```python
+for inputs, targets in validation_dataloader:
+    predictions = model(inputs)
+    # Gather all predictions and targets
+    all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets))
+    # Example of use with a *Datasets.Metric*
+    metric.add_batch(all_predictions, all_targets)
+```
+
+
+
+    Similar to the training dataloader, passing your validation dataloader through
+    [`~Accelerator.prepare`] may change it: if you run on X GPUs, it will have its length divided by X
+    (since your actual batch size will be multiplied by X), unless you set `split_batches=True`.
+
+
+
+Any instruction using your training dataloader length (for instance if you need the number of total training steps
+to create a learning rate scheduler) should go after the call to [`~Accelerator.prepare`].
+
+Some data at the end of the dataset may be duplicated so the batch can be divided equally among all workers. As a result, metrics
+should be calculated through the [`~Accelerator.gather_for_metrics`] method to automatically remove the duplicated data while gathering.
+
+
+
+    If for some reason you don't wish to have this automatically done, [`~Accelerator.gather`] can be used instead to gather
+    the data across all processes, and the deduplication can then be done manually.
+
+
+
+
+
+
+    The [`~Accelerator.gather`] and [`~Accelerator.gather_for_metrics`] methods require the tensors to be all the same size on each process. If
+    you have tensors of different sizes on each process (for instance when dynamically padding to the maximum length in
+    a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad your tensor to the
+    biggest size across processes.
+ + + +## Launching your distributed script + +You can use the regular commands to launch your distributed training (like `torch.distributed.launch` for +PyTorch), they are fully compatible with 🤗 Accelerate. The only caveat here is that 🤗 Accelerate uses the environment +to determine all useful information, so `torch.distributed.launch` should be used with the flag `--use_env`. + +🤗 Accelerate also provides a CLI tool that unifies all launchers, so you only have to remember one command. To use it, +just run: + +```bash +accelerate config +``` + +on your machine and reply to the questions asked. This will save a *default_config.yaml* file in your cache folder for +🤗 Accelerate. That cache folder is (with decreasing order of priority): + +- The content of your environment variable `HF_HOME` suffixed with *accelerate*. +- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with + *huggingface/accelerate*. +- If this does not exist either, the folder *~/.cache/huggingface/accelerate* + +You can also specify with the flag `--config_file` the location of the file you want to save. + +Once this is done, you can test everything is going well on your setup by running: + +```bash +accelerate test +``` + +This will launch a short script that will test the distributed environment. If it runs fine, you are ready for the next +step! + +Note that if you specified a location for the config file in the previous step, you need to pass it here as well: + +```bash +accelerate test --config_file path_to_config.yaml +``` + +Now that this is done, you can run your script with the following command: + +```bash +accelerate launch path_to_script.py --args_for_the_script +``` + +If you stored the config file in a non-default location, you can indicate it to the launcher like this: + +```bash +accelerate launch --config_file path_to_config.yaml path_to_script.py --args_for_the_script +``` + +You can also override any of the arguments determined by your config file. +To see the complete list of parameters that you can pass in, run `accelerate launch -h`. + +Check out the [Launch tutorial](basic_tutorials/launch) for more information about launching your scripts. + + +## Launching training from a notebook + +In Accelerate 0.3.0, a new [`notebook_launcher`] has been introduced to help you launch your training +function from a notebook. This launcher supports launching a training with TPUs on Colab or Kaggle, as well as training +on several GPUs (if the machine on which you are running your notebook has them). + +Just define a function responsible for your whole training and/or evaluation in a cell of the notebook, then execute a +cell with the following code: + +```python +from accelerate import notebook_launcher + +notebook_launcher(training_function) +``` + + + + Your [`Accelerator`] object should only be defined inside the training function. This is because the + initialization should be done inside the launcher only. + + + +Check out the [Notebook Launcher tutorial](basic_tutorials/notebook) for more information about training on TPUs. + + +## Training on TPU + +If you want to launch your script on TPUs, there are a few caveats you should be aware of. Behind the scenes, the TPUs +will create a graph of all the operations happening in your training step (forward pass, backward pass and optimizer +step). This is why your first step of training will always be very long as building and compiling this graph for +optimizations takes some time. 
+ +The good news is that this compilation will be cached so the second step and all the following will be much faster. The +bad news is that it only applies if all of your steps do exactly the same operations, which implies: + +- having all tensors of the same length in all your batches +- having static code (i.e., not a for loop of length that could change from step to step) + +Having any of the things above change between two steps will trigger a new compilation which will, once again, take a +lot of time. In practice, that means you must take special care to have all your tensors in your inputs of the same +shape (so no dynamic padding for instance if you are in an NLP problem) and should not use layers with for loops that +have different lengths depending on the inputs (such as an LSTM) or the training will be excruciatingly slow. + +To introduce special behavior in your script for TPUs you can check the `distributed_type` of your +`accelerator`: + +```python docstyle-ignore +from accelerate import DistributedType + +if accelerator.distributed_type == DistributedType.TPU: + # do something of static shape +else: + # go crazy and be dynamic +``` + +The [NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) shows an example in a +situation with dynamic padding. + +One last thing to pay close attention to: if your model has tied weights (such as language models which tie the weights +of the embedding matrix with the weights of the decoder), moving this model to the TPU (either yourself or after you +passed your model to [`~Accelerator.prepare`]) will break the tying. You will need to retie the weights +after. You can find an example of this in the [run_clm_no_trainer](https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py) script in +the Transformers repository. + +Check out the [TPU tutorial](concept_guides/training_tpu) for more information about training on TPUs. + + +## Other caveats + +We list here all smaller issues you could have in your script conversion and how to resolve them. + +### Execute a statement only on one processes + +Some of your instructions only need to run for one process on a given server: for instance a data download or a log +statement. To do this, wrap the statement in a test like this: + +```python docstyle-ignore +if accelerator.is_local_main_process: + # Is executed once per server +``` + +Another example is progress bars: to avoid having multiple progress bars in your output, you should only display one on +the local main process: + +```python +from tqdm.auto import tqdm + +progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) +``` + +The *local* means per machine: if you are running your training on two servers with several GPUs, the instruction will +be executed once on each of those servers. If you need to execute something only once for all processes (and not per +machine) for instance, uploading the final model to the 🤗 model hub, wrap it in a test like this: + +```python docstyle-ignore +if accelerator.is_main_process: + # Is executed once only +``` + +For printing statements you only want executed once per machine, you can just replace the `print` function by +`accelerator.print`. + + +### Defer execution + +When you run your usual script, instructions are executed in order. 
Using 🤗 Accelerate to deploy your script on several +GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be +faster than others. + +You might need to wait for all processes to have reached a certain point before executing a given instruction. For +instance, you shouldn't save a model before being sure every process is done with training. To do this, just write the +following line in your code: + +``` +accelerator.wait_for_everyone() +``` + +This instruction will block all the processes that arrive first until all the other processes have reached that +point (if you run your script on just one GPU or CPU, this won't do anything). + + +### Saving/loading a model + +Saving the model you trained might need a bit of adjustment: first you should wait for all processes to reach that +point in the script as shown above, and then, you should unwrap your model before saving it. This is because when going +through the [`~Accelerator.prepare`] method, your model may have been placed inside a bigger model, +which deals with the distributed training. This in turn means that saving your model state dictionary without taking +any precaution will take that potential extra layer into account, and you will end up with weights you can't load back +in your base model. + +This is why it's recommended to *unwrap* your model first. Here is an example: + +``` +accelerator.wait_for_everyone() +unwrapped_model = accelerator.unwrap_model(model) +accelerator.save(unwrapped_model.state_dict(), filename) +``` + +If your script contains logic to load a checkpoint, we also recommend you load your weights in the unwrapped model +(this is only useful if you use the load function after making your model go through +[`~Accelerator.prepare`]). Here is an example: + +``` +unwrapped_model = accelerator.unwrap_model(model) +unwrapped_model.load_state_dict(torch.load(filename)) +``` + +Note that since all the model parameters are references to tensors, this will load your weights inside `model`. + +## Saving/loading entire states + +When training your model, you may want to save the current state of the model, optimizer, random generators, and potentially LR schedulers to be restored in the _same script_. +You can use [`~Accelerator.save_state`] and [`~Accelerator.load_state`] respectively to do so. + +To further customize where and how states saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example +if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`. + +If you have registered any other stateful items to be stored through [`~Accelerator.register_for_checkpointing`] they will also be saved and/or loaded. + + + + Every object passed to [`~Accelerator.register_for_checkpointing`] must have a `load_state_dict` and `state_dict` function to be stored + + + + +### Gradient clipping + +If you are using gradient clipping in your script, you should replace the calls to +`torch.nn.utils.clip_grad_norm_` or `torch.nn.utils.clip_grad_value_` with [`~Accelerator.clip_grad_norm_`] +and [`~Accelerator.clip_grad_value_`] respectively. + + +### Mixed Precision training + +If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being +computed inside your model (like in Transformer models for instance). 
### Mixed Precision training

If you are running your training in Mixed Precision with 🤗 Accelerate, you will get the best result with your loss being
computed inside your model (as in Transformer models, for instance). Every computation outside of the model will be
executed in full precision (which is generally what you want for loss computation, especially if it involves a
softmax). However, you might want to put your loss computation inside the `accelerator.autocast()` context manager:

```python
with accelerator.autocast():
    loss = complex_loss_function(outputs, target)
```

Another caveat with Mixed Precision training is that the optimizer will skip a few updates at the beginning and
sometimes during training: because of the dynamic loss scaling strategy, there are points during training where the
gradients have overflowed, and the loss scaling factor is reduced to avoid this happening again at the next step.

This means that you may update your learning rate scheduler when there was no update, which is fine in general, but may
have an impact when you have very little training data, or if the first learning rate values of your scheduler are very
important. In this case, you can skip the learning rate scheduler updates when the optimizer step was not performed, like
this:

```python
if not accelerator.optimizer_step_was_skipped:
    lr_scheduler.step()
```

### Gradient Accumulation

To perform gradient accumulation, use [`~Accelerator.accumulate`] and specify a number of `gradient_accumulation_steps`.
This will also automatically ensure the gradients are synced or unsynced as appropriate during multi-device training, check if the step should
actually be performed, and auto-scale the loss:

```python
accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader)

for input, label in training_dataloader:
    with accelerator.accumulate(model):
        predictions = model(input)
        loss = loss_function(predictions, label)
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

### DeepSpeed

DeepSpeed support is experimental, so the underlying API will evolve in the near future and may have some slight
breaking changes. In particular, 🤗 Accelerate does not yet support a DeepSpeed config you have written yourself; this
will be added in a future version.

The [`notebook_launcher`] does not support the DeepSpeed integration yet.

## Internal mechanism

Internally, the library works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`~Accelerator`] and performs any
specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`].

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in a [`~optimizer.AcceleratedOptimizer`],
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`].

While the model(s) and optimizer(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly
because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created, and the
library handles the sharding of your data between processes by changing that `batch_sampler` so that each process only
gets every `num_processes`-th batch.
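Conceptually, that sharding behaves like the following rough sketch (an illustration only, not the actual implementation):

```python
# process `process_index` out of `num_processes` keeps batches
# process_index, process_index + num_processes, process_index + 2 * num_processes, ...
def shard_batches(batch_sampler, process_index, num_processes):
    for step, batch in enumerate(batch_sampler):
        if step % num_processes == process_index:
            yield batch
```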
The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
  randomization (like shuffling) is done the exact same way across processes.
- it puts the batches on the proper device before yielding them (unless you have opted out of
  `device_placement=True`).

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <=1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid
setting the same seed in the main random number generator in all processes.

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation), in the sense that all processes will get
the same random numbers from the torch random modules (so they will apply the same random data augmentation if it's
controlled by torch).

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6); see the traditional `RandomSampler` as an example.

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/big_modeling.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/big_modeling.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d7523a03bf8cb1c63b88a91959111b508b663fd6
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/big_modeling.mdx
@@ -0,0 +1,294 @@

# Handling big models for inference

When loading a pretrained model in PyTorch, the usual workflow looks like this:

```py
import torch

my_model = ModelClass(...)
state_dict = torch.load(checkpoint_file)
my_model.load_state_dict(state_dict)
```

In plain English, those steps are:
1. Create the model with randomly initialized weights
2. Load the model weights (in a dictionary usually called a state dict) from the disk
3. Load those weights inside the model

While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pretrained weights. If you're loading a model with 6 billion parameters, at 4 bytes per float32 parameter this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of that if the model is loaded in FP16).
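To make the arithmetic explicit, here is the back-of-the-envelope computation behind those numbers:

```python
params = 6_000_000_000      # 6 billion parameters
bytes_per_param = 4         # float32
per_copy_gb = params * bytes_per_param / 1e9
print(per_copy_gb)          # 24.0 GB per copy; the workflow holds two copies -> 48 GB
```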
This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future.

## How the Process Works: Working with Code

### Instantiating an empty model

The first tool 🤗 Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM, so that step 1 can be done on models of any size. Here is how it works:

```py
from accelerate import init_empty_weights

with init_empty_weights():
    my_model = ModelClass(...)
```

For instance:

```py
with init_empty_weights():
    model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
```

initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device.

You can't move a model initialized like this to the CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device.

### Sharded checkpoints

It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, that is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards.

🤗 Accelerate will handle sharded checkpoints as long as you follow this format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in JSON format that contains a dictionary mapping parameter names to the file containing their weights. For instance, we could have a folder containing:

```bash
first_state_dict.bin
index.json
second_state_dict.bin
```

with index.json being the following file:

```json
{
  "linear1.weight": "first_state_dict.bin",
  "linear1.bias": "first_state_dict.bin",
  "linear2.weight": "second_state_dict.bin",
  "linear2.bias": "second_state_dict.bin"
}
```

and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"linear1.bias"`, and `second_state_dict.bin` the ones for `"linear2.weight"` and `"linear2.bias"`.
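For a concrete picture of that format, here is a minimal sketch that writes such a sharded checkpoint for a toy two-layer model (the file and module names are purely illustrative):

```python
import json

import torch
from torch import nn


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(4, 4)
        self.linear2 = nn.Linear(4, 4)


state_dict = ToyModel().state_dict()

# split the state dict into two shards and save each one
torch.save({k: v for k, v in state_dict.items() if k.startswith("linear1")}, "first_state_dict.bin")
torch.save({k: v for k, v in state_dict.items() if k.startswith("linear2")}, "second_state_dict.bin")

# write the index mapping each parameter name to the shard holding it
index = {
    k: "first_state_dict.bin" if k.startswith("linear1") else "second_state_dict.bin"
    for k in state_dict
}
with open("index.json", "w") as f:
    json.dump(index, f, indent=2)
```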
### Loading weights

The second tool 🤗 Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard.

Here is how we can use this to load the [GPT-J-6B](https://huggingface.co/EleutherAI/gpt-j-6B) model. We start by cloning the sharded version of this model:

```bash
git clone https://huggingface.co/sgugger/sharded-gpt-j-6B
cd sharded-gpt-j-6B
git-lfs install
git pull
```

Then we can initialize the model with:

```py
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

checkpoint = "EleutherAI/gpt-j-6B"
config = AutoConfig.from_pretrained(checkpoint)

with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)
```

and load the checkpoint we just downloaded with:

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, "sharded-gpt-j-6B", device_map="auto", no_split_module_classes=["GPTJBlock"]
)
```

By passing `device_map="auto"`, we tell 🤗 Accelerate to determine automatically where to put each layer of the model depending on the available resources:
- first we use the maximum space available on the GPU(s)
- if we still need space, we store the remaining weights on the CPU
- if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors

`no_split_module_classes=["GPTJBlock"]` indicates that the modules that are `GPTJBlock` should not be split across different devices. You should set here all blocks that include a residual connection of some kind.

You can see the `device_map` that 🤗 Accelerate picked by accessing the `hf_device_map` attribute of your model:

```py
model.hf_device_map
```

```python out
{'transformer.wte': 0,
 'transformer.drop': 0,
 'transformer.h.0': 0,
 'transformer.h.1': 0,
 'transformer.h.2': 0,
 'transformer.h.3': 0,
 'transformer.h.4': 0,
 'transformer.h.5': 0,
 'transformer.h.6': 0,
 'transformer.h.7': 0,
 'transformer.h.8': 0,
 'transformer.h.9': 0,
 'transformer.h.10': 0,
 'transformer.h.11': 0,
 'transformer.h.12': 0,
 'transformer.h.13': 0,
 'transformer.h.14': 0,
 'transformer.h.15': 0,
 'transformer.h.16': 0,
 'transformer.h.17': 0,
 'transformer.h.18': 0,
 'transformer.h.19': 0,
 'transformer.h.20': 0,
 'transformer.h.21': 0,
 'transformer.h.22': 0,
 'transformer.h.23': 0,
 'transformer.h.24': 1,
 'transformer.h.25': 1,
 'transformer.h.26': 1,
 'transformer.h.27': 1,
 'transformer.ln_f': 1,
 'lm_head': 1}
```

You can also design your `device_map` yourself, if you prefer to explicitly decide where each layer should be. In this case, the command above becomes:

```py
model = load_checkpoint_and_dispatch(model, "sharded-gpt-j-6B", device_map=my_device_map)
```

### Run the model

Now that we have done this, our model lies across several devices, and maybe the hard drive.
But it can still be used as a regular PyTorch model:

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
inputs = tokenizer("Hello, my name is", return_tensors="pt")
inputs = inputs.to(0)
output = model.generate(inputs["input_ids"])
tokenizer.decode(output[0].tolist())
```

Behind the scenes, 🤗 Accelerate added hooks to the model, so that:
- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass, and cleaned up just after
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass, and cleaned up just after

This way, your model can run for inference even if it doesn't fit on one of the GPUs or in CPU RAM!

This only supports inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending GPU memory on intermediate activations.

### Designing a device map

You can let 🤗 Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself, if you want more control over where each layer should go.

You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.

All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that can on the GPU, then offload weights to the CPU or even to the disk if there is not enough RAM).

When you have more GPU memory available than the model size, here is the difference between each option:
- `"auto"` and `"balanced"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.
- `"balanced_low_0"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models.
- `"sequential"` will fit what it can on GPU 0, then move on to GPU 1 and so forth (so it won't use the last GPUs if it doesn't need to).

The options `"auto"` and `"balanced"` produce the same results for now, but the behavior of `"auto"` might change in the future if we find a strategy that makes more sense, while `"balanced"` will stay stable.

First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `"cpu"` key for the maximum RAM you want used for CPU offload. The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.
Here is an example where we don't want to use more than 10GiB on each of two GPUs and no more than 30GiB of CPU RAM for the model weights:

```python
from accelerate import infer_auto_device_map

device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB", "cpu": "30GiB"})
```

When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used, do `torch.ones(1).cuda()` and look at the memory usage.

Therefore, when you create memory maps with `max_memory`, make sure to adjust the available memory accordingly to avoid out-of-memory errors.

Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always places the output back on the device of the input). Therefore, if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on an 8x80GB A100 setup, the close-to-ideal map is:

```python
max_memory = {0: "30GIB", 1: "46GIB", 2: "46GIB", 3: "46GIB", 4: "46GIB", 5: "46GIB", 6: "46GIB", 7: "46GIB"}
```

As you can see, we gave the remaining 7 GPUs ~50% more memory than GPU 0.

If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs) or `"cpu"` for CPU offload, `"disk"` for disk offload. The keys need to cover the whole model; you can then define your device map as you wish: for instance, if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:

```python
device_map = {"block1": 0, "block2": 1}
```

another one that is valid could be:

```python
device_map = {"block1": 0, "block2.linear1": 0, "block2.linear2": 1, "block2.linear3": 1}
```

On the other hand, this one is not valid as it does not cover every parameter of the model:

```python
device_map = {"block1": 0, "block2.linear1": 1, "block2.linear2": 1}
```

To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back on GPU 0) to avoid making many transfers of data between the GPUs.

## Limits and further development

We are aware of the current limitations in the API:

- While this could theoretically work on just one CPU with potential disk offload, you need at least one GPU to run this API. This will be fixed in further development.
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize the GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), the same is not entirely true of Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to lack of RAM.
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) assigns devices sequentially (to avoid moving things back and forth), so if your first layer is bigger than the GPU you have, everything will end up on the CPU/disk.
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
- The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time while the others sit idle.
- When weights are offloaded to the CPU/hard drive, there is no pre-fetching (yet; we will work on this for future versions), which means the weights are put on the GPU when they are needed and not before.
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (as NVMe drives do).
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/checkpoint.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/checkpoint.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3545492051fba4c90e6de18827865d886f85ce5c
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/checkpoint.mdx
@@ -0,0 +1,63 @@

# Checkpointing

When training a PyTorch model with 🤗 Accelerate, you may often want to save and later resume a state of training. Doing so requires
saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside 🤗 Accelerate are two convenience functions to achieve this quickly:
- Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location
- Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state`

To further customize where and how states are saved through [`~Accelerator.save_state`], the [`~utils.ProjectConfiguration`] class can be used. For example,
if `automatic_checkpoint_naming` is enabled, each saved checkpoint will then be located at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`.

It should be noted that these states are expected to come from the same training script; they should not be from two separate scripts.

- By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions,
so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler.
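Returning to the [`~utils.ProjectConfiguration`] customization mentioned above, here is a minimal sketch of how it can be wired up (the save path is just illustrative):

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

config = ProjectConfiguration(project_dir="my/save/path", automatic_checkpoint_naming=True)
accelerator = Accelerator(project_config=config)

# each call now saves to my/save/path/checkpoints/checkpoint_{checkpoint_number}
accelerator.save_state()
```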
Below is a brief example using checkpointing to save and reload a state during training:

```python
from accelerate import Accelerator
import torch

accelerator = Accelerator(project_dir="my/save/path")

my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99)
my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)

# Register the LR scheduler
accelerator.register_for_checkpointing(my_scheduler)

# Save the starting state
accelerator.save_state()

device = accelerator.device
my_model.to(device)

# Perform training
for epoch in range(num_epochs):
    for batch in my_training_dataloader:
        my_optimizer.zero_grad()
        inputs, targets = batch
        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = my_model(inputs)
        loss = my_loss_function(outputs, targets)
        accelerator.backward(loss)
        my_optimizer.step()
        my_scheduler.step()

# Restore previous state
accelerator.load_state("my/save/path/checkpointing/checkpoint_0")
```
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/deepspeed.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/deepspeed.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0377296c172cc6a1ec3bdf8a3d44b78d47434ff7
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/deepspeed.mdx
@@ -0,0 +1,684 @@

# DeepSpeed

[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Currently it provides full support for:

1. Optimizer state partitioning (ZeRO stage 1)
2. Gradient partitioning (ZeRO stage 2)
3. Parameter partitioning (ZeRO stage 3)
4. Custom mixed precision training handling
5. A range of fast CUDA-extension-based optimizers
6. ZeRO-Offload to CPU and Disk/NVMe

ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe support is described in the paper [ZeRO-Infinity: Breaking the GPU
Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).

DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.

DeepSpeed ZeRO-3 can be used for inference as well, since it allows huge models to be loaded on multiple GPUs, which
won't be possible on a single GPU.

🤗 Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:

1. Integration of the DeepSpeed features via the `deepspeed config file` specification in `accelerate config`. You just supply your custom config file or use our template. Most of
   this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility.
   The user may have to change a few lines of code depending on the config.
2. Integration via `deepspeed_plugin`. This supports a subset of the DeepSpeed features and uses default options for the rest of the configurations.
   The user need not change any code, and this is good for those who are fine with most of the default settings of DeepSpeed.

## What is integrated?

Training:

1. DeepSpeed ZeRO training supports the full ZeRO stages 1, 2 and 3, as well as CPU/Disk offload of optimizer states, gradients and parameters.
Below is a short description of Data Parallelism using ZeRO (Zero Redundancy Optimizer), along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)
![ZeRO Data Parallelism](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png)

(Source: [link](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))

 a. **Stage 1**: Shards optimizer states across data parallel workers/GPUs

 b. **Stage 2**: Shards optimizer states + gradients across data parallel workers/GPUs

 c. **Stage 3**: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs

 d. **Optimizer Offload**: Offloads the gradients + optimizer states to CPU/Disk, building on top of ZeRO Stage 2

 e. **Param Offload**: Offloads the model parameters to CPU/Disk, building on top of ZeRO Stage 3

Note: With respect to Disk Offload, the disk should be an NVMe for decent speed, but it technically works on any disk.

Inference:

1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but
   it doesn't use an optimizer or an LR scheduler, and only stage 3 is relevant. For more details see:
   [deepspeed-zero-inference](#deepspeed-zero-inference).


## How it works?

**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation)
for more information.

We will first look at the easy-to-use integration via `accelerate config`, followed by the more flexible and feature-rich `deepspeed config file` integration.

### Accelerate DeepSpeed Plugin
On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed, to which you should answer no. Then answer the following questions to generate a basic DeepSpeed config.
This will generate a config file that will be used automatically to properly set the
default options when doing

```bash
accelerate launch my_script.py --args_to_my_script
```

For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with the DeepSpeed Plugin:

**ZeRO Stage-2 DeepSpeed Plugin Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: true
  zero_stage: 2
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

```bash
accelerate launch examples/nlp_example.py --mixed_precision fp16
```

**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

```bash
accelerate launch examples/nlp_example.py --mixed_precision fp16
```

Currently, `Accelerate` supports the following config options through the CLI:

```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
```
To be able to tweak more options, you will need to use a DeepSpeed config file.

### DeepSpeed Config File
On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed, to which you answer yes
and provide the path to the DeepSpeed config file.
This will generate a config file that will be used automatically to properly set the
default options when doing

```bash
accelerate launch my_script.py --args_to_my_script
```

For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with a DeepSpeed Config File:

**ZeRO Stage-2 DeepSpeed Config File Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json
  zero3_init_flag: true
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

with the contents of `zero_stage2_config.json` being:
```json
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "weight_decay": "auto",
            "torch_adam": true,
            "adam_w_mode": true
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "total_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 2,
        "allgather_partitions": true,
        "allgather_bucket_size": 2e8,
        "overlap_comm": true,
        "reduce_scatter": true,
        "reduce_bucket_size": "auto",
        "contiguous_gradients": true
    },
    "gradient_accumulation_steps": 1,
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
```

```bash
accelerate launch examples/by_feature/deepspeed_with_config_support.py \
--config_name "gpt2-large" \
--tokenizer_name "gpt2-large" \
--dataset_name "wikitext" \
--dataset_config_name "wikitext-2-raw-v1" \
--block_size 128 \
--output_dir "./clm/clm_deepspeed_stage2_accelerate" \
--learning_rate 5e-4 \
--per_device_train_batch_size 24 \
--per_device_eval_batch_size 24 \
--num_train_epochs 3 \
--with_tracking \
--report_to "wandb"
```

**ZeRO Stage-3 with CPU offload DeepSpeed Config File Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json
  zero3_init_flag: true
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```
with the contents of `zero_stage3_offload_config.json` being:
```json
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "total_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "sub_group_size": 1e9,
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": "auto"
    },
    "gradient_accumulation_steps": 1,
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
```

```bash
accelerate launch examples/by_feature/deepspeed_with_config_support.py \
--config_name "gpt2-large" \
--tokenizer_name "gpt2-large" \
--dataset_name "wikitext" \
--dataset_config_name "wikitext-2-raw-v1" \
--block_size 128 \
--output_dir "./clm/clm_deepspeed_stage3_offload_accelerate" \
--learning_rate 5e-4 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--num_train_epochs 3 \
--with_tracking \
--report_to "wandb"
```

**Important code changes when using DeepSpeed Config File**

1. DeepSpeed Optimizers and Schedulers. For more information on these,
see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.
We will look at the changes needed in the code when using these.

   a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file.
   In this situation, those will be used, and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/custom optimizers and schedulers in their code.
   Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
   ```python
   # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer
   optimizer_cls = (
       torch.optim.AdamW
       if accelerator.state.deepspeed_plugin is None
       or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
       else DummyOptim
   )
   optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)

   # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler
   if (
       accelerator.state.deepspeed_plugin is None
       or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
   ):
       lr_scheduler = get_scheduler(
           name=args.lr_scheduler_type,
           optimizer=optimizer,
           num_warmup_steps=args.num_warmup_steps,
           num_training_steps=args.max_train_steps,
       )
   else:
       lr_scheduler = DummyScheduler(
           optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps
       )
   ```
   b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.
   In this situation, no code changes are needed from the user, and this is the case when using integration via the DeepSpeed Plugin.
   In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.

   c. Custom Optim + DS Scheduler: The case when only the `scheduler` key is present in the DeepSpeed config file.
   In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/custom scheduler in their code.

   d. DS Optim + Custom Scheduler: The case when only the `optimizer` key is present in the DeepSpeed config file.
   This will result in an error, because you can only use a DS Scheduler when using a DS Optim.
2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by the `prepare` method
based on the model, dataloaders, dummy optimizer and dummy schedulers provided to the `prepare` method.
Only the `auto` fields specified in the above examples are handled by the `prepare` method; the rest have to be explicitly specified by the user.

**Things to note when using DeepSpeed Config File**

Below is a sample script using `deepspeed_config_file` in different scenarios.

Code `test.py`:

```python
from accelerate import Accelerator
from accelerate.state import AcceleratorState


def main():
    accelerator = Accelerator()
    accelerator.print(f"{AcceleratorState()}")


if __name__ == "__main__":
    main()
```

**Scenario 1**: A manually tampered accelerate config file that has `deepspeed_config_file` along with other entries.

1. Content of the `accelerate` config:

```yaml
command_file: null
commands: null
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: 'cpu'
  offload_param_device: 'cpu'
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
  deepspeed_config_file: 'ds_config.json'
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
gpu_ids: null
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
megatron_lm_config: {}
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_name: null
tpu_zone: null
use_cpu: false
```

2. `ds_config.json`:

```json
{
    "bf16": {
        "enabled": true
    },
    "zero_optimization": {
        "stage": 3,
        "stage3_gather_16bit_weights_on_model_save": false,
        "offload_optimizer": {
            "device": "none"
        },
        "offload_param": {
            "device": "none"
        }
    },
    "gradient_clipping": 1.0,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": 10,
    "steps_per_print": 2000000
}
```

3. Output of `accelerate launch test.py`:

```bash
ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored:
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
'zero3_save_16bit_model', 'mixed_precision'].
Please specify them appropriately in the DeepSpeed config file.
If you are using an accelerate config file, remove others config variables mentioned in the above specified list.
The easiest method is to create a new config following the questionnaire via `accelerate config`.
It will only ask for the necessary config variables when using `deepspeed_config_file`.
```

**Scenario 2**: Use the solution suggested by the error to create a new accelerate config and check that no ambiguity error is now thrown.

1. Run `accelerate config`:

```bash
$ accelerate config
-------------------------------------------------------------------------------------------------------------------------------
In which compute environment are you running?
This machine
-------------------------------------------------------------------------------------------------------------------------------
Which type of machine are you using?
multi-GPU
How many different machines will you use (use more than 1 for multi-node training)? [1]:
Do you wish to optimize your script with torch dynamo?[yes/NO]:
Do you want to use DeepSpeed? [yes/NO]: yes
Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes
Please enter the path to the json DeepSpeed config file: ds_config.json
Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes
How many GPU(s) should be used for distributed training? [1]:4
accelerate configuration saved at ds_config_sample.yaml
```

2. Content of the `accelerate` config:

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  deepspeed_config_file: ds_config.json
  zero3_init_flag: true
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
use_cpu: false
```

3. Output of `accelerate launch test.py`:

```bash
Distributed environment: DEEPSPEED Backend: nccl
Num processes: 4
Process index: 0
Local process index: 0
Device: cuda:0
Mixed precision type: bf16
ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}
```

**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `"auto"` in the DeepSpeed configuration file and checking that things work as expected.

1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments:

```json
{
    "bf16": {
        "enabled": "auto"
    },
    "zero_optimization": {
        "stage": "auto",
        "stage3_gather_16bit_weights_on_model_save": "auto",
        "offload_optimizer": {
            "device": "auto"
        },
        "offload_param": {
            "device": "auto"
        }
    },
    "gradient_clipping": "auto",
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
    "steps_per_print": 2000000
}
```

2. Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`:

```bash
Distributed environment: DEEPSPEED Backend: nccl
Num processes: 4
Process index: 0
Local process index: 0
Device: cuda:0
Mixed precision type: fp16
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
```

**Note**: Remaining `"auto"` values are handled in the `accelerator.prepare()` call, as explained in point 2 of
`Important code changes when using DeepSpeed Config File`.

## Saving and loading

1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.

2. Under ZeRO Stage-3, `state_dict` contains just the placeholders, since the model weights are partitioned across multiple GPUs.
ZeRO Stage-3 has 2 options:

   a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load(pytorch_model.bin))`.
   For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in the DeepSpeed Config file or set
   `zero3_save_16bit_model` to True in the DeepSpeed Plugin.
   **Note that this option requires consolidation of the weights on one GPU, so it can be slow and memory demanding; only use this feature when needed.**
   Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
   ```python
   unwrapped_model = accelerator.unwrap_model(model)

   # New Code #
   # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if
   # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or
   # `zero3_save_16bit_model` is True in DeepSpeed Plugin.
   # For Zero Stages 1 and 2, models are saved as usual in the output directory.
   # The model name saved is `pytorch_model.bin`
   unwrapped_model.save_pretrained(
       args.output_dir,
       is_main_process=accelerator.is_main_process,
       save_function=accelerator.save,
       state_dict=accelerator.get_state_dict(model),
   )
   ```

   b. To get 32bit weights, first save the model using `model.save_checkpoint()`.
   Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
   ```python
   success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
   status_msg = "checkpointing: PATH={}, ckpt_id={}".format(PATH, ckpt_id)
   if success:
       logging.info(f"Success {status_msg}")
   else:
       logging.warning(f"Failure {status_msg}")
   ```
   This will create ZeRO model and optimizer partitions along with a `zero_to_fp32.py` script in the checkpoint directory.
   You can use this script to do offline consolidation.
   It requires no configuration files or GPUs. Here is an example of its usage:
   ```bash
   $ cd /path/to/checkpoint_dir
   $ ./zero_to_fp32.py . pytorch_model.bin
   Processing zero checkpoint at global_step1
   Detected checkpoint of type zero stage 3, world_size: 2
   Saving fp32 state dict to pytorch_model.bin (total_numel=60506624)
   ```
   To get a 32bit model for saving/inference, you can perform:
   ```python
   from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint

   unwrapped_model = accelerator.unwrap_model(model)
   fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir)
   ```
   If you are only interested in the `state_dict`, you can do the following:
   ```python
   from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

   state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)
   ```
   Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint.

## ZeRO Inference
DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity.
It uses the same ZeRO protocol as training, but it doesn't use an optimizer or an LR scheduler, and only stage 3 is relevant.
With the accelerate integration, you just need to prepare the model and dataloader as shown below:

```python
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
```

## A few caveats to be aware of

1. The current integration doesn't support Pipeline Parallelism of DeepSpeed.
2. The current integration doesn't support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
3. The current integration doesn't support multiple models.

## DeepSpeed Resources

The documentation for the internals related to DeepSpeed can be found [here](../package_reference/deepspeed).
- [Project's github](https://github.com/microsoft/deepspeed)
- [Usage docs](https://www.deepspeed.ai/getting-started/)
- [API docs](https://deepspeed.readthedocs.io/en/latest/index.html)
- [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed)

Papers:

- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)
- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)
- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)

Finally, please remember that 🤗 `Accelerate` only integrates DeepSpeed; therefore, if you
have any problems or questions with regard to DeepSpeed usage, please file an issue on the [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).

diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/fsdp.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/fsdp.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a561e4ecd46c193ff3e2d22dd2014fe73284c93f
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/fsdp.mdx
@@ -0,0 +1,125 @@

# Fully Sharded Data Parallel

To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model.
This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters.
To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/).
We have integrated the latest PyTorch Fully Sharded Data Parallel (FSDP) training feature.
All you need to do is enable it through the config.

## How it works out of the box

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. This will generate a config file that will be used automatically to properly set the
default options when doing

```bash
accelerate launch my_script.py --args_to_my_script
```

For instance, here is how you would run the NLP example (from the root of the repo) with FSDP enabled:

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config: {}
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_backward_prefetch_policy: BACKWARD_PRE
  fsdp_offload_params: false
  fsdp_sharding_strategy: 1
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_transformer_layer_cls_to_wrap: GPT2Block
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 2
use_cpu: false
```

```bash
accelerate launch examples/nlp_example.py
```

Currently, `Accelerate` supports the following config options through the CLI:

```bash
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD
`Offload Params`: Decides whether to offload parameters and gradients to CPU
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, the user specifies the transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`...
`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`
`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
```

## A few caveats to be aware of

- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place.
  Due to this, any optimizer created before model wrapping gets broken and occupies more memory.
  Hence, it is highly recommended and more efficient to prepare the model before creating the optimizer.
  `Accelerate` will automatically wrap the model and create an optimizer for you in the case of a single model, with a warning message.
  > FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer

However, below is the recommended way to prepare the model and optimizer while using FSDP:

```diff
  model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
+ model = accelerator.prepare(model)

  optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)

- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
-     model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
- )

+ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+     optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
```

- In the case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together,
  then the parameter groups will be lost and the following warning is displayed:
  > FSDP Warning: When using FSDP, several parameter groups will be conflated into
  > a single one due to nested module wrapping and parameter flattening.

  This is because parameter groups created before wrapping will have no meaning post wrapping, due to the parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers).
  For instance, below are the named parameters of an FSDP model on GPU 0 (when using 2 GPUs: around 55M (110M/2) params end up in the 1D arrays, as GPU 0 holds the first shard of the parameters).
  Here, if one has applied no weight decay to [bias, LayerNorm.weight] via the named parameters of an unwrapped BERT model,
  it can't be applied to the FSDP-wrapped model below, as there are no named parameters containing either of those strings and
  the parameters of those layers are concatenated with the parameters of various other layers.
  ```
  {
    '_fsdp_wrapped_module.flat_param': torch.Size([494209]),
    '_fsdp_wrapped_module._fpw_module.bert.embeddings.word_embeddings._fsdp_wrapped_module.flat_param': torch.Size([11720448]),
    '_fsdp_wrapped_module._fpw_module.bert.encoder._fsdp_wrapped_module.flat_param': torch.Size([42527232])
  }
  ```


- In the case of multiple models, it is necessary to prepare the models before creating the optimizers, or else it will throw an error.
  Then pass the optimizers to the prepare call in the same order as the corresponding models; otherwise, `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour.
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of the 🤗 `Transformers` library.

For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation, as in the sketch below.
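Here is a minimal sketch of that pattern (using the plugin's default settings; its individual fields can be overridden for finer control):

```python
from accelerate import Accelerator, FullyShardedDataParallelPlugin

# a plugin with default settings; tweak its fields to customize FSDP behavior
fsdp_plugin = FullyShardedDataParallelPlugin()
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```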
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/gradient_accumulation.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/gradient_accumulation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..798e158471277f3ef825e3fc41fc1d68af92fa02
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/gradient_accumulation.mdx
@@ -0,0 +1,130 @@

# Performing gradient accumulation with 🤗 Accelerate

Gradient accumulation is a technique where you can train on bigger batch sizes than
your machine would normally be able to fit into memory. This is done by accumulating gradients over
several batches, and only stepping the optimizer after a certain number of batches have been processed.

While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient
method for doing so and you may experience considerable slowdowns!

In this tutorial you will see how to quickly set up gradient accumulation and perform it with the utilities provided in 🤗 Accelerate,
which can amount to adding just one new line of code!

This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches:

```python
device = "cuda"
model.to(device)

gradient_accumulation_steps = 2

for index, batch in enumerate(training_dataloader):
    inputs, targets = batch
    inputs = inputs.to(device)
    targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    loss = loss / gradient_accumulation_steps
    loss.backward()
    if (index + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

## Converting it to 🤗 Accelerate

First, the code shown earlier will be converted to use 🤗 Accelerate without the special gradient accumulation helper:

```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()

+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+     model, optimizer, training_dataloader, scheduler
+ )

  for index, batch in enumerate(training_dataloader):
      inputs, targets = batch
-     inputs = inputs.to(device)
-     targets = targets.to(device)
      outputs = model(inputs)
      loss = loss_function(outputs, targets)
      loss = loss / gradient_accumulation_steps
-     loss.backward()
+     accelerator.backward(loss)
      if (index+1) % gradient_accumulation_steps == 0:
          optimizer.step()
          scheduler.step()
          optimizer.zero_grad()
```

In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](concept_guides/gradient_synchronization)!

## Letting 🤗 Accelerate handle gradient accumulation

All that is left now is to let 🤗 Accelerate handle the gradient accumulation for us.
To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number
+of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`~Accelerator.backward`]:
+
+```diff
+  from accelerate import Accelerator
+- accelerator = Accelerator()
++ accelerator = Accelerator(gradient_accumulation_steps=2)
+```
+
+From here you can use the [`~Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you!
+You just wrap it around the entire training part of your code:
+
+```diff
+- for index, batch in enumerate(training_dataloader):
++ for batch in training_dataloader:
++     with accelerator.accumulate(model):
+          inputs, targets = batch
+          outputs = model(inputs)
+```
+
+You can remove all the special checks for the step number and the loss adjustment:
+
+```diff
+- loss = loss / gradient_accumulation_steps
+  accelerator.backward(loss)
+- if (index+1) % gradient_accumulation_steps == 0:
+  optimizer.step()
+  scheduler.step()
+  optimizer.zero_grad()
+```
+
+As you can see, the [`Accelerator`] keeps track of the batch number you are on, and it automatically knows whether to step through the prepared optimizer and how to adjust the loss.
+
+## The finished code
+
+Below is the finished implementation for performing gradient accumulation with 🤗 Accelerate
+
+```python
+for batch in training_dataloader:
+    with accelerator.accumulate(model):
+        inputs, targets = batch
+        outputs = model(inputs)
+        loss = loss_function(outputs, targets)
+        accelerator.backward(loss)
+        optimizer.step()
+        scheduler.step()
+        optimizer.zero_grad()
+```
+
+To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](/concept_guides/gradient_synchronization)
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/megatron_lm.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/megatron_lm.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..23b024f758397082eb259b3dba11eee235ffbcc7
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/megatron_lm.mdx
@@ -0,0 +1,579 @@
+
+
+
+# Megatron-LM
+
+[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale.
+It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based
+Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder).
+For detailed information and how things work behind the scenes, please refer to the GitHub [repo](https://github.com/NVIDIA/Megatron-LM).
+
+## What is integrated?
+
+Accelerate integrates the following features of Megatron-LM to enable large scale pre-training/finetuning
+of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):
+
+a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks.
+Each tensor is split into multiple chunks, with each shard residing on a separate GPU. At each step, the same mini-batch of data is processed
+independently and in parallel by each shard, followed by syncing across all GPUs (`all-reduce` operation).
+In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.
+For more details, please refer to the research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using
+Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and
+this section of the 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism).
+
+
+b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization.
+Reduces the bubble of naive PP via the PipeDream-Flush schedule/1F1B schedule and the Interleaved 1F1B schedule.
+Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for
+pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP,
+please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters
+Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and
+this section of the 🤗 blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism).
+
+c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP.
+It reduces the activation memory required by preventing duplicate copies from sitting on the tensor parallel ranks
+post `all-reduce`: the `all-reduce` is replaced with a `reduce-scatter`, and the `no-op` operation is replaced by an `all-gather`.
+As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost.
+To put it simply, it shards the outputs of each transformer layer along the sequence dimension, e.g.,
+if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample.
+This increases the batch size that can be supported for training. For more details, please refer to the research paper
+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf).
+
+d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks
+(versus the traditional method of replicating the optimizer state across data parallel ranks).
+For example, when using the Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory.
+This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs.
+For more details, please refer to the research paper [ZeRO: Memory Optimizations Toward Training Trillion
+Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and the following section of the 🤗 blog
+[The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism).
+
+e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing.
+It doesn't store activations occupying large memory while being fast to recompute, thereby achieving a great tradeoff between memory and recomputation.
+For example, for GPT-3, this leads to a 70% reduction in required memory for activations at the expense of
+only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper
+[Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf).
+
+f.
**Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer.
+PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition.
+
+g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format.
+
+h. **Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable
+tensor and pipeline parallel sizes to the beloved 🤗 Transformers sharded checkpoints, as it has great support with a plethora of tools
+such as 🤗 Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc.
+Support is also available for converting 🤗 Transformers sharded checkpoints to Megatron-LM checkpoints of variable tensor and pipeline parallel sizes
+for large scale training.
+
+
+## Pre-Requisites
+
+You will need to install the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases and the nltk library.
+See [documentation](https://github.com/NVIDIA/Megatron-LM#setup) for more details.
+Another way to set up the environment is to pull an NVIDIA PyTorch Container that comes with all the required installations from NGC.
+
+Below is a step-by-step method to set up the conda environment:
+
+1. Create a virtual environment
+```
+conda create --name ml
+```
+
+2. Assuming that the machine has CUDA 11.3 installed, install the corresponding PyTorch GPU version
+```
+conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
+```
+
+3. Install NVIDIA APEX
+```
+git clone https://github.com/NVIDIA/apex
+cd apex
+pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
+cd ..
+```
+
+4. Install Megatron-LM
+
+```
+pip install git+https://github.com/huggingface/Megatron-LM.git
+```
+
+## Accelerate Megatron-LM Plugin
+
+Important features are directly supported via the `accelerate config` command.
+An example of the corresponding questions for using Megatron-LM features is shown below:
+
+```bash
+:~$ accelerate config --config_file "megatron_gpt_config.yaml"
+In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0
+Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU, [4] MPS): 2
+How many different machines will you use (use more than 1 for multi-node training)? [1]:
+Do you want to use DeepSpeed? [yes/NO]:
+Do you want to use FullyShardedDataParallel? [yes/NO]:
+Do you want to use Megatron-LM? [yes/NO]: yes
+What is the Tensor Parallelism degree/size? [1]:2
+Do you want to enable Sequence Parallelism? [YES/no]:
+What is the Pipeline Parallelism degree/size? [1]:2
+What is the number of micro-batches? [1]:2
+Do you want to enable selective activation recomputation? [YES/no]:
+Do you want to use the distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]:
+What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]:
+How many GPU(s) should be used for distributed training? [1]:4
+Do you wish to use FP16 or BF16 (mixed precision)?
[NO/fp16/bf16]: bf16
+```
+
+The resulting config is shown below:
+
+```
+~$ cat megatron_gpt_config.yaml
+compute_environment: LOCAL_MACHINE
+deepspeed_config: {}
+distributed_type: MEGATRON_LM
+downcast_bf16: 'no'
+fsdp_config: {}
+machine_rank: 0
+main_process_ip: null
+main_process_port: null
+main_training_function: main
+megatron_lm_config:
+  megatron_lm_gradient_clipping: 1.0
+  megatron_lm_num_micro_batches: 2
+  megatron_lm_pp_degree: 2
+  megatron_lm_recompute_activations: true
+  megatron_lm_sequence_parallelism: true
+  megatron_lm_tp_degree: 2
+  megatron_lm_use_distributed_optimizer: true
+mixed_precision: bf16
+num_machines: 1
+num_processes: 4
+rdzv_backend: static
+same_network: true
+use_cpu: false
+```
+
+We will take the example of GPT pre-training. The minimal changes required to the official `run_clm_no_trainer.py`
+to use Megatron-LM are as follows:
+
+1. As Megatron-LM uses its own implementation of the optimizer, the corresponding scheduler compatible with it needs to be used.
+As such, only Megatron-LM's scheduler is supported. The user will need to create `accelerate.utils.MegatronLMDummyScheduler`, as in the example below:
+
+```python
+from accelerate.utils import MegatronLMDummyScheduler
+
+if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+    lr_scheduler = MegatronLMDummyScheduler(
+        optimizer=optimizer,
+        total_num_steps=args.max_train_steps,
+        warmup_num_steps=args.num_warmup_steps,
+    )
+else:
+    lr_scheduler = get_scheduler(
+        name=args.lr_scheduler_type,
+        optimizer=optimizer,
+        num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,
+        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
+    )
+```
+
+2. Getting the details of the total batch size now needs to be cognizant of the tensor and pipeline parallel sizes.
+An example of getting the effective total batch size is shown below:
+
+```python
+if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+    total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size
+else:
+    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+```
+
+3. When using Megatron-LM, the losses are already averaged across the data parallel group:
+
+```python
+if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+    losses.append(loss)
+else:
+    losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
+
+if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+    losses = torch.tensor(losses)
+else:
+    losses = torch.cat(losses)
+```
+
+4. For Megatron-LM, we need to save the model using `accelerator.save_state`:
+
+```python
+if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+    accelerator.save_state(args.output_dir)
+else:
+    unwrapped_model = accelerator.unwrap_model(model)
+    unwrapped_model.save_pretrained(
+        args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
+    )
+```
+
+That's it! We are good to go 🚀. Please find the example script in the examples folder at the path `accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py`.
+Let's run it for the `gpt-large` model architecture using 4 A100-80GB GPUs.
+
+```bash
+accelerate launch --config_file megatron_gpt_config.yaml \
+examples/by_feature/megatron_lm_gpt_pretraining.py \
+--config_name "gpt2-large" \
+--tokenizer_name "gpt2-large" \
+--dataset_name wikitext \
+--dataset_config_name wikitext-2-raw-v1 \
+--block_size 1024 \
+--learning_rate 5e-5 \
+--per_device_train_batch_size 24 \
+--per_device_eval_batch_size 24 \
+--num_train_epochs 5 \
+--with_tracking \
+--report_to "wandb" \
+--output_dir "awesome_model"
+```
+
+Below are some important excerpts from the output logs:
+
+```bash
+Loading extension module fused_dense_cuda...
+>>> done with compiling and loading fused kernels. Compilation time: 3.569 seconds
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
+Building gpt model in the pre-training mode.
+The Megatron LM model weights are initialized at random in `accelerator.prepare`. Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup.
+Preparing dataloader
+Preparing dataloader
+Preparing model
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 210753280
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 209445120
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 210753280
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 209445120
+Preparing optimizer
+Preparing scheduler
+> learning rate decay style: linear
+10/10/2022 22:57:22 - INFO - __main__ - ***** Running training *****
+10/10/2022 22:57:22 - INFO - __main__ - Num examples = 2318
+10/10/2022 22:57:22 - INFO - __main__ - Num Epochs = 5
+10/10/2022 22:57:22 - INFO - __main__ - Instantaneous batch size per device = 24
+10/10/2022 22:57:22 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 48
+10/10/2022 22:57:22 - INFO - __main__ - Gradient Accumulation steps = 1
+10/10/2022 22:57:22 - INFO - __main__ - Total optimization steps = 245
+ 20%|████████████▍ | 49/245 [01:04<04:09, 1.27s/it]
+ 10/10/2022 22:58:29 - INFO - __main__ - epoch 0: perplexity: 1222.1594275215962 eval_loss: 7.10837459564209
+ 40%|████████████████████████▊ | 98/245 [02:10<03:07, 1.28s/it]
+ 10/10/2022 22:59:35 - INFO - __main__ - epoch 1: perplexity: 894.5236583794557 eval_loss: 6.796291351318359
+ 60%|████████████████████████████████████▌ | 147/245 [03:16<02:05, 1.28s/it]
+ 10/10/2022 23:00:40 - INFO - __main__ - epoch 2: perplexity: 702.8458788508042 eval_loss: 6.555137634277344
+ 80%|████████████████████████████████████████████████▊ | 196/245 [04:22<01:02, 1.28s/it]
+ 10/10/2022 23:01:46 - INFO - __main__ - epoch 3: perplexity: 600.3220028695281 eval_loss: 6.39746618270874
+100%|█████████████████████████████████████████████████████████████| 245/245 [05:27<00:00, 1.28s/it]
+```
+
+There are a large number of other options/features that one can set using `accelerate.utils.MegatronLMPlugin`; a minimal sketch of constructing one directly is shown below.
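+
+As a rough sketch, such a plugin can also be built in code and handed to the `Accelerator`. The field names below mirror the `megatron_lm_config` entries in the YAML above, but both they and the `megatron_lm_plugin` keyword argument are assumptions to verify against your installed version of 🤗 Accelerate:
+
+```python
+from accelerate import Accelerator
+from accelerate.utils import MegatronLMPlugin
+
+# Mirrors the YAML config shown earlier (tp=2, pp=2, 2 micro-batches,
+# gradient clipping at 1.0); adjust to your own topology.
+megatron_lm_plugin = MegatronLMPlugin(
+    tp_degree=2,
+    pp_degree=2,
+    num_micro_batches=2,
+    gradient_clipping=1.0,
+)
+accelerator = Accelerator(megatron_lm_plugin=megatron_lm_plugin)
+```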
+
+## Advanced features to leverage writing custom train step and Megatron-LM Indexed Datasets
+
+For leveraging more features, please go through the details below.
+
+1. Below is an example of the changes required to customize the Train Step while using Megatron-LM.
+You will implement the `accelerate.utils.AbstractTrainStep` or inherit from its corresponding children
+`accelerate.utils.GPTTrainStep`, `accelerate.utils.BertTrainStep` or `accelerate.utils.T5TrainStep`.
+
+```python
+from functools import partial
+
+import torch
+
+from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group
+
+
+# Custom loss function for the Megatron model
+class GPTTrainStepWithCustomLoss(GPTTrainStep):
+    def __init__(self, megatron_args, **kwargs):
+        super().__init__(megatron_args)
+        self.kwargs = kwargs
+
+    def get_loss_func(self):
+        def loss_func(inputs, loss_mask, output_tensor):
+            batch_size, seq_length = output_tensor.shape
+            losses = output_tensor.float()
+            loss_mask = loss_mask.view(-1).float()
+            loss = losses.view(-1) * loss_mask
+
+            # Resize and average loss per sample
+            loss_per_sample = loss.view(batch_size, seq_length).sum(axis=1)
+            loss_mask_per_sample = loss_mask.view(batch_size, seq_length).sum(axis=1)
+            loss_per_sample = loss_per_sample / loss_mask_per_sample
+
+            # Calculate and scale weighting
+            weights = torch.stack([(inputs == kt).float() for kt in self.kwargs["keytoken_ids"]]).sum(axis=[0, 2])
+            weights = 1.0 + self.kwargs["alpha"] * weights
+            # Calculate weighted average
+            weighted_loss = (loss_per_sample * weights).mean()
+
+            # Reduce loss across data parallel groups
+            averaged_loss = avg_losses_across_data_parallel_group([weighted_loss])
+
+            return weighted_loss, {"lm loss": averaged_loss[0]}
+
+        return loss_func
+
+    def get_forward_step_func(self):
+        def forward_step(data_iterator, model):
+            """Forward step."""
+            # Get the batch.
+            tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)
+            output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
+
+            return output_tensor, partial(self.loss_func, tokens, loss_mask)
+
+        return forward_step
+
+
+def main():
+    # Custom loss function for the Megatron model
+    keytoken_ids = []
+    keywords = ["plt", "pd", "sk", "fit", "predict", " plt", " pd", " sk", " fit", " predict"]
+    for keyword in keywords:
+        ids = tokenizer([keyword]).input_ids[0]
+        if len(ids) == 1:
+            keytoken_ids.append(ids[0])
+    accelerator.print(f"Keytoken ids: {keytoken_ids}")
+    accelerator.state.megatron_lm_plugin.custom_train_step_class = GPTTrainStepWithCustomLoss
+    accelerator.state.megatron_lm_plugin.custom_train_step_kwargs = {
+        "keytoken_ids": keytoken_ids,
+        "alpha": 0.25,
+    }
+```
+
+2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets
+are available only on rank 0 of each tensor parallel group. As such, there are ranks where the dataloader won't be
+available, and this requires tweaks to the training loop. Being able to do all this shows how
+flexible and extensible 🤗 Accelerate is. The changes required are as follows.
+
+a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader`
+and pass the required dataset args to it, such as `data_path`, `seq_length` etc.
+See [here](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L804) for the list of available args.
+
+```python
+from accelerate.utils import MegatronLMDummyDataLoader
+
+megatron_dataloader_config = {
+    "data_path": args.data_path,
+    "splits_string": args.splits_string,
+    "seq_length": args.block_size,
+    "micro_batch_size": args.per_device_train_batch_size,
+}
+megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config)
+accelerator.state.megatron_lm_plugin.megatron_dataset_flag = True
+```
+
+b.
`megatron_dataloader` is repeated 3 times to get training, validation and test dataloaders
+as per the `args.splits_string` proportions.
+
+```python
+model, optimizer, lr_scheduler, train_dataloader, eval_dataloader, _ = accelerator.prepare(
+    model, optimizer, lr_scheduler, megatron_dataloader, megatron_dataloader, megatron_dataloader
+)
+```
+
+c. Changes to the training and evaluation loops are required, as the dataloader is only available on tensor parallel rank 0.
+So, we need to iterate only if the dataloader isn't `None`, else provide an empty dict.
+As such, we loop using a `while` loop and break when `completed_steps` is equal to `args.max_train_steps`.
+This is similar to the Megatron-LM setup wherein the user has to provide `max_train_steps` when using Megatron-LM indexed datasets.
+
+```python
+while completed_steps < args.max_train_steps:
+    model.train()
+    batch = next(train_dataloader) if train_dataloader is not None else {}
+    outputs = model(**batch)
+    loss = outputs.loss
+    ...
+
+    if completed_steps % eval_interval == 0:
+        eval_completed_steps = 0
+        losses = []
+        while eval_completed_steps < eval_iters:
+            model.eval()
+            with torch.no_grad():
+                batch = next(eval_dataloader) if eval_dataloader is not None else {}
+                outputs = model(**batch)
+```
+
+
+## Utility for Checkpoint reshaping and interoperability
+
+1. The scripts for these are present in the 🤗 Transformers library under the respective models.
+Currently, it is available for the GPT model: [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py)
+
+2. Below is an example of conversion of a checkpoint from Megatron-LM to a universal 🤗 Transformers sharded checkpoint.
+```bash
+python checkpoint_reshaping_and_interoperability.py \
+--convert_checkpoint_from_megatron_to_transformers \
+--load_path "gpt/iter_0005000" \
+--save_path "gpt/trfs_checkpoint" \
+--max_shard_size "200MB" \
+--tokenizer_name "gpt2" \
+--print-checkpoint-structure
+```
+
+3. Conversion of a checkpoint from 🤗 Transformers to Megatron-LM with `tp_size=2`, `pp_size=2` and `dp_size=2`.
+```bash
+python checkpoint_utils/megatron_gpt2/checkpoint_reshaping_and_interoperability.py \
+--load_path "gpt/trfs_checkpoint" \
+--save_path "gpt/megatron_lm_checkpoint" \
+--target_tensor_model_parallel_size 2 \
+--target_pipeline_model_parallel_size 2 \
+--target_data_parallel_size 2 \
+--target_params_dtype "bf16" \
+--make_vocab_size_divisible_by 128 \
+--use_distributed_optimizer \
+--print-checkpoint-structure
+```
+
+## Megatron-LM GPT models support returning logits and the `megatron_generate` function for text generation
+
+1. Returning logits requires setting `return_logits=True` in MegatronLMPlugin as shown below.
+These would be available in the last stage of the pipeline.
+```python
+megatron_lm_plugin = MegatronLMPlugin(return_logits=True)
+```
+
+2. `megatron_generate` method for Megatron-LM GPT model: This will use Tensor and Pipeline Parallelism to complete
+generations for a batch of inputs when using greedy decoding with/without top_k/top_p sampling, and for individual prompt inputs when using beam search decoding.
+Only a subset of the features of transformers' generate is supported. This will help in using large models via tensor and pipeline parallelism
+for generation (it already does key-value caching and uses fused kernels by default).
+This requires the data parallel size to be 1, and sequence parallelism and activation checkpointing to be disabled.
+It also requires specifying the paths to the tokenizer's vocab and merges files.
+The example below shows how to configure and use the `megatron_generate` method for a Megatron-LM GPT model.
+```python
+import os
+
+# specifying tokenizer's vocab and merges file
+vocab_file = os.path.join(args.resume_from_checkpoint, "vocab.json")
+merge_file = os.path.join(args.resume_from_checkpoint, "merges.txt")
+other_megatron_args = {"vocab_file": vocab_file, "merge_file": merge_file}
+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
+
+# inference using `megatron_generate` functionality
+tokenizer.pad_token = tokenizer.eos_token
+max_new_tokens = 64
+batch_texts = [
+    "Are you human?",
+    "The purpose of life is",
+    "The arsenal was constructed at the request of",
+    "How are you doing these days?",
+]
+batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True)
+
+# top-p sampling
+generated_tokens = model.megatron_generate(
+    batch_encodings["input_ids"],
+    batch_encodings["attention_mask"],
+    max_new_tokens=max_new_tokens,
+    top_p=0.8,
+    top_p_decay=0.5,
+    temperature=0.9,
+)
+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())
+accelerator.print(decoded_preds)
+
+# top-k sampling
+generated_tokens = model.megatron_generate(
+    batch_encodings["input_ids"],
+    batch_encodings["attention_mask"],
+    max_new_tokens=max_new_tokens,
+    top_k=50,
+    temperature=0.9,
+)
+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())
+accelerator.print(decoded_preds)
+
+# adding `bos` token at the start
+generated_tokens = model.megatron_generate(
+    batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, add_BOS=True
+)
+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())
+accelerator.print(decoded_preds)
+
+# beam search => only takes a single prompt
+batch_texts = ["The purpose of life is"]
+batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True)
+generated_tokens = model.megatron_generate(
+    batch_encodings["input_ids"],
+    batch_encodings["attention_mask"],
+    max_new_tokens=max_new_tokens,
+    num_beams=20,
+    length_penalty=1.5,
+)
+decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy())
+accelerator.print(decoded_preds)
+```
+
+3. An end-to-end example of using the `megatron_generate` method for a Megatron-LM GPT model is available at
+[megatron_gpt2_generation.py](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/inference/megatron_gpt2_generation.py) with
+config file [megatron_lm_gpt_generate_config.yaml](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/Configs/megatron_lm_gpt_generate_config.yaml).
+The bash script with the accelerate launch command is available at [megatron_lm_gpt_generate.sh](https://github.com/pacman100/accelerate-megatron-test/blob/main/megatron_lm_gpt_generate.sh).
+The output logs of the script are available at [megatron_lm_gpt_generate.log](https://github.com/pacman100/accelerate-megatron-test/blob/main/output_logs/megatron_lm_gpt_generate.log).
+
+## Support for ROPE and ALiBi Positional embeddings and Multi-Query Attention
+
+1. For ROPE/ALiBi attention, pass `position_embedding_type` with `("absolute" | "rotary" | "alibi")` to `MegatronLMPlugin` as shown below.
+```python
+other_megatron_args = {"position_embedding_type": "alibi"}
+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
+```
+
+2. For Multi-Query Attention, pass `attention_head_type` with `("multihead" | "multiquery")` to `MegatronLMPlugin` as shown below.
+```python
+other_megatron_args = {"attention_head_type": "multiquery"}
+megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args)
+```
+
+## Caveats
+
+1. Supports Transformers GPT2, Megatron-BERT and T5 models.
+This covers Decoder only, Encoder only and Encoder-Decoder model classes.
+
+2. Only loss is returned from the model forward pass, as
+there is a quite complex interplay of pipeline, tensor and data parallelism behind the scenes.
+The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks.
+This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and
+you can easily compute the `perplexity` using the loss.
+For the GPT model, returning logits in addition to loss(es) is supported.
+These logits aren't gathered across data parallel ranks. Use `accelerate.utils.gather_across_data_parallel_groups`
+to gather logits across data parallel ranks. These logits along with labels can be used for computing various
+performance metrics.
+
+3. The main process is the last rank, as the losses/logits are available in the last stage of the pipeline.
+`accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for the last rank when using
+the Megatron-LM integration.
+
+4. In the `accelerator.prepare` call, a Megatron-LM model corresponding to a given Transformers model is created
+with random weights. Please use `accelerator.load_state` to load a Megatron-LM checkpoint with matching TP, PP and DP partitions.
+
+5. Currently, checkpoint reshaping and interoperability support is only available for GPT.
+Soon it will be extended to BERT and T5.
+
+6. `gradient_accumulation_steps` needs to be 1. When using Megatron-LM, the number of micro-batches in the pipeline parallelism
+setting is synonymous with gradient accumulation.
+
+7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.
+
+8. Below are the mappings from Megatron-LM model architectures to the equivalent 🤗 transformers model architectures.
+Only these 🤗 transformers model architectures are supported.
+
+a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) :
+🤗 transformers models with `megatron-bert` in config's model type, e.g.,
+[MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)
+
+b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) :
+🤗 transformers models with `gpt2` in config's model type, e.g.,
+[OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)
+
+c.
Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) :
+🤗 transformers models with `t5` in config's model type, e.g.,
+[T5](https://huggingface.co/docs/transformers/model_doc/t5) and
+[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/memory.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/memory.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..213a2f6707f6b6386e95d9e3cd098abf6ce76c21
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/memory.mdx
@@ -0,0 +1,55 @@
+
+
+# Memory Utilities
+
+One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory",
+as the entire script needs to be restarted, progress is lost, and typically a developer would want to simply
+start their script and let it run.
+
+`Accelerate` provides a utility heavily based on [toma](https://github.com/BlackHC/toma) to give this capability.
+
+## find_executable_batch_size
+
+This algorithm operates with exponential decay, halving the batch size after each failed run of the
+training script. To use it, restructure your training function to include an inner function that includes this wrapper,
+and build your dataloaders inside it. At a minimum, this could look like 4 new lines of code.
+> Note: The inner function *must* take in the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for us.
+
+It should also be noted that anything which will consume CUDA memory and is passed to the `accelerator` **must** be declared inside the inner function,
+such as models and optimizers.
+
+```diff
+def training_function(args):
+    accelerator = Accelerator()
+
++   @find_executable_batch_size(starting_batch_size=args.batch_size)
++   def inner_training_loop(batch_size):
++       nonlocal accelerator  # Ensure they can be used in our context
++       accelerator.free_memory()  # Free all lingering references
+        model = get_model()
+        model.to(accelerator.device)
+        optimizer = get_optimizer()
+        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
+        lr_scheduler = get_scheduler(
+            optimizer,
+            num_training_steps=len(train_dataloader)*num_epochs
+        )
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+        )
+        train(model, optimizer, train_dataloader, lr_scheduler)
+        validate(model, eval_dataloader)
++   inner_training_loop()
+```
+
+To find out more, check the documentation [here](../package_reference/utilities#accelerate.find_executable_batch_size).
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/mps.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/mps.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7a7f8a6283e7ee5b9f9be278f3b8ab963430926b
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/mps.mdx
@@ -0,0 +1,82 @@
+
+
+# Accelerated PyTorch Training on Mac
+
+With the PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training.
+This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac.
+Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `"mps"` device.
+This will map computational graphs and primitives onto the MPS Graph framework and the tuned kernels provided by MPS.
+For more information, please refer to the official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/)
+and [MPS backend](https://pytorch.org/docs/stable/notes/mps.html).
+
+### Benefits of Training and Inference using Apple Silicon Chips
+
+1. Enables users to train larger networks or batch sizes locally
+2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to the unified memory architecture,
+thereby improving end-to-end performance.
+3. Reduces costs associated with cloud-based development or the need for additional local GPUs.
+
+**Pre-requisites**: To install torch with mps support,
+please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1).
+
+
+## How it works out of the box
+
+On your machine(s) just run:
+
+```bash
+accelerate config
+```
+
+and answer the questions asked, specifically choosing `MPS` for the query:
+
+```
+Which type of machine are you using?
+```
+
+This will generate a config file that will be used automatically to properly set
+the default options when doing `accelerate launch`, such as the one shown below:
+
+```bash
+compute_environment: LOCAL_MACHINE
+deepspeed_config: {}
+distributed_type: MPS
+downcast_bf16: 'no'
+fsdp_config: {}
+machine_rank: 0
+main_process_ip: null
+main_process_port: null
+main_training_function: main
+mixed_precision: 'no'
+num_machines: 1
+num_processes: 1
+use_cpu: false
+```
+
+After this configuration has been made, here is how you run the CV example
+(from the root of the repo) with MPS enabled:
+
+```bash
+accelerate launch ./examples/cv_example.py --data_dir images
+```
+
+## A few caveats to be aware of
+
+1. We strongly recommend installing PyTorch >= 1.13 (the nightly version at the time of writing) on your macOS machine.
+It has major fixes related to model correctness and performance improvements for transformer based models.
+Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details.
+2. The distributed setups `gloo` and `nccl` do not work with the `mps` device.
+This means that currently only a single GPU of the `mps` device type can be used; a quick availability check is sketched below.
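+
+As a small sanity check before launching (standard PyTorch API, assuming a PyTorch >= 1.12 build on Apple silicon):
+
+```python
+import torch
+
+# Confirm that this torch build can see the Apple silicon GPU;
+# fall back to CPU otherwise.
+if torch.backends.mps.is_available():
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")
+print(f"Using device: {device}")
+```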
+
+Finally, please remember that 🤗 `Accelerate` only integrates the MPS backend, therefore if you
+have any problems or questions with regards to MPS backend usage, please file an issue with [PyTorch GitHub](https://github.com/pytorch/pytorch/issues).
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/sagemaker.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/sagemaker.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0afe52e2921b3ddd86a3f48afcc0ce9d5f16d02a
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/sagemaker.mdx
@@ -0,0 +1,169 @@
+
+
+# Amazon SageMaker
+
+Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to
+make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).
+
+## Getting Started
+
+### Setup & Installation
+
+
+Before you can run your 🤗 Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not
+have an AWS account yet, learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html).
+
+After you have your AWS account, you need to install the `sagemaker` SDK for 🤗 Accelerate with:
+
+```bash
+pip install "accelerate[sagemaker]" --upgrade
+```
+
+🤗 Accelerate currently uses the 🤗 DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. 🤗
+Accelerate is not in the DLC yet (will soon be added!), so to use it within Amazon SageMaker you need to create a
+`requirements.txt` in the same directory where your training script is located and add it as a dependency:
+
+```
+accelerate
+```
+
+You should also add any other dependencies you have to this `requirements.txt`.
+
+
+### Configure 🤗 Accelerate
+
+You can configure the launch configuration for Amazon SageMaker the same as you do for non-SageMaker training jobs with
+the 🤗 Accelerate CLI:
+
+```bash
+accelerate config
+# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1
+```
+
+🤗 Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.
+
+
+
+    🤗 Accelerate is not saving any of your credentials.
+
+
+
+### Prepare a 🤗 Accelerate fine-tuning script
+
+The training script is very similar to a training script you might run outside of SageMaker, but to save your model
+after training you need to specify either `/opt/ml/model` or use `os.environ["SM_MODEL_DIR"]` as your save
+directory. After training, artifacts in this directory are uploaded to S3:
+
+
+```diff
+- torch.save('/opt/ml/model')
++ accelerator.save('/opt/ml/model')
+```
+
+
+
+    SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to
+    specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).
+
+
+
+### Launch Training
+
+You can launch your training with the 🤗 Accelerate CLI with:
+
+```
+accelerate launch path_to_script.py --args_to_the_script
+```
+
+This will launch your training script using your configuration. The only thing you have to do is provide all the
+arguments needed by your training script as named arguments.
+
+**Examples**
+
+
+
+    If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it.
+
+
+
+```bash
+accelerate launch ./examples/sagemaker_example.py
+```
+
+Outputs:
+
+```
+Configuring Amazon SageMaker environment
+Converting Arguments to Hyperparameters
+Creating Estimator
+2021-04-08 11:56:50 Starting - Starting the training job...
+2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress
+.........
+2021-04-08 11:58:54 Starting - Preparing the instances for training.........
+2021-04-08 12:00:24 Downloading - Downloading input data
+2021-04-08 12:00:24 Training - Downloading the training image..................
+2021-04-08 12:03:39 Training - Training image download completed. Training in progress..
+........
+epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037}
+epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689}
+epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304}
+........
+2021-04-08 12:05:40 Uploading - Uploading generated training model
+2021-04-08 12:05:40 Completed - Training job completed
+Training seconds: 331
+Billable seconds: 331
+You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz
+```
+
+## Advanced Features
+
+### Distributed Training: Data Parallelism
+
+Set up the Accelerate config by running `accelerate config` and answering the SageMaker questions.
+To use SageMaker DDP, select it when asked
+`What is the distributed mode? ([0] No distributed training, [1] data parallelism):`.
+Example config below:
+```yaml
+base_job_name: accelerate-sagemaker-1
+compute_environment: AMAZON_SAGEMAKER
+distributed_type: DATA_PARALLEL
+ec2_instance_type: ml.p3.16xlarge
+iam_role_name: xxxxx
+image_uri: null
+mixed_precision: fp16
+num_machines: 1
+profile: xxxxx
+py_version: py38
+pytorch_version: 1.10.2
+region: us-east-1
+transformers_version: 4.17.0
+use_cpu: false
+```
+
+### Distributed Training: Model Parallelism
+
+*currently in development, will be supported soon.*
+
+### Python packages and dependencies
+
+🤗 Accelerate currently uses the 🤗 DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you
+want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages
+will be installed before your training script is started.
+
+### Remote scripts: Use scripts located on GitHub
+
+*undecided if feature is needed. Contact us if you would like this feature.*
+
+### Use Spot Instances
+
+*undecided if feature is needed. Contact us if you would like this feature.*
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/tracking.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/tracking.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3ff40a4a44592a621f41cd82c163bb3ac557fa66
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/tracking.mdx
@@ -0,0 +1,207 @@
+
+
+# Tracking
+
+There are a large number of experiment tracking APIs available; however, getting them all to work in a multi-processing environment can oftentimes be complex.
+🤗 Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`].
+
+## Integrated Trackers
+
+Currently `Accelerate` supports four trackers out-of-the-box:
+
+- TensorBoard
+- WandB
+- CometML
+- MLFlow
+
+To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerator`]:
+```python
+from accelerate import Accelerator
+from accelerate.utils import LoggerType
+
+accelerator = Accelerator(log_with="all")  # For all available trackers in the environment
+accelerator = Accelerator(log_with="wandb")
+accelerator = Accelerator(log_with=["wandb", LoggerType.TENSORBOARD])
+```
+
+At the start of your experiment [`Accelerator.init_trackers`] should be used to set up your project, and potentially add any experiment hyperparameters to be logged:
+```python
+hps = {"num_iterations": 5, "learning_rate": 1e-2}
+accelerator.init_trackers("my_project", config=hps)
+```
+
+When you are ready to log any data, [`Accelerator.log`] should be used.
+A `step` can also be passed in to correlate the data with a particular step in the training loop.
+```python
+accelerator.log({"train_loss": 1.12, "valid_loss": 0.8}, step=1)
+```
+
+Once you've finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any.
+```python
+accelerator.end_training()
+```
+
+
+A full example is below:
+```python
+from accelerate import Accelerator
+
+accelerator = Accelerator(log_with="all")
+config = {
+    "num_iterations": 5,
+    "learning_rate": 1e-2,
+    "loss_function": str(my_loss_function),
+}
+
+accelerator.init_trackers("example_project", config=config)
+
+my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
+device = accelerator.device
+my_model.to(device)
+
+for iteration in range(config["num_iterations"]):
+    for step, batch in enumerate(my_training_dataloader):
+        my_optimizer.zero_grad()
+        inputs, targets = batch
+        inputs = inputs.to(device)
+        targets = targets.to(device)
+        outputs = my_model(inputs)
+        loss = my_loss_function(outputs, targets)
+        accelerator.backward(loss)
+        my_optimizer.step()
+        accelerator.log({"training_loss": loss}, step=step)
+accelerator.end_training()
+```
+
+If a tracker requires a directory to save data to, such as `TensorBoard`, then a `logging_dir` or `project_dir` can be passed in. `project_dir` is useful
+if there are further configurations, such as those that can be combined with the [`~utils.ProjectConfiguration`] dataclass.
+
+```python
+accelerator = Accelerator(log_with="tensorboard", logging_dir=".")
+```
+
+## Implementing Custom Trackers
+
+To implement a new tracker to be used in `Accelerator`, a new one can be made by implementing the [`GeneralTracker`] class.
+Every tracker must implement three functions and have three properties:
+  - `__init__`:
+    - Should store a `run_name` and initialize the tracker API of the integrated library.
+    - If a tracker stores its data locally (such as TensorBoard), a `logging_dir` parameter can be added.
+  - `store_init_configuration`:
+    - Should take in a `values` dictionary and store them as a one-time experiment configuration
+  - `log`:
+    - Should take in a `values` dictionary and a `step`, and should log them to the run
+
+  - `name` (`str`):
+    - A unique string name for the tracker, such as `"wandb"` for the wandb tracker.
+    - This will be used for interacting with this tracker specifically
+  - `requires_logging_directory` (`bool`):
+    - Whether a `logging_dir` is needed for this particular tracker and if it uses one.
+  - `tracker`:
+    - This should be implemented as a `@property` function
+    - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`.
+
+A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information:
+```python
+from typing import Optional
+
+import wandb
+
+from accelerate.tracking import GeneralTracker
+
+
+class MyCustomTracker(GeneralTracker):
+    name = "wandb"
+    requires_logging_directory = False
+
+    def __init__(self, run_name: str):
+        self.run_name = run_name
+        # store the wandb run so the `tracker` property can expose it
+        self.run = wandb.init(project=self.run_name)
+
+    @property
+    def tracker(self):
+        return self.run
+
+    def store_init_configuration(self, values: dict):
+        wandb.config.update(values)
+
+    def log(self, values: dict, step: Optional[int] = None):
+        wandb.log(values, step=step)
+```
+
+When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to the `log_with` parameter of [`Accelerator`] to have it automatically
+be used with the API:
+
+```python
+tracker = MyCustomTracker("some_run_name")
+accelerator = Accelerator(log_with=tracker)
+```
+
+These also can be mixed with existing trackers, including with `"all"`:
+
+```python
+tracker = MyCustomTracker("some_run_name")
+accelerator = Accelerator(log_with=[tracker, "all"])
+```
+
+## Accessing the internal tracker
+
+If some custom interactions with a tracker might be wanted directly, you can quickly access one using the
+[`Accelerator.get_tracker`] method. Just pass in the string corresponding to a tracker's `.name` attribute
+and it will return that tracker on the main process.
+
+This example shows doing so with wandb:
+
+```python
+wandb_tracker = accelerator.get_tracker("wandb")
+```
+
+From there you can interact with `wandb`'s `run` object (exposed via the tracker's `tracker` property) like normal:
+
+
+    Make sure to only interact with trackers on the main process!
+
+
+
+```python
+if accelerator.is_main_process:
+    wandb_tracker.tracker.log_artifact(some_artifact_to_log)
+```
+
+## When a wrapper cannot work
+
+If a library has an API that does not follow a strict `.log` with an overall dictionary, such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement:
+```diff
+  from accelerate import Accelerator
++ import neptune.new as neptune
+
+  accelerator = Accelerator()
++ run = neptune.init(...)
+
+  my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader)
+  device = accelerator.device
+  my_model.to(device)
+
+  for iteration in range(config["num_iterations"]):
+      for batch in my_training_dataloader:
+          my_optimizer.zero_grad()
+          inputs, targets = batch
+          inputs = inputs.to(device)
+          targets = targets.to(device)
+          outputs = my_model(inputs)
+          loss = my_loss_function(outputs, targets)
+          total_loss += loss
+          accelerator.backward(loss)
+          my_optimizer.step()
++         if accelerator.is_main_process:
++             run["logs/training/batch/loss"].log(loss)
+```
diff --git a/testbed/huggingface__accelerate/docs/source/usage_guides/training_zoo.mdx b/testbed/huggingface__accelerate/docs/source/usage_guides/training_zoo.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..176d97d2b8f10d7deb5264d1404db2d698ac30cb
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/usage_guides/training_zoo.mdx
@@ -0,0 +1,117 @@
+
+
+# Example Zoo
+
+Below is a non-exhaustive list of tutorials and scripts showcasing Accelerate.
+
+## Official Accelerate Examples:
+
+### Basic Examples
+
+These examples showcase the base features of Accelerate and are a great starting point.
+
+- [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py)
+- [Barebones distributed NLP example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)
+- [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py)
+- [Barebones distributed computer vision example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)
+- [Using Accelerate in Kaggle](https://www.kaggle.com/code/muellerzr/multi-gpu-and-accelerate)
+
+### Feature Specific Examples
+
+These examples showcase specific features that the Accelerate framework offers.
+
+- [Automatic memory-aware gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/automatic_gradient_accumulation.py)
+- [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)
+- [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py)
+- [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py)
+- [Fully Sharded Data Parallelism](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/fsdp_with_peak_mem_tracking.py)
+- [Gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation.py)
+- [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py)
+- [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py)
+- [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py)
+- [Using Megatron-LM](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/megatron_lm_gpt_pretraining.py)
+
+### Full Examples
+
+These examples showcase, all at once, every feature shown in "Feature Specific Examples".
+
+- [Complete NLP example](https://github.com/huggingface/accelerate/blob/main/examples/complete_nlp_example.py)
+- [Complete computer vision
example](https://github.com/huggingface/accelerate/blob/main/examples/complete_cv_example.py)
+- [Causal language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py)
+- [Masked language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_no_trainer.py)
+- [Speech pretraining example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py)
+- [Translation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py)
+- [Text classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue_no_trainer.py)
+- [Semantic segmentation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py)
+- [Question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_no_trainer.py)
+- [Beam search question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py)
+- [Multiple choice question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py)
+- [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py)
+- [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py)
+- [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py)
+- [End-to-end examples on how to use AWS SageMaker integration of Accelerate](https://github.com/huggingface/notebooks/blob/main/sagemaker/22_accelerate_sagemaker_examples/README.md)
+- [Megatron-LM examples for various NLP tasks](https://github.com/pacman100/accelerate-megatron-test)
+
+## Integration Examples
+
+These are tutorials from libraries that integrate with 🤗 Accelerate:
+
+### Catalyst
+
+- [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html)
+
+### DALLE2-pytorch
+
+- [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage)
+
+### 🤗 diffusers
+
+- [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
+- [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)
+
+### fastai
+
+- [Distributed training from Jupyter Notebooks with fastai](https://docs.fast.ai/tutorial.distributed.html)
+- [Basic distributed training examples with fastai](https://docs.fast.ai/examples/distributed_app_examples.html)
+
+### GradsFlow
+
+- [Auto Image Classification with GradsFlow](https://docs.gradsflow.com/en/latest/examples/nbs/01-ImageClassification/)
+
+### imagen-pytorch
+
+- [Fine-tuning Imagen](https://github.com/lucidrains/imagen-pytorch#usage)
+
+### Kornia
+
+- [Fine-tuning vision models with Kornia's
Trainer](https://kornia.readthedocs.io/en/latest/get-started/training.html)
+
+### PyTorch Accelerated
+
+- [Quickstart distributed training tutorial with PyTorch Accelerated](https://pytorch-accelerated.readthedocs.io/en/latest/quickstart.html)
+
+### PyTorch3D
+
+- [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/)
+
+### Stable-Dreamfusion
+
+- [Training with Stable-Dreamfusion to convert text to a 3D model](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing)
+
+### Tez
+
+- [Leaf disease detection with Tez and Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook)
+
+### trlx
+
+- [How to implement a sentiment learning task with trlx](https://github.com/CarperAI/trlx#example-how-to-add-a-task)
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/examples/README.md b/testbed/huggingface__accelerate/examples/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6a3c0a11a54a46c5179308d870948f0869e36ace
--- /dev/null
+++ b/testbed/huggingface__accelerate/examples/README.md
@@ -0,0 +1,212 @@
+
+
+# In this folder we showcase various full examples using 🤗 Accelerate
+
+## Simple NLP example
+
+The [nlp_example.py](./nlp_example.py) script is a simple example to train a BERT model on a classification task ([GLUE's MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398)).
+
+Prior to running it you should install 🤗 Datasets, 🤗 Evaluate and 🤗 Transformers:
+
+```bash
+pip install datasets evaluate transformers
+```
+
+The same script can be run in any of the following configurations:
+- single CPU or single GPU
+- multi GPUs (using PyTorch distributed mode)
+- (multi) TPUs
+- fp16 (mixed-precision) or fp32 (normal precision)
+
+To run it in each of these various modes, use the following commands:
+- single CPU:
+  * from a server without GPU
+  ```bash
+  python ./nlp_example.py
+  ```
+  * from any server by passing `cpu=True` to the `Accelerator`.
+  ```bash
+  python ./nlp_example.py --cpu
+  ```
+  * from any server with Accelerate launcher
+  ```bash
+  accelerate launch --cpu ./nlp_example.py
+  ```
+- single GPU:
+  ```bash
+  python ./nlp_example.py  # from a server with a GPU
+  ```
+- with fp16 (mixed-precision)
+  * from any server by passing `fp16=True` to the `Accelerator`.
+ ```bash + python ./nlp_example.py --fp16 + ``` + * from any server with Accelerate launcher + ```bash + accelerate launch --fp16 ./nlp_example.py + ``` +- multi GPUs (using PyTorch distributed mode) + * With Accelerate config and launcher + ```bash + accelerate config # This will create a config file on your server + accelerate launch ./nlp_example.py # This will run the script on your server + ``` + * With traditional PyTorch launcher + ```bash + python -m torch.distributed.launch --nproc_per_node 2 --use_env ./nlp_example.py + ``` +- multi GPUs, multi node (several machines, using PyTorch distributed mode) + * With Accelerate config and launcher, on each machine: + ```bash + accelerate config # This will create a config file on each server + accelerate launch ./nlp_example.py # This will run the script on each server + ``` + * With PyTorch launcher only (`--nnodes 2` tells the launcher this is a two-machine job) + ```bash + python -m torch.distributed.launch --nproc_per_node 2 \ + --use_env \ + --nnodes 2 \ + --node_rank 0 \ + --master_addr master_node_ip_address \ + ./nlp_example.py # On the first server + python -m torch.distributed.launch --nproc_per_node 2 \ + --use_env \ + --nnodes 2 \ + --node_rank 1 \ + --master_addr master_node_ip_address \ + ./nlp_example.py # On the second server + ``` +- (multi) TPUs + * With Accelerate config and launcher + ```bash + accelerate config # This will create a config file on your TPU server + accelerate launch ./nlp_example.py # This will run the script on each server + ``` + * In PyTorch: + Add an `xmp.spawn` line in your script as you usually do. + + +## Simple vision example + +The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a ResNet-50 on a classification task ([Oxford-IIIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)). + +The same script can be run in any of the following configurations: +- single CPU or single GPU +- multi GPUs (using PyTorch distributed mode) +- (multi) TPUs +- fp16 (mixed-precision) or fp32 (normal precision) + +Prior to running it you should install timm and torchvision: + +```bash +pip install timm torchvision +``` + +and you should download the data with the following commands: + +```bash +wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +tar -xzf images.tar.gz +``` + +To run it in each of these various modes, use the following commands: +- single CPU: + * from a server without GPU + ```bash + python ./cv_example.py --data_dir path_to_data + ``` + * from any server by passing `cpu=True` to the `Accelerator`. + ```bash + python ./cv_example.py --data_dir path_to_data --cpu + ``` + * from any server with Accelerate launcher + ```bash + accelerate launch --cpu ./cv_example.py --data_dir path_to_data + ``` +- single GPU: + ```bash + python ./cv_example.py --data_dir path_to_data # from a server with a GPU + ``` +- with fp16 (mixed-precision) + * from any server by passing `fp16=True` to the `Accelerator`.
+ ```bash + python ./cv_example.py --data_dir path_to_data --fp16 + ``` + * from any server with Accelerate launcher + ```bash + accelerate launch --fp16 ./cv_example.py --data_dir path_to_data + ``` +- multi GPUs (using PyTorch distributed mode) + * With Accelerate config and launcher + ```bash + accelerate config # This will create a config file on your server + accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on your server + ``` + * With traditional PyTorch launcher + ```bash + python -m torch.distributed.launch --nproc_per_node 2 --use_env ./cv_example.py --data_dir path_to_data + ``` +- multi GPUs, multi node (several machines, using PyTorch distributed mode) + * With Accelerate config and launcher, on each machine: + ```bash + accelerate config # This will create a config file on each server + accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server + ``` + * With PyTorch launcher only (`--nnodes 2` tells the launcher this is a two-machine job) + ```bash + python -m torch.distributed.launch --nproc_per_node 2 \ + --use_env \ + --nnodes 2 \ + --node_rank 0 \ + --master_addr master_node_ip_address \ + ./cv_example.py --data_dir path_to_data # On the first server + python -m torch.distributed.launch --nproc_per_node 2 \ + --use_env \ + --nnodes 2 \ + --node_rank 1 \ + --master_addr master_node_ip_address \ + ./cv_example.py --data_dir path_to_data # On the second server + ``` +- (multi) TPUs + * With Accelerate config and launcher + ```bash + accelerate config # This will create a config file on your TPU server + accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server + ``` + * In PyTorch: + Add an `xmp.spawn` line in your script as you usually do. + +### Simple vision example (GANs) + +- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan) + +### Using AWS SageMaker integration + +- [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker) + +## Finer Examples + +While the first two scripts are extremely barebones when it comes to what you can do with Accelerate, more advanced features are documented in two other locations. + +### `by_feature` examples + +These scripts are *individual* examples highlighting one particular feature or use-case within Accelerate. They all stem from the [nlp_example.py](./nlp_example.py) script, and any changes or modifications are denoted with a `# New Code #` comment. + +Read the README.md file located in the `by_feature` folder for more information. + +### `complete_*` examples + +These two scripts contain *every* single feature currently available in Accelerate in one place, as one giant script. + +New arguments that can be passed include: + +- `checkpointing_steps`, whether the various states should be saved at the end of every `n` steps, or `"epoch"` for each epoch. States are then saved to folders named `step_{n}` or `epoch_{n}` +- `resume_from_checkpoint`, should be used if you want to resume training from a previous call to the script that was run with `checkpointing_steps` set. +- `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. Currently supported trackers include TensorBoard, Weights and Biases, and CometML.
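+
+As a reference for how these arguments map onto the `Accelerator` API, here is a minimal sketch of the two checkpointing calls the `complete_*` scripts are built around (the tiny model, optimizer and paths below are illustrative placeholders, not what the scripts themselves create):
+
+```python
+import torch
+from accelerate import Accelerator
+
+accelerator = Accelerator()
+model = torch.nn.Linear(4, 2)  # placeholder model
+optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
+model, optimizer = accelerator.prepare(model, optimizer)
+
+# `checkpointing_steps`: model, optimizer, scheduler and RNG states all go into one folder
+accelerator.save_state("output/step_100")
+
+# `resume_from_checkpoint`: restore everything in place from that same folder
+accelerator.load_state("output/step_100")
+```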
diff --git a/testbed/huggingface__accelerate/examples/by_feature/README.md b/testbed/huggingface__accelerate/examples/by_feature/README.md new file mode 100644 index 0000000000000000000000000000000000000000..689127a356c02e530b00d473e1c4c1c6db1924eb --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/README.md @@ -0,0 +1,80 @@ +# What are these scripts? + +All scripts in this folder originate from the `nlp_example.py` file, as it is a very simplistic NLP training example using Accelerate with zero extra features. + +From there, each further script adds in just **one** feature of Accelerate, showing how you can quickly modify your own scripts to implement these capabilities. + +A full example with all of these parts integrated together can be found in the `complete_nlp_example.py` and `complete_cv_example.py` scripts. + +Adjustments to each script from the base `nlp_example.py` file can be found quickly by searching for "# New Code #". + +## Example Scripts by Feature and their Arguments + +### Base Example (`../nlp_example.py`) + +- Shows how to use `Accelerator` in an extremely simplistic PyTorch training loop +- Arguments available: + - `mixed_precision`, whether to use mixed precision. ("no", "fp16", or "bf16") + - `cpu`, whether to train using only the CPU. (yes/no/1/0) + +All following scripts also accept these arguments in addition to their added ones. + +These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as: + +```bash +accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0 +``` + +### Checkpointing and Resuming Training (`checkpointing.py`) + +- Shows how to use `Accelerator.save_state` and `Accelerator.load_state` to save or continue training +- **It is assumed you are continuing off the same training script** +- Arguments available: + - `checkpointing_steps`, after how many steps the various states should be saved. ("epoch", 1, 2, ...) + - `output_dir`, where saved state folders should be saved to, default is current working directory + - `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...) + +These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as: + +(Note, `resume_from_checkpoint` assumes that we've run the script for one epoch with the `--checkpointing_steps epoch` flag) + +```bash +accelerate launch ./checkpointing.py --checkpointing_steps epoch --output_dir "checkpointing_tutorial" --resume_from_checkpoint "checkpointing_tutorial/epoch_0" +``` + +### Cross Validation (`cross_validation.py`) + +- Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`. +- Arguments available: + - `num_folds`, the number of folds the training dataset should be split into. + +These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as: + +```bash +accelerate launch ./cross_validation.py --num_folds 2 +``` + +### Experiment Tracking (`tracking.py`) + +- Shows how to use `Accelerator.init_trackers` and `Accelerator.log` (a minimal sketch follows below) +- Can be used with Weights and Biases, TensorBoard, or CometML. +- Arguments available: + - `with_tracking`, whether to load in all available experiment trackers from the environment.
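+
+A minimal sketch of what these two calls look like in practice (the project name and logged values here are illustrative):
+
+```python
+from accelerate import Accelerator
+
+accelerator = Accelerator(log_with="all")  # pick up every tracker installed in the environment
+accelerator.init_trackers("example_project", config={"lr": 2e-5})
+for step in range(3):
+    accelerator.log({"train_loss": 1.0 / (step + 1)}, step=step)
+accelerator.end_training()  # flush and close all trackers
+```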
+ +These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as: + +```bash +accelerate launch ./tracking.py --with_tracking +``` + +### Gradient Accumulation (`gradient_accumulation.py`) + +- Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. +- Arguments available: + - `gradient_accumulation_steps`, the number of steps to accumulate gradients over before the optimizer and scheduler are stepped and `zero_grad` is called + +These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.launch`), such as: + +```bash +accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5 +``` \ No newline at end of file diff --git a/testbed/huggingface__accelerate/examples/by_feature/automatic_gradient_accumulation.py b/testbed/huggingface__accelerate/examples/by_feature/automatic_gradient_accumulation.py new file mode 100644 index 0000000000000000000000000000000000000000..d6e0cf02839896daf9fc9376bddf9521447dd55f --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/automatic_gradient_accumulation.py @@ -0,0 +1,232 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +# New Code # +import evaluate +from accelerate import Accelerator, DistributedType +from accelerate.utils import find_executable_batch_size +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate, +# specifically showcasing how to combine both the gradient accumulation +# and automatic batch size finder utilities of Accelerate to perform +# automatic gradient accumulation +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUs (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# New additions from the base script can be found quickly by +# looking for the # New Code # tags +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer.
+ + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + observed_batch_size = int(config["batch_size"]) + + metric = evaluate.load("glue", "mrpc") + + # New Code # + # We use the `find_executable_batch_size` decorator, passing in the desired observed batch size + # to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in + # half each time. 
From this, we can calculate the number of gradient accumulation steps needed + # and modify the Accelerator object as a result + @find_executable_batch_size(starting_batch_size=int(observed_batch_size)) + def inner_training_loop(batch_size): + # Since we need to modify the outside accelerator object, we need to bring it + # to the local scope + nonlocal accelerator + + # We can calculate the number of gradient accumulation steps based on the current + # batch size vs the starting batch size + num_gradient_accumulation_steps = observed_batch_size // batch_size + + # And then set it in the Accelerator directly: + accelerator.gradient_accumulation_steps = num_gradient_accumulation_steps + + # Next we need to free all of the stored model references in the Accelerator each time + accelerator.free_memory() + + # And set the seed so our results are reproducible each reset + set_seed(seed) + + # Instantiate the model (we build the model here so that the seed also controls new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs), + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # Now we train the model + for epoch in range(num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + # And perform gradient accumulation + with accelerator.accumulate(model): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process.
+ accelerator.print(f"epoch {epoch}:", eval_metric) + + # New Code # + # And call it at the end with no arguments + # Note: You could also refactor this outside of your training loop function + inner_training_loop() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose " + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. " + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + args = parser.parse_args() + # New Code # + # We modify the starting batch size to be an observed batch size of 256, to guarantee an initial CUDA OOM + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 256} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/by_feature/checkpointing.py b/testbed/huggingface__accelerate/examples/by_feature/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..bffd843d8557771f659fe7028c2ebff75e6dfd59 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/checkpointing.py @@ -0,0 +1,303 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate, +# specifically showcasing the checkpointing capability, +# and builds off the `nlp_example.py` script. +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUs (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To help focus on the differences in the code, building `DataLoaders` +# was refactored into its own function. +# New additions from the base script can be found quickly by +# looking for the # New Code # tags +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer.
+ + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + # New Code # + # Parse out whether we are saving every epoch or after a certain number of batches + if hasattr(args.checkpointing_steps, "isdigit"): + if args.checkpointing_steps == "epoch": + checkpointing_steps = args.checkpointing_steps + elif args.checkpointing_steps.isdigit(): + checkpointing_steps = int(args.checkpointing_steps) + else: + raise ValueError( + f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
+ ) + else: + checkpointing_steps = None + + set_seed(seed) + + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + metric = evaluate.load("glue", "mrpc") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + # Instantiate the model (we build the model here so that the seed also controls new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # New Code # + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the starting epoch so files are named properly + starting_epoch = 0 + + # We need to load the checkpoint back in before training here with `load_state` + # The total number of epochs is adjusted based on where the state is being loaded from, + # as we assume continuation of the same training script + if args.resume_from_checkpoint: + if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "": + accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") + accelerator.load_state(args.resume_from_checkpoint) + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] + dirs.sort(key=os.path.getctime) + path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last + # Extract `epoch_{i}` or `step_{i}` + training_difference = os.path.splitext(path)[0] + + if "epoch" in training_difference: + starting_epoch = int(training_difference.replace("epoch_", "")) + 1 + resume_step = None + else: + resume_step = int(training_difference.replace("step_", "")) + starting_epoch = resume_step // len(train_dataloader) + resume_step -= starting_epoch * len(train_dataloader) + + # Now we train the model + for epoch in range(starting_epoch, num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + # New Code # + # We need to skip steps until we reach the resumed step during the first epoch + if args.resume_from_checkpoint and epoch == starting_epoch: + if resume_step is not None and step < resume_step: + overall_step += 1 + continue + # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + # New Code # + overall_step += 1 + + # New Code # + # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state` + # These are saved to folders named `step_{overall_step}` + # Will contain files: "pytorch_model.bin", "optimizer.bin", "scheduler.bin", and "random_states.pkl" + # If mixed precision was used, will also save a "scaler.bin" file + if isinstance(checkpointing_steps, int): + output_dir = f"step_{overall_step}" + if overall_step % checkpointing_steps == 0: + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True` (the default). + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + + # New Code # + # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state` + # These are saved to folders named `epoch_{epoch}` + # Will contain files: "pytorch_model.bin", "optimizer.bin", "scheduler.bin", and "random_states.pkl" + # If mixed precision was used, will also save a "scaler.bin" file + if checkpointing_steps == "epoch": + output_dir = f"epoch_{epoch}" + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose " + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. " + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored.
Default is the current working directory.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/by_feature/cross_validation.py b/testbed/huggingface__accelerate/examples/by_feature/cross_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..87f804ce6c58643e7b5e7789f8e5ada6c67825ef --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/cross_validation.py @@ -0,0 +1,268 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +from typing import List + +import numpy as np +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import DatasetDict, load_dataset + +# New Code # +# We'll be using StratifiedKFold for this example +from sklearn.model_selection import StratifiedKFold +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate, +# specifically showcasing how to perform Cross Validation, +# and builds off the `nlp_example.py` script. +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUs (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To help focus on the differences in the code, building `DataLoaders` +# was refactored into its own function. +# New additions from the base script can be found quickly by +# looking for the # New Code # tags +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + +# New Code # +# We need a different `get_dataloaders` function that will build dataloaders by index + + +def get_fold_dataloaders( + accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16 +): + """ + Gets a set of train, valid, and test dataloaders for a particular fold + + Args: + accelerator (`Accelerator`): + The main `Accelerator` object + dataset (`DatasetDict`): + The full dataset, whose `train` split is sub-selected into the fold's train and validation sets + train_idxs (list of `int`): + The split indices for the training dataset + valid_idxs (list of `int`): + The split indices for the validation dataset + batch_size (`int`): + The size of the minibatch.
Default is 16 + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = DatasetDict( + { + "train": dataset["train"].select(train_idxs), + "validation": dataset["train"].select(valid_idxs), + "test": dataset["validation"], + } + ) + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + test_dataloader = DataLoader( + tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader, test_dataloader + + +def training_function(config, args): + # New Code # + test_predictions = [] + # Download the dataset + datasets = load_dataset("glue", "mrpc") + # Create our splits + kfold = StratifiedKFold(n_splits=int(args.num_folds)) + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + metric = evaluate.load("glue", "mrpc") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + set_seed(seed) + + # New Code # + # Create our folds: + folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"]) + test_references = [] + # Iterate over them + for i, (train_idxs, valid_idxs) in enumerate(folds): + train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders( + accelerator, + datasets, + train_idxs, + valid_idxs, + ) + # Instantiate the model (we build the model here so that the seed also controls new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
+ # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # Now we train the model + for epoch in range(num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + + # New Code # + # We also run predictions on the test set at the very end + fold_predictions = [] + for step, batch in enumerate(test_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + fold_predictions.append(predictions.cpu()) + if i == 0: + # We need all of the test predictions + test_references.append(references.cpu()) + # Use accelerator.print to print only on the main process. + test_predictions.append(torch.cat(fold_predictions, dim=0)) + # We now need to release all our memory and get rid of the current model, optimizer, etc + accelerator.free_memory() + # New Code # + # Finally we check the accuracy of our folded results: + test_references = torch.cat(test_references, dim=0) + preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1) + test_metric = metric.compute(predictions=preds, references=test_references) + accelerator.print("Average test metrics from all folds:", test_metric) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose " + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. "
+ "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + # New Code # + parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset") + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/by_feature/deepspeed_with_config_support.py b/testbed/huggingface__accelerate/examples/by_feature/deepspeed_with_config_support.py new file mode 100644 index 0000000000000000000000000000000000000000..6cfee9ddae4f624671f884c2a582165e3f7effbb --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/deepspeed_with_config_support.py @@ -0,0 +1,733 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) +on a text file or a dataset without using HuggingFace Trainer. + +Here is the full list of checkpoints on the hub that can be fine-tuned by this script: +https://huggingface.co/models?filter=text-generation +""" +# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
+ +import argparse +import json +import logging +import math +import os +import random +from itertools import chain +from pathlib import Path + +import torch +from torch.utils.data import DataLoader + +import datasets +import transformers +from accelerate import Accelerator, DistributedType +from accelerate.logging import get_logger +from accelerate.utils import DummyOptim, DummyScheduler, set_seed +from datasets import load_dataset +from huggingface_hub import Repository +from tqdm.auto import tqdm +from transformers import ( + CONFIG_MAPPING, + MODEL_MAPPING, + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + SchedulerType, + default_data_collator, + get_scheduler, +) +from transformers.utils import get_full_repo_name +from transformers.utils.versions import require_version + + +logger = get_logger(__name__) + +require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") + +MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) +MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) + + +def parse_args(): + parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help="The name of the dataset to use (via the datasets library).", + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The configuration name of the dataset to use (via the datasets library).", + ) + parser.add_argument( + "--train_file", type=str, default=None, help="A csv or a json file containing the training data." + ) + parser.add_argument( + "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." + ) + parser.add_argument( + "--validation_split_percentage", + default=5, + help="The percentage of the train set used as validation set in case there's no validation split", + ) + parser.add_argument( + "--model_name_or_path", + type=str, + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--config_name", + type=str, + default=None, + help="Pretrained config name or path if not the same as model_name", + ) + parser.add_argument( + "--tokenizer_name", + type=str, + default=None, + help="Pretrained tokenizer name or path if not the same as model_name", + ) + parser.add_argument( + "--use_slow_tokenizer", + action="store_true", + help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", + ) + parser.add_argument( + "--per_device_train_batch_size", + type=int, + default=8, + help="Batch size (per device) for the training dataloader.", + ) + parser.add_argument( + "--per_device_eval_batch_size", + type=int, + default=8, + help="Batch size (per device) for the evaluation dataloader.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") + parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of update steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--lr_scheduler_type", + type=SchedulerType, + default="linear", + help="The scheduler type to use.", + choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], + ) + parser.add_argument( + "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--model_type", + type=str, + default=None, + help="Model type to use if training from scratch.", + choices=MODEL_TYPES, + ) + parser.add_argument( + "--block_size", + type=int, + default=None, + help=( + "Optional input sequence length after tokenization. The training dataset will be truncated in blocks of" + " this size for training. Default to the model max input length for single sentence inputs (take into" + " account special tokens)." + ), + ) + parser.add_argument( + "--preprocessing_num_workers", + type=int, + default=None, + help="The number of processes to use for the preprocessing.", + ) + parser.add_argument( + "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" + ) + parser.add_argument( + "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." + ) + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument( + "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." + ) + parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + # New Code # + # Whether to load the best model at the end of training + parser.add_argument( + "--load_best_model", + action="store_true", + help="Whether to load the best model at the end of training", + ) + parser.add_argument( + "--with_tracking", + action="store_true", + help="Whether to enable experiment trackers for logging.", + ) + parser.add_argument( + "--report_to", + type=str, + default="all", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' + ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' + " Only applicable when `--with_tracking` is passed." + ), + ) + args = parser.parse_args() + + # Sanity checks + if args.dataset_name is None and args.train_file is None and args.validation_file is None: + raise ValueError("Need either a dataset name or a training/validation file.") + else: + if args.train_file is not None: + extension = args.train_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
+ if args.validation_file is not None: + extension = args.validation_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." + + if args.push_to_hub: + assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." + + return args + + +# New Code # +def checkpoint_model(checkpoint_folder, ckpt_id, model, epoch, last_global_step, **kwargs): + """Utility function for checkpointing model + optimizer dictionaries + The main purpose for this is to be able to resume training from that instant again + """ + checkpoint_state_dict = { + "epoch": epoch, + "last_global_step": last_global_step, + } + # Add extra kwargs too + checkpoint_state_dict.update(kwargs) + + success = model.save_checkpoint(checkpoint_folder, ckpt_id, checkpoint_state_dict) + status_msg = f"checkpointing: checkpoint_folder={checkpoint_folder}, ckpt_id={ckpt_id}" + if success: + logging.info(f"Success {status_msg}") + else: + logging.warning(f"Failure {status_msg}") + return + + +# New Code # +def load_training_checkpoint(model, load_dir, tag=None, **kwargs): + """Utility function for checkpointing model + optimizer dictionaries + The main purpose for this is to be able to resume training from that instant again + """ + _, checkpoint_state_dict = model.load_checkpoint(load_dir, tag=tag, **kwargs) + epoch = checkpoint_state_dict["epoch"] + last_global_step = checkpoint_state_dict["last_global_step"] + del checkpoint_state_dict + return (epoch, last_global_step) + + +# New Code # +def evaluate(args, model, eval_dataloader, accelerator, eval_dataset): + model.eval() + losses = [] + for step, batch in enumerate(eval_dataloader): + with torch.no_grad(): + outputs = model(**batch) + + loss = outputs.loss + losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) + + losses = torch.cat(losses) + try: + eval_loss = torch.mean(losses) + perplexity = math.exp(eval_loss) + except OverflowError: + perplexity = float("inf") + return perplexity, eval_loss + + +def main(): + args = parse_args() + + # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. + # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers + # in the environment + accelerator = ( + Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() + ) + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. 
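+    # (`set_seed` from `accelerate.utils` seeds `random`, `numpy` and `torch` on every process,
+    # so data shuffling and weight initialization are reproducible across workers.)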
+ if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.push_to_hub: + if args.hub_model_id is None: + repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) + else: + repo_name = args.hub_model_id + repo = Repository(args.output_dir, clone_from=repo_name) + + with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: + if "step_*" not in gitignore: + gitignore.write("step_*\n") + if "epoch_*" not in gitignore: + gitignore.write("epoch_*\n") + elif args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + accelerator.wait_for_everyone() + + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called + # 'text' is found. You can easily tweak this behavior (see below). + # + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) + if "validation" not in raw_datasets.keys(): + raw_datasets["validation"] = load_dataset( + args.dataset_name, + args.dataset_config_name, + split=f"train[:{args.validation_split_percentage}%]", + ) + raw_datasets["train"] = load_dataset( + args.dataset_name, + args.dataset_config_name, + split=f"train[{args.validation_split_percentage}%:]", + ) + else: + data_files = {} + dataset_args = {} + if args.train_file is not None: + data_files["train"] = args.train_file + if args.validation_file is not None: + data_files["validation"] = args.validation_file + extension = args.train_file.split(".")[-1] + if extension == "txt": + extension = "text" + dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks + raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) + # If no validation data is there, validation_split_percentage will be used to divide the dataset. + if "validation" not in raw_datasets.keys(): + raw_datasets["validation"] = load_dataset( + extension, + data_files=data_files, + split=f"train[:{args.validation_split_percentage}%]", + **dataset_args, + ) + raw_datasets["train"] = load_dataset( + extension, + data_files=data_files, + split=f"train[{args.validation_split_percentage}%:]", + **dataset_args, + ) + + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab.
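+    # (Resolution order below: an explicit --config_name wins, then the config shipped with
+    # --model_name_or_path, otherwise a fresh config is built from --model_type. The same
+    # precedence applies to the tokenizer and model that follow.)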
+ if args.config_name: + config = AutoConfig.from_pretrained(args.config_name) + elif args.model_name_or_path: + config = AutoConfig.from_pretrained(args.model_name_or_path) + else: + config = CONFIG_MAPPING[args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + + if args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) + elif args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is not supported by this script." + "You can do it from another script, save it, and load it from here, using --tokenizer_name." + ) + + if args.model_name_or_path: + model = AutoModelForCausalLM.from_pretrained( + args.model_name_or_path, + from_tf=bool(".ckpt" in args.model_name_or_path), + config=config, + ) + else: + logger.info("Training new model from scratch") + model = AutoModelForCausalLM.from_config(config) + + model.resize_token_embeddings(len(tokenizer)) + + # Preprocessing the datasets. + # First we tokenize all the texts. + column_names = raw_datasets["train"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + return tokenizer(examples[text_column_name]) + + with accelerator.main_process_first(): + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + + if args.block_size is None: + block_size = tokenizer.model_max_length + if block_size > 1024: + logger.warning( + f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " + "Picking 1024 instead. You can change that default value by passing --block_size xxx." + ) + block_size = 1024 + else: + if args.block_size > tokenizer.model_max_length: + logger.warning( + f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" + f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." + ) + block_size = min(args.block_size, tokenizer.model_max_length) + + # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + + # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder + # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower + # to preprocess. + # + # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: + # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map + + with accelerator.main_process_first(): + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + num_proc=args.preprocessing_num_workers, + load_from_cache_file=not args.overwrite_cache, + desc=f"Grouping texts in chunks of {block_size}", + ) + + train_dataset = lm_datasets["train"] + eval_dataset = lm_datasets["validation"] + + # Log a few random samples from the training set: + for index in random.sample(range(len(train_dataset)), 3): + logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") + + # DataLoaders creation: + train_dataloader = DataLoader( + train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size + ) + eval_dataloader = DataLoader( + eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size + ) + + # Optimizer + # Split weights in two groups, one with weight decay and the other not. + no_decay = ["bias", "LayerNorm.weight"] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": args.weight_decay, + }, + { + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }, + ] + # New Code # + # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer + optimizer_cls = ( + torch.optim.AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate) + + # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. + if accelerator.distributed_type == DistributedType.TPU: + model.tie_weights() + + # Scheduler and math around the number of training steps. + + # New Code + # Get gradient accumulation steps from deepspeed config if available + if accelerator.state.deepspeed_plugin is not None: + args.gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + else: + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # New Code # + # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_scheduler( + name=args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps, + num_training_steps=args.max_train_steps, + ) + else: + lr_scheduler = DummyScheduler( + optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps + ) + + # Prepare everything with our `accelerator`. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
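+    # For example (illustrative numbers, not from this config): a prepared dataloader of 1000 batches with
+    # `gradient_accumulation_steps=4` gives ceil(1000 / 4) = 250 update steps per epoch, so 3 epochs
+    # correspond to `max_train_steps = 750`.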
+    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+    args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
+
+    # Figure out how often we should save the Accelerator states
+    if hasattr(args.checkpointing_steps, "isdigit"):
+        checkpointing_steps = args.checkpointing_steps
+        if args.checkpointing_steps.isdigit():
+            checkpointing_steps = int(args.checkpointing_steps)
+    else:
+        checkpointing_steps = None
+
+    # We need to initialize the trackers we use, and also store our configuration.
+    # The trackers initialize automatically on the main process.
+    if args.with_tracking:
+        experiment_config = vars(args)
+        # TensorBoard cannot log Enums, need the raw value
+        experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
+        accelerator.init_trackers("clm_no_trainer", experiment_config)
+
+    # Train!
+    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+
+    logger.info("***** Running training *****")
+    logger.info(f"  Num examples = {len(train_dataset)}")
+    logger.info(f"  Num Epochs = {args.num_train_epochs}")
+    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
+    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
+    logger.info(f"  Total optimization steps = {args.max_train_steps}")
+    # Only show the progress bar once on each machine.
+    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
+    completed_steps = 0
+    starting_epoch = 0
+    best_metric = None
+    best_metric_checkpoint = None
+
+    # Potentially load in the weights and states from a previous save
+    if args.resume_from_checkpoint:
+        # New Code #
+        # Loads the DeepSpeed checkpoint from the specified path
+        _, last_global_step = load_training_checkpoint(
+            model,
+            args.resume_from_checkpoint,
+            **{"load_optimizer_states": True, "load_lr_scheduler_states": True},
+        )
+        accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
+        resume_step = last_global_step
+        starting_epoch = resume_step // len(train_dataloader)
+        resume_step -= starting_epoch * len(train_dataloader)
+
+    for epoch in range(starting_epoch, args.num_train_epochs):
+        model.train()
+        if args.with_tracking:
+            total_loss = 0
+        for step, batch in enumerate(train_dataloader):
+            # We need to skip steps until we reach the resumed step
+            if args.resume_from_checkpoint and epoch == starting_epoch:
+                if resume_step is not None and step < resume_step:
+                    completed_steps += 1
+                    continue
+            outputs = model(**batch)
+            loss = outputs.loss
+            # We keep track of the loss at each epoch
+            if args.with_tracking:
+                total_loss += loss.detach().float()
+            loss = loss / args.gradient_accumulation_steps
+            accelerator.backward(loss)
+            if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
+                optimizer.step()
+                lr_scheduler.step()
+                optimizer.zero_grad()
+                progress_bar.update(1)
+                completed_steps += 1
+
+            if isinstance(checkpointing_steps, int):
+                if completed_steps % checkpointing_steps == 0:
+                    output_dir = f"step_{completed_steps}"
+                    if args.output_dir is not None:
+                        output_dir = os.path.join(args.output_dir, output_dir)
+                    accelerator.save_state(output_dir)
+            if completed_steps >= args.max_train_steps:
+                break
+
+        perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset)
+        logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
+
+        if args.with_tracking:
+            accelerator.log(
+                {
+                    "perplexity": perplexity,
+                    "eval_loss": eval_loss,
+                    "train_loss": total_loss.item() / len(train_dataloader),
+                    "epoch": epoch,
+                    "step": completed_steps,
+                },
+                step=completed_steps,
+            )
+
+        # New Code #
+        # Save the DeepSpeed checkpoint to the specified path
+        checkpoint_model(args.output_dir, epoch, model, epoch, completed_steps)
+
+        # New Code #
+        # Tracks the best checkpoint and best metric
+        if best_metric is None or best_metric > perplexity:
+            best_metric = perplexity
+            best_metric_checkpoint = os.path.join(args.output_dir, str(epoch))
+            accelerator.print(f"New best metric: {best_metric} at epoch {epoch}")
+            accelerator.print(f"best_metric_checkpoint: {best_metric_checkpoint}")
+
+    # New Code #
+    # Loads the best checkpoint after the training is finished
+    if args.load_best_model:
+        _, last_global_step = load_training_checkpoint(
+            model,
+            "/".join(best_metric_checkpoint.split("/")[:-1]),
+            tag=best_metric_checkpoint.split("/")[-1],
+            **{"load_optimizer_states": True, "load_lr_scheduler_states": True},
+        )
+
+    # New Code #
+    # Evaluates using the best checkpoint
+    perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset)
+    logger.info(f"Best model metrics: perplexity: {perplexity} eval_loss: {eval_loss}")
+    if perplexity != best_metric:
+        raise AssertionError(
+            f"Best metric {best_metric} does not match the metric {perplexity} of the loaded best model."
+        )
+
+    if args.output_dir is not None:
+        accelerator.wait_for_everyone()
+        unwrapped_model = accelerator.unwrap_model(model)
+
+        # New Code #
+        # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if
+        # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or
+        # `zero3_save_16bit_model` is True in DeepSpeed Plugin.
+        # For Zero Stages 1 and 2, models are saved as usual in the output directory.
+        # The model name saved is `pytorch_model.bin`
+        unwrapped_model.save_pretrained(
+            args.output_dir,
+            is_main_process=accelerator.is_main_process,
+            save_function=accelerator.save,
+            state_dict=accelerator.get_state_dict(model),
+        )
+        if accelerator.is_main_process:
+            tokenizer.save_pretrained(args.output_dir)
+            if args.push_to_hub:
+                repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
+
+            with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+                json.dump({"perplexity": perplexity, "eval_loss": eval_loss.item()}, f)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/testbed/huggingface__accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py b/testbed/huggingface__accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ae8e917e743d683ec4ad85a5603e3c92ea2e310
--- /dev/null
+++ b/testbed/huggingface__accelerate/examples/by_feature/fsdp_with_peak_mem_tracking.py
@@ -0,0 +1,381 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import gc +import os + +import torch +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# - FSDP +# +# This example also demonstrates the checkpointing and sharding capabilities +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +# New Code # +# Converting Bytes to Megabytes +def b2mb(x): + return int(x / 2**20) + + +# New Code # +# This context manager is used to track the peak memory usage of the process +class TorchTracemalloc: + def __enter__(self): + gc.collect() + torch.cuda.empty_cache() + torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.cuda.memory_allocated() + return self + + def __exit__(self, *exc): + gc.collect() + torch.cuda.empty_cache() + self.end = torch.cuda.memory_allocated() + self.peak = torch.cuda.max_memory_allocated() + self.used = b2mb(self.end - self.begin) + self.peaked = b2mb(self.peak - self.begin) + # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # Initialize accelerator + if args.with_tracking: + accelerator = Accelerator( + cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="wandb", logging_dir=args.logging_dir + ) + else: + accelerator = Accelerator() + accelerator.print(accelerator.distributed_type) + + if hasattr(args.checkpointing_steps, "isdigit"): + if args.checkpointing_steps == "epoch": + checkpointing_steps = args.checkpointing_steps + elif args.checkpointing_steps.isdigit(): + checkpointing_steps = int(args.checkpointing_steps) + else: + raise ValueError( + f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
+ ) + else: + checkpointing_steps = None + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + # We need to initialize the trackers we use, and also store our configuration + if args.with_tracking: + experiment_config = vars(args) + accelerator.init_trackers("fsdp_glue_no_trainer", experiment_config) + + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) + datasets = load_dataset("glue", "mrpc") + metric = evaluate.load("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + set_seed(seed) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, return_dict=True) + # New Code # + # For FSDP feature, it is highly recommended and efficient to prepare the model before creating optimizer + model = accelerator.prepare(model) + accelerator.print(model) + + # Instantiate optimizer + # New Code # + # For FSDP feature, at present it doesn't support multiple parameter groups, + # so we need to create a single parameter group for the whole model + optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr, weight_decay=2e-4) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=10, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # New Code # + # For FSDP feature, prepare everything except the model as we have already prepared the model + # before creating the optimizer + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
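+    # (a sketch of the anti-pattern this ordering avoids; hypothetical code, not part of this example:
+    #     optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
+    #     model, optimizer = accelerator.prepare(model, optimizer)
+    # FSDP flattens and shards the parameters while wrapping the model, so an optimizer created from the
+    # unwrapped parameters would point at stale tensors)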
+    optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    overall_step = 0
+
+    # Potentially load in the weights and states from a previous save
+    if args.resume_from_checkpoint:
+        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
+            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
+            accelerator.load_state(args.resume_from_checkpoint)
+            path = os.path.basename(args.resume_from_checkpoint)
+        else:
+            # Get the most recent checkpoint
+            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
+            dirs.sort(key=os.path.getctime)
+            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
+        # Extract `epoch_{i}` or `step_{i}`
+        training_difference = os.path.splitext(path)[0]
+
+        if "epoch" in training_difference:
+            num_epochs -= int(training_difference.replace("epoch_", ""))
+            resume_step = None
+        else:
+            resume_step = int(training_difference.replace("step_", ""))
+            num_epochs -= resume_step // len(train_dataloader)
+            # If resuming by step, we also need to know exactly how far into the DataLoader we went
+            resume_step = (num_epochs * len(train_dataloader)) - resume_step
+
+    # Now we train the model
+    for epoch in range(num_epochs):
+        # New Code #
+        # context manager to track the peak memory usage during the training epoch
+        with TorchTracemalloc() as tracemalloc:
+            model.train()
+            if args.with_tracking:
+                total_loss = 0
+            for step, batch in enumerate(train_dataloader):
+                # We need to skip steps until we reach the resumed step
+                if args.resume_from_checkpoint and epoch == 0:
+                    if resume_step is not None and step < resume_step:
+                        continue  # skip already-seen batches when resuming
+                # We could avoid this line since we set the accelerator with `device_placement=True`.
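+                # (`batch` here is a `BatchEncoding`, so a single `.to(device)` call moves every tensor it contains)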
+ batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + # accelerator.print(lr_scheduler.get_lr()) + + overall_step += 1 + + if isinstance(checkpointing_steps, int): + output_dir = f"step_{overall_step}" + if overall_step % checkpointing_steps == 0: + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + # New Code # + # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage + accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) + accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) + accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) + accelerator.print( + "Total Peak Memory consumed during the train (max): {}".format( + tracemalloc.peaked + b2mb(tracemalloc.begin) + ) + ) + # Logging the peak memory usage of the GPU to the tracker + if args.with_tracking: + accelerator.log( + { + "train_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), + }, + step=epoch, + ) + + # New Code # + # context manager to track the peak memory usage during the evaluation + with TorchTracemalloc() as tracemalloc: + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. 
+ accelerator.print(f"epoch {epoch}:", eval_metric) + if args.with_tracking: + accelerator.log( + { + "accuracy": eval_metric["accuracy"], + "f1": eval_metric["f1"], + "train_loss": total_loss.item() / len(train_dataloader), + }, + step=epoch, + ) + + if checkpointing_steps == "epoch": + output_dir = f"epoch_{epoch}" + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + # New Code # + # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage + accelerator.print("Memory before entering the eval : {}".format(b2mb(tracemalloc.begin))) + accelerator.print("Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used)) + accelerator.print("Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked)) + accelerator.print( + "Total Peak Memory consumed during the eval (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin)) + ) + # Logging the peak memory usage of the GPU to the tracker + if args.with_tracking: + accelerator.log( + { + "eval_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), + }, + step=epoch, + ) + + if args.with_tracking: + accelerator.end_training() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--with_tracking", + action="store_true", + help="Whether to load in all available experiment trackers from the environment and use them for logging.", + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help="Location on where to store experiment tracking logs`", + ) + parser.add_argument( + "--model_name_or_path", + type=str, + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=True, + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 1, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/by_feature/gradient_accumulation.py b/testbed/huggingface__accelerate/examples/by_feature/gradient_accumulation.py new file mode 100644 index 0000000000000000000000000000000000000000..170a885e315ae326cb632ae671945437800b084e --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/gradient_accumulation.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate +# and perform gradient accumulation +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
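+    # (the evaluation dataloader uses the fixed EVAL_BATCH_SIZE and no shuffling, since example order
+    # does not affect the computed metric)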
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # New Code # + gradient_accumulation_steps = int(args.gradient_accumulation_steps) + # Initialize accelerator + accelerator = Accelerator( + cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps + ) + if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: + raise NotImplementedError( + "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" + ) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + metric = evaluate.load("glue", "mrpc") + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs), + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # Now we train the model + for epoch in range(num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + # New code # + # We use the new `accumulate` context manager to perform gradient accumulation + # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. + with accelerator.accumulate(model): + output = model(**batch) + loss = output.loss + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+            batch.to(accelerator.device)
+            with torch.no_grad():
+                outputs = model(**batch)
+            predictions = outputs.logits.argmax(dim=-1)
+            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
+            metric.add_batch(
+                predictions=predictions,
+                references=references,
+            )
+
+        eval_metric = metric.compute()
+        # Use accelerator.print to print only on the main process.
+        accelerator.print(f"epoch {epoch}:", eval_metric)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Simple example of training script.")
+    parser.add_argument(
+        "--mixed_precision",
+        type=str,
+        default="no",
+        choices=["no", "fp16", "bf16"],
+        help="Whether to use mixed precision. Choose "
+        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
+        "and an Nvidia Ampere GPU.",
+    )
+    # New Code #
+    parser.add_argument(
+        "--gradient_accumulation_steps",
+        type=int,
+        default=1,
+        help="The number of minibatches over which to accumulate gradients before stepping the optimizer.",
+    )
+    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
+    args = parser.parse_args()
+    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
+    training_function(config, args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/testbed/huggingface__accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py b/testbed/huggingface__accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fcf10207f3dfa48d0cae96b0ffbb750b195e631
--- /dev/null
+++ b/testbed/huggingface__accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py
@@ -0,0 +1,701 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...)
+on a text file or a dataset without using HuggingFace Trainer.
+
+Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
+https://huggingface.co/models?filter=text-generation
+"""
+# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
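+# An illustrative launch, assuming an `accelerate` config with Megatron-LM enabled (the paths and
+# argument values below are hypothetical; adjust them to your setup):
+#   accelerate launch examples/by_feature/megatron_lm_gpt_pretraining.py \
+#       --model_name_or_path gpt2 \
+#       --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \
+#       --output_dir ./gpt2-pretrained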
+
+import argparse
+import json
+import logging
+import math
+import os
+import random
+from itertools import chain
+from pathlib import Path
+
+import torch
+from torch.utils.data import DataLoader
+
+import datasets
+import transformers
+from accelerate import Accelerator, DistributedType
+from accelerate.logging import get_logger
+from accelerate.utils import MegatronLMDummyScheduler, set_seed
+from datasets import load_dataset
+from huggingface_hub import Repository
+from tqdm.auto import tqdm
+from transformers import (
+    CONFIG_MAPPING,
+    MODEL_MAPPING,
+    AutoConfig,
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    SchedulerType,
+    default_data_collator,
+    get_scheduler,
+)
+from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
+from transformers.utils.versions import require_version
+
+
+# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+check_min_version("4.23.0.dev0")
+
+logger = get_logger(__name__)
+
+require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+
+MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
+    parser.add_argument(
+        "--dataset_name",
+        type=str,
+        default=None,
+        help="The name of the dataset to use (via the datasets library).",
+    )
+    parser.add_argument(
+        "--dataset_config_name",
+        type=str,
+        default=None,
+        help="The configuration name of the dataset to use (via the datasets library).",
+    )
+    parser.add_argument(
+        "--train_file", type=str, default=None, help="A csv or a json file containing the training data."
+    )
+    parser.add_argument(
+        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
+    )
+    parser.add_argument(
+        "--validation_split_percentage",
+        default=5,
+        help="The percentage of the train set used as validation set in case there's no validation split",
+    )
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+        required=False,
+    )
+    parser.add_argument(
+        "--config_name",
+        type=str,
+        default=None,
+        help="Pretrained config name or path if not the same as model_name",
+    )
+    parser.add_argument(
+        "--tokenizer_name",
+        type=str,
+        default=None,
+        help="Pretrained tokenizer name or path if not the same as model_name",
+    )
+    parser.add_argument(
+        "--use_slow_tokenizer",
+        action="store_true",
+        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+    )
+    parser.add_argument(
+        "--per_device_train_batch_size",
+        type=int,
+        default=8,
+        help="Batch size (per device) for the training dataloader.",
+    )
+    parser.add_argument(
+        "--per_device_eval_batch_size",
+        type=int,
+        default=8,
+        help="Batch size (per device) for the evaluation dataloader.",
+    )
+    parser.add_argument(
+        "--learning_rate",
+        type=float,
+        default=5e-5,
+        help="Initial learning rate (after the potential warmup period) to use.",
+    )
+    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
+    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
+    parser.add_argument(
+        "--max_train_steps",
+        type=int,
+        default=None,
+        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
+    )
+    parser.add_argument(
+        "--gradient_accumulation_steps",
+        type=int,
+        default=1,
+        help="Number of update steps to accumulate before performing a backward/update pass.",
+    )
+    parser.add_argument(
+        "--lr_scheduler_type",
+        type=SchedulerType,
+        default="linear",
+        help="The scheduler type to use.",
+        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
+    )
+    parser.add_argument(
+        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
+    )
+    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
+    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
+    parser.add_argument(
+        "--model_type",
+        type=str,
+        default=None,
+        help="Model type to use if training from scratch.",
+        choices=MODEL_TYPES,
+    )
+    parser.add_argument(
+        "--block_size",
+        type=int,
+        default=None,
+        help=(
+            "Optional input sequence length after tokenization. The training dataset will be truncated in blocks of"
+            " this size for training. Defaults to the model max input length for single sentence inputs (take into"
+            " account special tokens)."
+        ),
+    )
+    parser.add_argument(
+        "--preprocessing_num_workers",
+        type=int,
+        default=None,
+        help="The number of processes to use for the preprocessing.",
+    )
+    parser.add_argument(
+        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
+    )
+    parser.add_argument(
+        "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
+    )
+    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
+    parser.add_argument(
+        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
+    )
+    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--checkpointing_steps",
+        type=str,
+        default=None,
+        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
+    )
+    parser.add_argument(
+        "--resume_from_checkpoint",
+        type=str,
+        default=None,
+        help="If the training should continue from a checkpoint folder.",
+    )
+    parser.add_argument(
+        "--with_tracking",
+        action="store_true",
+        help="Whether to enable experiment trackers for logging.",
+    )
+    parser.add_argument(
+        "--report_to",
+        type=str,
+        default="all",
+        help=(
+            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
+            ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations. '
+            "Only applicable when `--with_tracking` is passed."
+        ),
+    )
+    args = parser.parse_args()
+
+    # Sanity checks
+    if args.dataset_name is None and args.train_file is None and args.validation_file is None:
+        raise ValueError("Need either a dataset name or a training/validation file.")
+    else:
+        if args.train_file is not None:
+            extension = args.train_file.split(".")[-1]
+            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
+        if args.validation_file is not None:
+            extension = args.validation_file.split(".")[-1]
+            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
+
+    if args.push_to_hub:
+        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
+ + return args + + +def main(): + args = parse_args() + + # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The + # information sent is the one passed as arguments along with your Python/PyTorch versions. + send_example_telemetry("run_clm_no_trainer", args) + + # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. + # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers + # in the environment + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.push_to_hub: + if args.hub_model_id is None: + repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) + else: + repo_name = args.hub_model_id + repo = Repository(args.output_dir, clone_from=repo_name) + + with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: + if "step_*" not in gitignore: + gitignore.write("step_*\n") + if "epoch_*" not in gitignore: + gitignore.write("epoch_*\n") + elif args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + accelerator.wait_for_everyone() + + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called + # 'text' is found. You can easily tweak this behavior (see below). + # + # In distributed training, the load_dataset function guarantee that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. 
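+        # (for example `--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1`; an illustrative
+        # choice, not a default of this script)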
+ raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) + if "validation" not in raw_datasets.keys(): + raw_datasets["validation"] = load_dataset( + args.dataset_name, + args.dataset_config_name, + split=f"train[:{args.validation_split_percentage}%]", + ) + raw_datasets["train"] = load_dataset( + args.dataset_name, + args.dataset_config_name, + split=f"train[{args.validation_split_percentage}%:]", + ) + else: + data_files = {} + dataset_args = {} + if args.train_file is not None: + data_files["train"] = args.train_file + if args.validation_file is not None: + data_files["validation"] = args.validation_file + extension = args.train_file.split(".")[-1] + if extension == "txt": + extension = "text" + dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks + raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) + # If no validation data is there, validation_split_percentage will be used to divide the dataset. + if "validation" not in raw_datasets.keys(): + raw_datasets["validation"] = load_dataset( + extension, + data_files=data_files, + split=f"train[:{args.validation_split_percentage}%]", + **dataset_args, + ) + raw_datasets["train"] = load_dataset( + extension, + data_files=data_files, + split=f"train[{args.validation_split_percentage}%:]", + **dataset_args, + ) + + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab. + if args.config_name: + config = AutoConfig.from_pretrained(args.config_name) + elif args.model_name_or_path: + config = AutoConfig.from_pretrained(args.model_name_or_path) + else: + config = CONFIG_MAPPING[args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + + if args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) + elif args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is not supported by this script." + "You can do it from another script, save it, and load it from here, using --tokenizer_name." + ) + + if args.model_name_or_path: + model = AutoModelForCausalLM.from_pretrained( + args.model_name_or_path, + from_tf=bool(".ckpt" in args.model_name_or_path), + config=config, + ) + else: + logger.info("Training new model from scratch") + model = AutoModelForCausalLM.from_config(config) + + model.resize_token_embeddings(len(tokenizer)) + + # Preprocessing the datasets. + # First we tokenize all the texts. 
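+    # (the raw text columns are removed by the `map` call below, so the tokenized dataset only carries
+    # the tokenizer outputs such as `input_ids` and `attention_mask`)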
+ column_names = raw_datasets["train"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + return tokenizer(examples[text_column_name]) + + with accelerator.main_process_first(): + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + + if args.block_size is None: + block_size = tokenizer.model_max_length + if block_size > 1024: + logger.warning( + f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " + "Picking 1024 instead. You can change that default value by passing --block_size xxx." + ) + block_size = 1024 + else: + if args.block_size > tokenizer.model_max_length: + logger.warning( + f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" + f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." + ) + block_size = min(args.block_size, tokenizer.model_max_length) + + # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + + # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder + # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower + # to preprocess. + # + # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: + # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map + + with accelerator.main_process_first(): + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + num_proc=args.preprocessing_num_workers, + load_from_cache_file=not args.overwrite_cache, + desc=f"Grouping texts in chunks of {block_size}", + ) + + train_dataset = lm_datasets["train"] + eval_dataset = lm_datasets["validation"] + + # Log a few random samples from the training set: + for index in random.sample(range(len(train_dataset)), 3): + logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") + + # DataLoaders creation: + train_dataloader = DataLoader( + train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size + ) + eval_dataloader = DataLoader( + eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size + ) + + # Optimizer + # Split weights in two groups, one with weight decay and the other not. 
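+    # (matching is by substring: e.g. a GPT-2 parameter named `transformer.h.0.attn.c_attn.bias`
+    # contains "bias", so it falls into the `weight_decay=0.0` group below)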
+ no_decay = ["bias", "layer_norm.weight"] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": args.weight_decay, + }, + { + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }, + ] + optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + # New Code + # For Megatron-LM, we need to use `MegatronLMDummyScheduler` instead of regular schedulers + if accelerator.distributed_type == DistributedType.MEGATRON_LM: + lr_scheduler = MegatronLMDummyScheduler( + optimizer=optimizer, + total_num_steps=args.max_train_steps, + warmup_num_steps=args.num_warmup_steps, + ) + else: + lr_scheduler = get_scheduler( + name=args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + # Prepare everything with our `accelerator`. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. + if accelerator.distributed_type == DistributedType.TPU: + model.tie_weights() + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # Figure out how many steps we should save the Accelerator states + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if args.with_tracking: + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("clm_no_trainer", experiment_config) + + # Train! 
+ # New Code + # For Megatron-LM, we need to get `global_batch_size` from megatron_lm_plugin + # as it handles the specifics related to data parallelism, tensor model parallelism and pipeline parallelism + if accelerator.distributed_type == DistributedType.MEGATRON_LM: + total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size + else: + total_batch_size = ( + args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + ) + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + # Only show the progress bar once on each machine. + progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) + completed_steps = 0 + starting_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": + accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") + accelerator.load_state(args.resume_from_checkpoint) + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] + dirs.sort(key=os.path.getctime) + path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last + # Extract `epoch_{i}` or `step_{i}` + training_difference = os.path.splitext(path)[0] + + if "epoch" in training_difference: + starting_epoch = int(training_difference.replace("epoch_", "")) + 1 + resume_step = None + else: + # need to multiply `gradient_accumulation_steps` to reflect real steps + resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps + starting_epoch = resume_step // len(train_dataloader) + resume_step -= starting_epoch * len(train_dataloader) + + # update the progress_bar if load from checkpoint + progress_bar.update(starting_epoch * num_update_steps_per_epoch) + completed_steps = starting_epoch * num_update_steps_per_epoch + + for epoch in range(starting_epoch, args.num_train_epochs): + model.train() + if args.with_tracking: + total_loss = 0 + for step, batch in enumerate(train_dataloader): + # We need to skip steps until we reach the resumed step + if args.resume_from_checkpoint and epoch == starting_epoch: + if resume_step is not None and step < resume_step: + if step % args.gradient_accumulation_steps == 0: + progress_bar.update(1) + completed_steps += 1 + continue + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + completed_steps += 1 + + if isinstance(checkpointing_steps, int): + if completed_steps % checkpointing_steps == 0: + output_dir = f"step_{completed_steps }" + if args.output_dir is not 
+ output_dir = os.path.join(args.output_dir, output_dir)
+ accelerator.save_state(output_dir)
+ if completed_steps >= args.max_train_steps:
+ break
+
+ model.eval()
+ losses = []
+ for step, batch in enumerate(eval_dataloader):
+ with torch.no_grad():
+ outputs = model(**batch)
+
+ loss = outputs.loss
+ # New Code
+ # For Megatron-LM, the losses are already averaged across the data parallel group
+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+ losses.append(loss)
+ else:
+ losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
+ try:
+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+ losses = torch.tensor(losses)
+ else:
+ losses = torch.cat(losses)
+ eval_loss = torch.mean(losses)
+ perplexity = math.exp(eval_loss)
+ except OverflowError:
+ perplexity = float("inf")
+
+ logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}")
+
+ if args.with_tracking:
+ accelerator.log(
+ {
+ "perplexity": perplexity,
+ "eval_loss": eval_loss,
+ "train_loss": total_loss.item() / len(train_dataloader),
+ "epoch": epoch,
+ "step": completed_steps,
+ },
+ step=completed_steps,
+ )
+
+ if args.push_to_hub and epoch < args.num_train_epochs - 1:
+ accelerator.wait_for_everyone()
+ unwrapped_model = accelerator.unwrap_model(model)
+ unwrapped_model.save_pretrained(
+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
+ )
+ if accelerator.is_main_process:
+ tokenizer.save_pretrained(args.output_dir)
+ repo.push_to_hub(
+ commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
+ )
+
+ if args.checkpointing_steps == "epoch":
+ output_dir = f"epoch_{epoch}"
+ if args.output_dir is not None:
+ output_dir = os.path.join(args.output_dir, output_dir)
+ accelerator.save_state(output_dir)
+
+ # Calling `accelerator.end_training()` causes some issues with Megatron-LM when using `wandb` at the end of the
+ # main function. Everything works fine in spite of commenting this out (wandb finishes/closes the run without error).
+ # if args.with_tracking:
+ # accelerator.end_training()
+
+ if args.output_dir is not None:
+ accelerator.wait_for_everyone()
+ # New Code
+ # For Megatron-LM, we need to save the model using `accelerator.save_state`
+ if accelerator.distributed_type == DistributedType.MEGATRON_LM:
+ accelerator.save_state(args.output_dir)
+ else:
+ unwrapped_model = accelerator.unwrap_model(model)
+ unwrapped_model.save_pretrained(
+ args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
+ )
+ if accelerator.is_main_process:
+ tokenizer.save_pretrained(args.output_dir)
+ if args.push_to_hub:
+ repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
+
+ with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+ json.dump({"perplexity": perplexity}, f)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testbed/huggingface__accelerate/examples/by_feature/memory.py b/testbed/huggingface__accelerate/examples/by_feature/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..684a3285327742dcec05e841c82411c3d00ef14a
--- /dev/null
+++ b/testbed/huggingface__accelerate/examples/by_feature/memory.py
@@ -0,0 +1,220 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +# New Code # +import evaluate +from accelerate import Accelerator, DistributedType +from accelerate.utils import find_executable_batch_size +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate, +# specifically showcasing how to ensure out-of-memory errors never +# interrupt training, and builds off the `nlp_example.py` script. +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# New additions from the base script can be found quickly by +# looking for the # New Code # tags +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
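+ # (Editor's note, an assumption about how this script is driven: the
+ # `batch_size` parameter here is whatever value the caller passes in; in
+ # this file that is the size currently being tried by
+ # `find_executable_batch_size` further below, e.g. 16 -> 8 -> 4 after two
+ # out-of-memory retries, while evaluation always uses the fixed
+ # EVAL_BATCH_SIZE of 32 defined at the top of the file.)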
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + metric = evaluate.load("glue", "mrpc") + + # New Code # + # We now can define an inner training loop function. It should take a batch size as the only parameter, + # and build the dataloaders in there. + # It also gets our decorator + @find_executable_batch_size(starting_batch_size=batch_size) + def inner_training_loop(batch_size): + # And now just move everything below under this function + # We need to bring in the Accelerator object from earlier + nonlocal accelerator + # And reset all of its attributes that could hold onto any memory: + accelerator.free_memory() + + # Then we can declare the model, optimizer, and everything else: + set_seed(seed) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs), + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # Now we train the model + for epoch in range(num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + + # New Code # + # And call it at the end with no arguments + # Note: You could also refactor this outside of your training loop function + inner_training_loop() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/by_feature/multi_process_metrics.py b/testbed/huggingface__accelerate/examples/by_feature/multi_process_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..cb9534c4a1ef0662ec73e60f74b61c558eddb118 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/multi_process_metrics.py @@ -0,0 +1,225 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate, +# specifically showcasing how to properly calculate the metrics on the +# validation dataset when in a distributed system, and builds off the +# `nlp_example.py` script. +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To help focus on the differences in the code, building `DataLoaders` +# was refactored into its own function. 
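+#
+# (Editor's illustrative note: assume 2 processes and a 9-sample eval set
+# with batch size 2. The distributed sampler pads the dataset up to 10
+# samples so each process sees 5, and a plain `gather` therefore returns
+# 10 predictions -- one of them a duplicate that must be dropped before
+# computing metrics. That is what the truncation in the # New Code #
+# section below does.)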
+# New additions from the base script can be found quickly by +# looking for the # New Code # tags +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + metric = evaluate.load("glue", "mrpc") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # Now we train the model + for epoch in range(num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather((predictions, batch["labels"])) + # New Code # + # First we check if it's a distributed system + if accelerator.use_distributed: + # Then see if we're on the last batch of our eval dataloader + if step == len(eval_dataloader) - 1: + # Last batch needs to be truncated on distributed systems as it contains additional samples + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + # Otherwise we add the number of samples seen + samples_seen += references.shape[0] + # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: + # accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/by_feature/tracking.py b/testbed/huggingface__accelerate/examples/by_feature/tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..e4467697cfb1190dd4ea918ff2594e2218badce5 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/by_feature/tracking.py @@ -0,0 +1,263 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate, +# specifically showcasing the experiment tracking capability, +# and builds off the `nlp_example.py` script. 
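+#
+# (Editor's note: with `log_with="all"` the Accelerator picks up whichever
+# supported trackers are installed in the environment, e.g. TensorBoard or
+# Weights & Biases, and a single `accelerator.log(...)` call then fans the
+# metrics out to all of them.)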
+# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To help focus on the differences in the code, building `DataLoaders` +# was refactored into its own function. +# New additions from the base script can be found quickly by +# looking for the # New Code # tags +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +# For testing only +if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + from accelerate.test_utils.training import mocked_dataloaders + + get_dataloaders = mocked_dataloaders # noqa: F811 + + +def training_function(config, args): + # For testing only + if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": + config["num_epochs"] = 2 + # Initialize Accelerator + + # New Code # + # We pass in "all" to `log_with` to grab all available trackers in the environment + # Note: If using a custom `Tracker` class, should be passed in here such as: + # >>> log_with = ["all", MyCustomTrackerClassInstance()] + if args.with_tracking: + accelerator = Accelerator( + cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir + ) + else: + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + set_seed(seed) + + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + metric = evaluate.load("glue", "mrpc") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # New Code # + # We need to initialize the trackers we use. 
Overall configurations can also be stored + if args.with_tracking: + run = os.path.split(__file__)[-1].split(".")[0] + accelerator.init_trackers(run, config) + + # Now we train the model + for epoch in range(num_epochs): + model.train() + # New Code # + # For our tracking example, we will log the total loss of each epoch + if args.with_tracking: + total_loss = 0 + for step, batch in enumerate(train_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + outputs = model(**batch) + loss = outputs.loss + # New Code # + if args.with_tracking: + total_loss += loss.detach().float() + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + model.eval() + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True` (the default). + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + + # New Code # + # To actually log, we call `Accelerator.log` + # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` + if args.with_tracking: + accelerator.log( + { + "accuracy": eval_metric["accuracy"], + "f1": eval_metric["f1"], + "train_loss": total_loss.item() / len(train_dataloader), + "epoch": epoch, + }, + step=epoch, + ) + + # New Code # + # When a run is finished, you should call `accelerator.end_training()` + # to close all of the open trackers + if args.with_tracking: + accelerator.end_training() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + parser.add_argument( + "--with_tracking", + action="store_true", + help="Whether to load in all available experiment trackers from the environment and use them for logging.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help="Location on where to store experiment tracking logs`", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/complete_cv_example.py b/testbed/huggingface__accelerate/examples/complete_cv_example.py new file mode 100644 index 0000000000000000000000000000000000000000..bca55aa87fcdd568318e3a05d45ed77da9814e96 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/complete_cv_example.py @@ -0,0 +1,317 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os +import re + +import numpy as np +import torch +from torch.optim.lr_scheduler import OneCycleLR +from torch.utils.data import DataLoader, Dataset + +import PIL +from accelerate import Accelerator +from timm import create_model +from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor + + +######################################################################## +# This is a fully working simple example to use Accelerate +# +# This example trains a ResNet50 on the Oxford-IIT Pet Dataset +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +# Function to get the label from the filename +def extract_label(fname): + stem = fname.split(os.path.sep)[-1] + return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] + + +class PetsDataset(Dataset): + def __init__(self, file_names, image_transform=None, label_to_id=None): + self.file_names = file_names + self.image_transform = image_transform + self.label_to_id = label_to_id + + def __len__(self): + return len(self.file_names) + + def __getitem__(self, idx): + fname = self.file_names[idx] + raw_image = PIL.Image.open(fname) + image = raw_image.convert("RGB") + if self.image_transform is not None: + image = self.image_transform(image) + label = extract_label(fname) + if self.label_to_id is not None: + label = self.label_to_id[label] + return {"image": image, "label": label} + + +def training_function(config, args): + # Initialize accelerator + if args.with_tracking: + accelerator = Accelerator( + cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir + ) + else: + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + image_size = config["image_size"] + if not isinstance(image_size, (list, tuple)): + image_size = (image_size, image_size) + + # Parse out whether we are saving every epoch or after a certain number of batches + if hasattr(args.checkpointing_steps, "isdigit"): + if args.checkpointing_steps == "epoch": + checkpointing_steps = args.checkpointing_steps + elif args.checkpointing_steps.isdigit(): + checkpointing_steps = int(args.checkpointing_steps) + else: + raise ValueError( + f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
+ ) + else: + checkpointing_steps = None + + # We need to initialize the trackers we use, and also store our configuration + if args.with_tracking: + run = os.path.split(__file__)[-1].split(".")[0] + accelerator.init_trackers(run, config) + + # Grab all the image filenames + file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] + + # Build the label correspondences + all_labels = [extract_label(fname) for fname in file_names] + id_to_label = list(set(all_labels)) + id_to_label.sort() + label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} + + # Set the seed before splitting the data. + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + # Split our filenames between train and validation + random_perm = np.random.permutation(len(file_names)) + cut = int(0.8 * len(file_names)) + train_split = random_perm[:cut] + eval_split = random_perm[cut:] + + # For training we use a simple RandomResizedCrop + train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) + train_dataset = PetsDataset( + [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id + ) + + # For evaluation, we use a deterministic Resize + eval_tfm = Compose([Resize(image_size), ToTensor()]) + eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) + + # Instantiate dataloaders. + train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) + eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Freezing the base model + for param in model.parameters(): + param.requires_grad = False + for param in model.get_classifier().parameters(): + param.requires_grad = True + + # We normalize the batches of images to be a bit faster. + mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) + std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) + + # Instantiate optimizer + optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) + + # Instantiate learning rate scheduler + lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
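+ # (Editor's note: `prepare` wraps each object for the current launch
+ # configuration -- e.g. the model for distributed training and the
+ # dataloaders into per-process shards -- so len(train_dataloader)
+ # afterwards reflects only this process's share of the data.)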
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
+ # We need to keep track of how many total steps we have iterated over
+ overall_step = 0
+ # We also need to keep track of the starting epoch so files are named properly
+ starting_epoch = 0
+
+ # Potentially load in the weights and states from a previous save
+ if args.resume_from_checkpoint:
+ if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
+ accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
+ accelerator.load_state(args.resume_from_checkpoint)
+ path = os.path.basename(args.resume_from_checkpoint)
+ else:
+ # Get the most recent checkpoint
+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
+ dirs.sort(key=os.path.getctime)
+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
+ # Extract `epoch_{i}` or `step_{i}`
+ training_difference = os.path.splitext(path)[0]
+
+ if "epoch" in training_difference:
+ starting_epoch = int(training_difference.replace("epoch_", "")) + 1
+ resume_step = None
+ else:
+ resume_step = int(training_difference.replace("step_", ""))
+ starting_epoch = resume_step // len(train_dataloader)
+ resume_step -= starting_epoch * len(train_dataloader)
+
+ # Now we train the model
+ for epoch in range(starting_epoch, num_epochs):
+ model.train()
+ if args.with_tracking:
+ total_loss = 0
+ for step, batch in enumerate(train_dataloader):
+ # We need to skip steps until we reach the resumed step
+ if args.resume_from_checkpoint and epoch == starting_epoch:
+ if resume_step is not None and step < resume_step:
+ overall_step += 1
+ continue
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}
+ inputs = (batch["image"] - mean) / std
+ outputs = model(inputs)
+ loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
+ # We keep track of the loss at each epoch
+ if args.with_tracking:
+ total_loss += loss.detach().float()
+ accelerator.backward(loss)
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+ overall_step += 1
+ if isinstance(checkpointing_steps, int):
+ output_dir = f"step_{overall_step}"
+ if overall_step % checkpointing_steps == 0:
+ if args.output_dir is not None:
+ output_dir = os.path.join(args.output_dir, output_dir)
+ accelerator.save_state(output_dir)
+ model.eval()
+ accurate = 0
+ num_elems = 0
+ for step, batch in enumerate(eval_dataloader):
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch = {k: v.to(accelerator.device) for k, v in batch.items()}
+ inputs = (batch["image"] - mean) / std
+ with torch.no_grad():
+ outputs = model(inputs)
+ predictions = outputs.argmax(dim=-1)
+ predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
+ accurate_preds = predictions == references
+ num_elems += accurate_preds.shape[0]
+ accurate += accurate_preds.long().sum()
+
+ eval_metric = accurate.item() / num_elems
+ # Use accelerator.print to print only on the main process.
+ accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") + if args.with_tracking: + accelerator.log( + { + "accuracy": 100 * eval_metric, + "train_loss": total_loss.item() / len(train_dataloader), + "epoch": epoch, + }, + step=overall_step, + ) + if checkpointing_steps == "epoch": + output_dir = f"epoch_{epoch}" + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + + if args.with_tracking: + accelerator.end_training() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument("--data_dir", required=True, help="The data folder on disk.") + parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--with_tracking", + action="store_true", + help="Whether to load in all available experiment trackers from the environment and use them for logging.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help="Location on where to store experiment tracking logs`", + ) + args = parser.parse_args() + config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/complete_nlp_example.py b/testbed/huggingface__accelerate/examples/complete_nlp_example.py new file mode 100644 index 0000000000000000000000000000000000000000..559a5c963009d1fa7b6901a84d22b0c084a2e32d --- /dev/null +++ b/testbed/huggingface__accelerate/examples/complete_nlp_example.py @@ -0,0 +1,296 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import argparse +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# This example also demonstrates the checkpointing and sharding capabilities +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def training_function(config, args): + # Initialize accelerator + if args.with_tracking: + accelerator = Accelerator( + cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", logging_dir=args.logging_dir + ) + else: + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + + if hasattr(args.checkpointing_steps, "isdigit"): + if args.checkpointing_steps == "epoch": + checkpointing_steps = args.checkpointing_steps + elif args.checkpointing_steps.isdigit(): + checkpointing_steps = int(args.checkpointing_steps) + else: + raise ValueError( + f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
+ ) + else: + checkpointing_steps = None + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + # We need to initialize the trackers we use, and also store our configuration + if args.with_tracking: + run = os.path.split(__file__)[-1].split(".")[0] + accelerator.init_trackers(run, config) + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + metric = evaluate.load("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + set_seed(seed) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
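+ # (Editor's note: after `prepare`, `accelerator.backward(loss)` also takes
+ # care of any loss scaling required by the `mixed_precision` setting, so no
+ # manual GradScaler handling is needed in the loop below.)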
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
+
+ # We need to keep track of how many total steps we have iterated over
+ overall_step = 0
+ # We also need to keep track of the starting epoch so files are named properly
+ starting_epoch = 0
+
+ # Potentially load in the weights and states from a previous save
+ if args.resume_from_checkpoint:
+ if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
+ accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
+ accelerator.load_state(args.resume_from_checkpoint)
+ path = os.path.basename(args.resume_from_checkpoint)
+ else:
+ # Get the most recent checkpoint
+ dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
+ dirs.sort(key=os.path.getctime)
+ path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
+ # Extract `epoch_{i}` or `step_{i}`
+ training_difference = os.path.splitext(path)[0]
+
+ if "epoch" in training_difference:
+ starting_epoch = int(training_difference.replace("epoch_", "")) + 1
+ resume_step = None
+ else:
+ resume_step = int(training_difference.replace("step_", ""))
+ starting_epoch = resume_step // len(train_dataloader)
+ resume_step -= starting_epoch * len(train_dataloader)
+
+ # Now we train the model
+ for epoch in range(starting_epoch, num_epochs):
+ model.train()
+ if args.with_tracking:
+ total_loss = 0
+ for step, batch in enumerate(train_dataloader):
+ # We need to skip steps until we reach the resumed step
+ if args.resume_from_checkpoint and epoch == starting_epoch:
+ if resume_step is not None and step < resume_step:
+ overall_step += 1
+ continue
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch.to(accelerator.device)
+ outputs = model(**batch)
+ loss = outputs.loss
+ loss = loss / gradient_accumulation_steps
+ # We keep track of the loss at each epoch
+ if args.with_tracking:
+ total_loss += loss.detach().float()
+ accelerator.backward(loss)
+ if step % gradient_accumulation_steps == 0:
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+
+ overall_step += 1
+
+ if isinstance(checkpointing_steps, int):
+ output_dir = f"step_{overall_step}"
+ if overall_step % checkpointing_steps == 0:
+ if args.output_dir is not None:
+ output_dir = os.path.join(args.output_dir, output_dir)
+ accelerator.save_state(output_dir)
+
+ model.eval()
+ for step, batch in enumerate(eval_dataloader):
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch.to(accelerator.device)
+ with torch.no_grad():
+ outputs = model(**batch)
+ predictions = outputs.logits.argmax(dim=-1)
+ predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
+ metric.add_batch(
+ predictions=predictions,
+ references=references,
+ )
+
+ eval_metric = metric.compute()
+ # Use accelerator.print to print only on the main process.
+ accelerator.print(f"epoch {epoch}:", eval_metric) + if args.with_tracking: + accelerator.log( + { + "accuracy": eval_metric["accuracy"], + "f1": eval_metric["f1"], + "train_loss": total_loss.item() / len(train_dataloader), + "epoch": epoch, + }, + step=epoch, + ) + + if checkpointing_steps == "epoch": + output_dir = f"epoch_{epoch}" + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + + if args.with_tracking: + accelerator.end_training() + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script.") + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="Whether to use mixed precision. Choose" + "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." + "and an Nvidia Ampere GPU.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--with_tracking", + action="store_true", + help="Whether to load in all available experiment trackers from the environment and use them for logging.", + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help="Location on where to store experiment tracking logs`", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/cv_example.py b/testbed/huggingface__accelerate/examples/cv_example.py new file mode 100644 index 0000000000000000000000000000000000000000..1118a2f0e0832bbaca9cc1d7be9fa99a6e09b0d2 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/cv_example.py @@ -0,0 +1,211 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import argparse +import os +import re + +import numpy as np +import torch +from torch.optim.lr_scheduler import OneCycleLR +from torch.utils.data import DataLoader, Dataset + +import PIL +from accelerate import Accelerator +from timm import create_model +from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor + + +######################################################################## +# This is a fully working simple example to use Accelerate +# +# This example trains a ResNet50 on the Oxford-IIT Pet Dataset +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +# Function to get the label from the filename +def extract_label(fname): + stem = fname.split(os.path.sep)[-1] + return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] + + +class PetsDataset(Dataset): + def __init__(self, file_names, image_transform=None, label_to_id=None): + self.file_names = file_names + self.image_transform = image_transform + self.label_to_id = label_to_id + + def __len__(self): + return len(self.file_names) + + def __getitem__(self, idx): + fname = self.file_names[idx] + raw_image = PIL.Image.open(fname) + image = raw_image.convert("RGB") + if self.image_transform is not None: + image = self.image_transform(image) + label = extract_label(fname) + if self.label_to_id is not None: + label = self.label_to_id[label] + return {"image": image, "label": label} + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + image_size = config["image_size"] + if not isinstance(image_size, (list, tuple)): + image_size = (image_size, image_size) + + # Grab all the image filenames + file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] + + # Build the label correspondences + all_labels = [extract_label(fname) for fname in file_names] + id_to_label = list(set(all_labels)) + id_to_label.sort() + label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} + + # Set the seed before splitting the data. + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + # Split our filenames between train and validation + random_perm = np.random.permutation(len(file_names)) + cut = int(0.8 * len(file_names)) + train_split = random_perm[:cut] + eval_split = random_perm[cut:] + + # For training we use a simple RandomResizedCrop + train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) + train_dataset = PetsDataset( + [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id + ) + + # For evaluation, we use a deterministic Resize + eval_tfm = Compose([Resize(image_size), ToTensor()]) + eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) + + # Instantiate dataloaders. 
+    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
+    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
+
+    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
+    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
+
+    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
+    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
+    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
+    model = model.to(accelerator.device)
+
+    # Freezing the base model
+    for param in model.parameters():
+        param.requires_grad = False
+    for param in model.get_classifier().parameters():
+        param.requires_grad = True
+
+    # We normalize the batches of images on the device, which is a bit faster.
+    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
+    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
+
+    # Instantiate optimizer
+    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
+
+    # Instantiate learning rate scheduler
+    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
+
+    # Prepare everything
+    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
+    # prepare method.
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    # Now we train the model
+    for epoch in range(num_epochs):
+        model.train()
+        for step, batch in enumerate(train_dataloader):
+            # We could avoid this line since we set the accelerator with `device_placement=True`.
+            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
+            inputs = (batch["image"] - mean) / std
+            outputs = model(inputs)
+            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
+            accelerator.backward(loss)
+            optimizer.step()
+            lr_scheduler.step()
+            optimizer.zero_grad()
+
+        model.eval()
+        accurate = 0
+        num_elems = 0
+        for _, batch in enumerate(eval_dataloader):
+            # We could avoid this line since we set the accelerator with `device_placement=True`.
+            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
+            inputs = (batch["image"] - mean) / std
+            with torch.no_grad():
+                outputs = model(inputs)
+            predictions = outputs.argmax(dim=-1)
+            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
+            accurate_preds = predictions == references
+            num_elems += accurate_preds.shape[0]
+            accurate += accurate_preds.long().sum()
+
+        eval_metric = accurate.item() / num_elems
+        # Use accelerator.print to print only on the main process.
+        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Simple example of training script.")
+    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
+    parser.add_argument(
+        "--mixed_precision",
+        type=str,
+        default="no",
+        choices=["no", "fp16", "bf16"],
+        help="Whether to use mixed precision. Choose "
+        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
+ "and an Nvidia Ampere GPU.", + ) + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") + args = parser.parse_args() + config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage1_config.json b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage1_config.json new file mode 100644 index 0000000000000000000000000000000000000000..674420e57fc21329cd466dc2a2e68f981c621bce --- /dev/null +++ b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage1_config.json @@ -0,0 +1,43 @@ +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto", + "torch_adam": true, + "adam_w_mode": true + } + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", + "total_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 1, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": "auto", + "contiguous_gradients": true + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage2_config.json b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage2_config.json new file mode 100644 index 0000000000000000000000000000000000000000..9597f8485c1a96424ff91f88ab9d934c4ac07bb9 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage2_config.json @@ -0,0 +1,43 @@ +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto", + "torch_adam": true, + "adam_w_mode": true + } + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", + "total_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": "auto", + "contiguous_gradients": true + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage2_offload_config.json b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage2_offload_config.json new file mode 100644 index 
0000000000000000000000000000000000000000..98baedef38a86fe1cef3d5e1a8659b6b08eb275b --- /dev/null +++ b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage2_offload_config.json @@ -0,0 +1,47 @@ +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto", + "torch_adam": true, + "adam_w_mode": true + } + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", + "total_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": "auto", + "contiguous_gradients": true + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage3_config.json b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage3_config.json new file mode 100644 index 0000000000000000000000000000000000000000..2ec6fff41a79b4f1ad0d1849cbba640de67b113d --- /dev/null +++ b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage3_config.json @@ -0,0 +1,44 @@ +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto" + } + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", + "total_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 3, + "overlap_comm": true, + "contiguous_gradients": true, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "sub_group_size": 1e9, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": "auto" + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage3_offload_config.json b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage3_offload_config.json new file mode 100644 index 0000000000000000000000000000000000000000..edae8e6286e632d40fffd990b39bbde5d7409c03 --- /dev/null +++ b/testbed/huggingface__accelerate/examples/deepspeed_config_templates/zero_stage3_offload_config.json @@ -0,0 +1,52 @@ +{ + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto" + } + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": 
"auto", + "total_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "sub_group_size": 1e9, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": "auto" + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/examples/nlp_example.py b/testbed/huggingface__accelerate/examples/nlp_example.py new file mode 100644 index 0000000000000000000000000000000000000000..a126b5dd5719613376a5cf4cacdb2c7b80db1fad --- /dev/null +++ b/testbed/huggingface__accelerate/examples/nlp_example.py @@ -0,0 +1,192 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +######################################################################## +# This is a fully working simple example to use Accelerate +# +# This example trains a Bert base model on GLUE MRPC +# in any of the following settings (with the same script): +# - single CPU or single GPU +# - multi GPUS (using PyTorch distributed mode) +# - (multi) TPUs +# - fp16 (mixed-precision) or fp32 (normal precision) +# +# To run it in each of these various modes, follow the instructions +# in the readme for examples: +# https://github.com/huggingface/accelerate/tree/main/examples +# +######################################################################## + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): + """ + Creates a set of `DataLoader`s for the `glue` dataset, + using "bert-base-cased" as the tokenizer. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. 
+ """ + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + # starting with the main process first: + with accelerator.main_process_first(): + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + + metric = evaluate.load("glue", "mrpc") + + # If the batch size is too big we use gradient accumulation + gradient_accumulation_steps = 1 + if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: + gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE + batch_size = MAX_GPU_BATCH_SIZE + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) + + # We could avoid this line since the accelerator is set with `device_placement=True` (default value). + # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer + # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). + model = model.to(accelerator.device) + + # Instantiate optimizer + optimizer = AdamW(params=model.parameters(), lr=lr) + + # Instantiate scheduler + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=100, + num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, + ) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
+    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+    )
+
+    # Now we train the model
+    for epoch in range(num_epochs):
+        model.train()
+        for step, batch in enumerate(train_dataloader):
+            # We could avoid this line since we set the accelerator with `device_placement=True`.
+            batch.to(accelerator.device)
+            outputs = model(**batch)
+            loss = outputs.loss
+            loss = loss / gradient_accumulation_steps
+            accelerator.backward(loss)
+            if step % gradient_accumulation_steps == 0:
+                optimizer.step()
+                lr_scheduler.step()
+                optimizer.zero_grad()
+
+        model.eval()
+        for step, batch in enumerate(eval_dataloader):
+            # We could avoid this line since we set the accelerator with `device_placement=True`.
+            batch.to(accelerator.device)
+            with torch.no_grad():
+                outputs = model(**batch)
+            predictions = outputs.logits.argmax(dim=-1)
+            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
+            metric.add_batch(
+                predictions=predictions,
+                references=references,
+            )
+
+        eval_metric = metric.compute()
+        # Use accelerator.print to print only on the main process.
+        accelerator.print(f"epoch {epoch}:", eval_metric)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Simple example of training script.")
+    parser.add_argument(
+        "--mixed_precision",
+        type=str,
+        default="no",
+        choices=["no", "fp16", "bf16"],
+        help="Whether to use mixed precision. Choose "
+        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
+        "and an Nvidia Ampere GPU.",
+    )
+    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
+    args = parser.parse_args()
+    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
+    training_function(config, args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/testbed/huggingface__accelerate/examples/requirements.txt b/testbed/huggingface__accelerate/examples/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..912986bf1d0012e4c857cd61d158ca0e36c76f0d
--- /dev/null
+++ b/testbed/huggingface__accelerate/examples/requirements.txt
@@ -0,0 +1,3 @@
+accelerate # used to be installed in Amazon SageMaker environment
+evaluate
+datasets==2.3.2
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_1.py b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..81ec0c965f6dae924f607f713ba6559457456fc1
--- /dev/null
+++ b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_1.py
@@ -0,0 +1,108 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
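+
+# These Scene classes target Manim Community; a typical (illustrative) command to
+# render and preview this file's scene is:
+#   manim -pql stage_1.py Stage1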
+ +from manim import * + + +class Stage1(Scene): + def construct(self): + mem = Rectangle(height=0.5,width=0.5) + fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) + + cpu_left_col_base = [mem.copy() for i in range(6)] + cpu_right_col_base = [mem.copy() for i in range(6)] + cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) + cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) + cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) + cpu_text = Text("CPU", font_size=24) + cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + cpu.move_to([-2.5,-.5,0]) + self.add(cpu) + + gpu_base = [mem.copy() for i in range(1)] + gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) + gpu_text = Text("GPU", font_size=24) + gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + gpu.align_to(cpu, DOWN) + gpu.set_x(gpu.get_x() - 1) + + self.add(gpu) + + model_base = [mem.copy() for i in range(6)] + model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) + + model_text = Text("Model", font_size=24) + model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + model.move_to([3, -1., 0]) + + self.play( + Create(cpu_left_col, run_time=1), + Create(cpu_right_col, run_time=1), + Create(gpu_rect, run_time=1), + ) + + step_1 = MarkupText( + f"First, an empty model skeleton is loaded\ninto memory without using much RAM.", + font_size=24 + ) + + key = Square(side_length=2.2) + key.move_to([-5, 2, 0]) + + key_text = MarkupText( + f"Key:\n\n Empty Model", + font_size=18, + ) + + key_text.move_to([-5, 2.4, 0]) + + + step_1.move_to([2, 2, 0]) + self.play( + Write(step_1, run_time=2.5), + Write(key_text), + Write(key) + ) + + self.add(model) + + + cpu_targs = [] + first_animations = [] + second_animations = [] + for i,rect in enumerate(model_base): + + cpu_target = Rectangle(height=0.46,width=0.46).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) + cpu_target.move_to(rect) + cpu_target.generate_target() + cpu_target.target.height = 0.46/4 + cpu_target.target.width = 0.46/3 + + if i == 0: + cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) + cpu_target.target.set_x(cpu_target.target.get_x()+0.1) + elif i == 3: + cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.) + else: + cpu_target.target.next_to(cpu_targs[i-1].target, direction=RIGHT, buff=0.) + cpu_targs.append(cpu_target) + + first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW)) + second_animations.append(MoveToTarget(cpu_target, run_time=1.5)) + + self.play(*first_animations) + self.play(*second_animations) + + + self.wait() \ No newline at end of file diff --git a/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_2.py b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_2.py new file mode 100644 index 0000000000000000000000000000000000000000..a30e9593b7dc2e989c4105bd59a7dfb63a100e49 --- /dev/null +++ b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_2.py @@ -0,0 +1,126 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from manim import * + +class Stage2(Scene): + def construct(self): + mem = Rectangle(height=0.5,width=0.5) + fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) + + cpu_left_col_base = [mem.copy() for i in range(6)] + cpu_right_col_base = [mem.copy() for i in range(6)] + cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) + cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) + cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) + cpu_text = Text("CPU", font_size=24) + cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + cpu.move_to([-2.5,-.5,0]) + self.add(cpu) + + gpu_base = [mem.copy() for i in range(4)] + gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) + gpu_text = Text("GPU", font_size=24) + gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + gpu.move_to([-1,-1,0]) + self.add(gpu) + + model_base = [mem.copy() for i in range(6)] + model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) + + model_text = Text("Model", font_size=24) + model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + model.move_to([3, -1., 0]) + self.add(model) + + cpu_targs = [] + for i,rect in enumerate(model_base): + rect.set_stroke(YELLOW) + # target = fill.copy().set_fill(YELLOW, opacity=0.7) + # target.move_to(rect) + # self.add(target) + + cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) + + if i == 0: + cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) + cpu_target.set_x(cpu_target.get_x()+0.1) + elif i == 3: + cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.) + else: + cpu_target.next_to(cpu_targs[i-1], direction=RIGHT, buff=0.) 
+ self.add(cpu_target) + cpu_targs.append(cpu_target) + + + + checkpoint_base = [mem.copy() for i in range(6)] + checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0) + + checkpoint_text = Text("Loaded Checkpoint", font_size=24) + checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4) + checkpoint.move_to([3, .5, 0]) + + key = Square(side_length=2.2) + key.move_to([-5, 2, 0]) + + key_text = MarkupText( + f"Key:\n\n Empty Model", + font_size=18, + ) + + key_text.move_to([-5, 2.4, 0]) + + self.add(key_text, key) + + blue_text = MarkupText( + f" Checkpoint", + font_size=18, + ) + + blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) + + step_2 = MarkupText( + f'Next, a second model is loaded into memory,\nwith the weights of a single shard.', + font_size=24 + ) + step_2.move_to([2, 2, 0]) + self.play( + Write(step_2), + Write(blue_text) + ) + + self.play( + Write(checkpoint_text, run_time=1), + Create(checkpoint_rect, run_time=1) + ) + + first_animations = [] + second_animations = [] + for i,rect in enumerate(checkpoint_base): + target = fill.copy().set_fill(BLUE, opacity=0.7) + target.move_to(rect) + first_animations.append(GrowFromCenter(target, run_time=1)) + + cpu_target = target.copy() + cpu_target.generate_target() + if i < 5: + cpu_target.target.move_to(cpu_left_col_base[i+1]) + else: + cpu_target.target.move_to(cpu_right_col_base[i-5]) + second_animations.append(MoveToTarget(cpu_target, run_time=1.5)) + + self.play(*first_animations) + self.play(*second_animations) + self.wait() \ No newline at end of file diff --git a/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_3.py b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_3.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba20c4b523ab773949030aafb1c9fd0e0822a7f --- /dev/null +++ b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_3.py @@ -0,0 +1,158 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from manim import * + +class Stage3(Scene): + def construct(self): + mem = Rectangle(height=0.5,width=0.5) + meta_mem = Rectangle(height=0.25,width=0.25) + fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) + + cpu_left_col_base = [mem.copy() for i in range(6)] + cpu_right_col_base = [mem.copy() for i in range(6)] + cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) + cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) + cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) + cpu_text = Text("CPU", font_size=24) + cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + cpu.move_to([-2.5,-.5,0]) + self.add(cpu) + + gpu_base = [mem.copy() for i in range(4)] + gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) + gpu_text = Text("GPU", font_size=24) + gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + gpu.move_to([-1,-1,0]) + self.add(gpu) + + model_base = [mem.copy() for i in range(6)] + model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) + + model_text = Text("Model", font_size=24) + model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + model.move_to([3, -1., 0]) + self.add(model) + + model_arr = [] + model_cpu_arr = [] + model_meta_arr = [] + + for i,rect in enumerate(model_base): + rect.set_stroke(YELLOW) + + cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) + + if i == 0: + cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) + cpu_target.set_x(cpu_target.get_x()+0.1) + elif i == 3: + cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.) + else: + cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.) + self.add(cpu_target) + model_cpu_arr.append(cpu_target) + + self.add(*model_arr, *model_cpu_arr, *model_meta_arr) + + checkpoint_base = [mem.copy() for i in range(6)] + checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0) + + checkpoint_text = Text("Loaded Checkpoint", font_size=24) + checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + checkpoint.move_to([3, .5, 0]) + + self.add(checkpoint) + + ckpt_arr = [] + ckpt_cpu_arr = [] + + for i,rect in enumerate(checkpoint_base): + target = fill.copy().set_fill(BLUE, opacity=0.7) + target.move_to(rect) + ckpt_arr.append(target) + + cpu_target = target.copy() + if i < 5: + cpu_target.move_to(cpu_left_col_base[i+1]) + else: + cpu_target.move_to(cpu_right_col_base[i-5]) + ckpt_cpu_arr.append(cpu_target) + self.add(*ckpt_arr, *ckpt_cpu_arr) + + key = Square(side_length=2.2) + key.move_to([-5, 2, 0]) + + key_text = MarkupText( + f"Key:\n\n Empty Model", + font_size=18, + ) + + key_text.move_to([-5, 2.4, 0]) + + self.add(key_text, key) + + blue_text = MarkupText( + f" Checkpoint", + font_size=18, + ) + + blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) + self.add(blue_text) + + step_3 = MarkupText( + f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.', + font_size=24 + ) + step_3.move_to([2, 2, 0]) + + disk_left_col_base = [meta_mem.copy() for i in range(6)] + disk_right_col_base = [meta_mem.copy() for i in range(6)] + disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0) + disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0) + disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0) + disk_text = Text("Disk", font_size=24) + disk = 
Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + disk.move_to([-4.,-1.25,0]) + self.play( + Write(step_3, run_time=3), + Write(disk_text, run_time=1), + Create(disk_rects, run_time=1) + ) + + animations = [] + for i,rect in enumerate(ckpt_cpu_arr): + target = rect.copy() + target.generate_target() + target.target.move_to(disk_left_col_base[i]).scale(0.5) + animations.append(MoveToTarget(target, run_time=1.5)) + self.play(*animations) + + self.play(FadeOut(step_3)) + + step_4 = MarkupText( + f'Then, the checkpoint is removed from memory\nthrough garbage collection.', + font_size=24 + ) + step_4.move_to([2, 2, 0]) + + self.play( + Write(step_4, run_time=3) + ) + + self.play( + FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr), + ) + + self.wait() \ No newline at end of file diff --git a/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_4.py b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_4.py new file mode 100644 index 0000000000000000000000000000000000000000..3a79ad97cb1fa6cc029d25f88581bf3525baeefb --- /dev/null +++ b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_4.py @@ -0,0 +1,156 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from manim import * + +class Stage4(Scene): + def construct(self): + mem = Rectangle(height=0.5,width=0.5) + fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) + meta_mem = Rectangle(height=0.25,width=0.25) + + cpu_left_col_base = [mem.copy() for i in range(6)] + cpu_right_col_base = [mem.copy() for i in range(6)] + cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) + cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) + cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) + cpu_text = Text("CPU", font_size=24) + cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + cpu.move_to([-2.5,-.5,0]) + self.add(cpu) + + gpu_base = [mem.copy() for i in range(4)] + gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) + gpu_text = Text("GPU", font_size=24) + gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + gpu.move_to([-1,-1,0]) + self.add(gpu) + + model_base = [mem.copy() for i in range(6)] + model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) + + model_text = Text("Model", font_size=24) + model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + model.move_to([3, -1., 0]) + self.add(model) + + model_cpu_arr = [] + model_meta_arr = [] + + for i,rect in enumerate(model_base): + rect.set_stroke(YELLOW) + + cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) + + if i == 0: + cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) + cpu_target.set_x(cpu_target.get_x()+0.1) + elif i == 3: + cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.) 
+ else: + cpu_target.next_to(model_cpu_arr[i-1], direction=RIGHT, buff=0.) + self.add(cpu_target) + model_cpu_arr.append(cpu_target) + + self.add(*model_cpu_arr, *model_meta_arr) + + disk_left_col_base = [meta_mem.copy() for i in range(6)] + disk_right_col_base = [meta_mem.copy() for i in range(6)] + disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0) + disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0) + disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0) + disk_text = Text("Disk", font_size=24) + disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + disk.move_to([-4.,-1.25,0]) + self.add(disk_text, disk_rects) + + cpu_disk_arr = [] + + for i in range(6): + target = fill.copy().set_fill(BLUE, opacity=0.8) + target.move_to(disk_left_col_base[i]).scale(0.5) + cpu_disk_arr.append(target) + + self.add(*cpu_disk_arr) + + key = Square(side_length=2.2) + key.move_to([-5, 2, 0]) + + key_text = MarkupText( + f"Key:\n\n Empty Model", + font_size=18, + ) + + key_text.move_to([-5, 2.4, 0]) + + self.add(key_text, key) + + blue_text = MarkupText( + f" Checkpoint", + font_size=18, + ) + + blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) + self.add(blue_text) + + step_5 = MarkupText( + f'The offloaded weights are all sent to the CPU.', + font_size=24 + ) + step_5.move_to([2, 2, 0]) + + self.play(Write(step_5, run_time=3)) + + for i in range(6): + rect = cpu_disk_arr[i] + cp2 = rect.copy().set_fill(BLUE, opacity=0.8).scale(2.0) + cp2.generate_target() + cp2.target.move_to(model_base[i]) + + if i == 0: + rect.set_fill(BLUE, opacity=0.8) + rect.generate_target() + rect.target.move_to(cpu_left_col_base[0]).scale(2.0) + + self.remove(*model_meta_arr, + *model_cpu_arr, + ) + + else: + rect.generate_target() + rect.target.move_to(cpu_left_col_base[i]).scale(2.0) + self.play( + MoveToTarget(rect), + MoveToTarget(cp2), + model_base[i].animate.set_stroke(WHITE) + ) + self.play(FadeOut(step_5)) + + step_5 = MarkupText( + f'Finally, hooks are added to each weight in the model\nto transfer the weights from CPU to GPU\n\t\tand back when needed.', + font_size=24 + ) + step_5.move_to([2, 2, 0]) + + self.play(Write(step_5, run_time=3)) + + arrows = [] + animations = [] + for i in range(6): + a = Arrow(start=UP, end=DOWN, color=RED, buff=.5) + a.next_to(model_base[i].get_left(), UP, buff=0.2) + arrows.append(a) + animations.append(Write(a)) + self.play(*animations) + self.wait() \ No newline at end of file diff --git a/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_5.py b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_5.py new file mode 100644 index 0000000000000000000000000000000000000000..8b2ff33577ead0d32f54818998427c845d3cfb63 --- /dev/null +++ b/testbed/huggingface__accelerate/manim_animations/big_model_inference/stage_5.py @@ -0,0 +1,221 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from manim import * + +class Stage5(Scene): + def construct(self): + mem = Rectangle(height=0.5,width=0.5) + fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) + + meta_mem = Rectangle(height=0.25,width=0.25) + + cpu_left_col_base = [mem.copy() for i in range(6)] + cpu_right_col_base = [mem.copy() for i in range(6)] + cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) + cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) + cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) + cpu_text = Text("CPU", font_size=24) + cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + cpu.move_to([-2.5,-.5,0]) + self.add(cpu) + + gpu_base = [mem.copy() for i in range(4)] + gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) + gpu_text = Text("GPU", font_size=24) + gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + gpu.move_to([-1,-1,0]) + self.add(gpu) + + model_base = [mem.copy() for i in range(6)] + model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) + + model_text = Text("Model", font_size=24) + model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + model.move_to([3, -1., 0]) + self.add(model) + + model_arr = [] + model_cpu_arr = [] + + for i,rect in enumerate(model_base): + target = fill.copy().set_fill(BLUE, opacity=0.8) + target.move_to(rect) + model_arr.append(target) + + cpu_target = Rectangle(height=0.46,width=0.46).set_stroke(width=0.).set_fill(BLUE, opacity=0.8) + cpu_target.move_to(cpu_left_col_base[i]) + model_cpu_arr.append(cpu_target) + + self.add(*model_arr, *model_cpu_arr) + + disk_left_col_base = [meta_mem.copy() for i in range(6)] + disk_right_col_base = [meta_mem.copy() for i in range(6)] + disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0) + disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0) + disk_rects = VGroup(disk_left_col,disk_right_col).arrange(RIGHT, buff=0) + disk_text = Text("Disk", font_size=24) + disk = Group(disk_rects,disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) + disk.move_to([-4,-1.25,0]) + self.add(disk_text, disk_rects) + + key = Square(side_length=2.2) + key.move_to([-5, 2, 0]) + + key_text = MarkupText( + f"Key:\n\n Empty Model", + font_size=18, + ) + + key_text.move_to([-5, 2.4, 0]) + + self.add(key_text, key) + + blue_text = MarkupText( + f" Checkpoint", + font_size=18, + ) + + blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) + self.add(blue_text) + + step_6 = MarkupText( + f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.', + font_size=24 + ) + step_6.move_to([2, 2, 0]) + + self.play(Write(step_6)) + + input = Square(0.3) + input.set_fill(RED, opacity=1.) + input.set_stroke(width=0.) 
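+        # (The red square stands in for a batch of inputs travelling through the model.)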
+ input.next_to(model_base[0], LEFT, buff=.5) + + self.play(Write(input)) + + input.generate_target() + input.target.next_to(model_arr[0], direction=LEFT, buff=0.02) + self.play(MoveToTarget(input)) + + self.play(FadeOut(step_6)) + + + a = Arrow(start=UP, end=DOWN, color=RED, buff=.5) + a.next_to(model_arr[0].get_left(), UP, buff=0.2) + + model_cpu_arr[0].generate_target() + model_cpu_arr[0].target.move_to(gpu_rect[0]) + + step_7 = MarkupText( + f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.', + font_size=24 + ) + step_7.move_to([2, 2, 0]) + + self.play(Write(step_7, run_time=3)) + + circ_kwargs = {"run_time":1, "fade_in":True, "fade_out":True, "buff":0.02} + + self.play( + Write(a), + Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs), + Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs), + Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), + ) + self.play( + MoveToTarget(model_cpu_arr[0]) + ) + + a_c = a.copy() + for i in range(6): + a_c.next_to(model_arr[i].get_right()+0.02, UP, buff=0.2) + + input.generate_target() + input.target.move_to(model_arr[i].get_right()+0.02) + + grp = AnimationGroup( + FadeOut(a, run_time=.5), + MoveToTarget(input, run_time=.5), + FadeIn(a_c, run_time=.5), + lag_ratio=0.2 + ) + + self.play(grp) + + + model_cpu_arr[i].generate_target() + model_cpu_arr[i].target.move_to(cpu_left_col_base[i]) + + + if i < 5: + model_cpu_arr[i+1].generate_target() + model_cpu_arr[i+1].target.move_to(gpu_rect[0]) + if i >= 1: + circ_kwargs["run_time"] = .7 + + self.play( + Circumscribe(model_arr[i], **circ_kwargs), + Circumscribe(cpu_left_col_base[i], **circ_kwargs), + Circumscribe(cpu_left_col_base[i+1], color=ORANGE, **circ_kwargs), + Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), + Circumscribe(model_arr[i+1], color=ORANGE, **circ_kwargs), + ) + if i < 1: + self.play( + MoveToTarget(model_cpu_arr[i]), + MoveToTarget(model_cpu_arr[i+1]), + ) + else: + self.play( + MoveToTarget(model_cpu_arr[i], run_time=.7), + MoveToTarget(model_cpu_arr[i+1], run_time=.7), + ) + else: + model_cpu_arr[i].generate_target() + model_cpu_arr[i].target.move_to(cpu_left_col_base[-1]) + input.generate_target() + input.target.next_to(model_arr[-1].get_right(), RIGHT+0.02, buff=0.2) + + self.play( + Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs), + Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs), + Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), + ) + + self.play( + MoveToTarget(model_cpu_arr[i]) + ) + + a = a_c + a_c = a_c.copy() + + input.generate_target() + input.target.next_to(model_base[-1], RIGHT+0.02, buff=.5) + self.play( + FadeOut(step_7), + FadeOut(a, run_time=.5), + ) + + step_8 = MarkupText( + f'Inference on a model too large for GPU memory\nis successfully completed.', font_size=24 + ) + step_8.move_to([2, 2, 0]) + + self.play( + Write(step_8, run_time=3), + MoveToTarget(input) + ) + + self.wait() \ No newline at end of file diff --git a/testbed/huggingface__accelerate/pyproject.toml b/testbed/huggingface__accelerate/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..b7465bb131070293a66bfb271a2c979495bf1a6b --- /dev/null +++ b/testbed/huggingface__accelerate/pyproject.toml @@ -0,0 +1,3 @@ +[tool.black] +line-length = 119 +target-version = ['py36'] diff --git a/testbed/huggingface__accelerate/setup.py b/testbed/huggingface__accelerate/setup.py new file mode 100644 index 
0000000000000000000000000000000000000000..f96ebc440d70dbaec6f51f0c95cbf89743a048b3 --- /dev/null +++ b/testbed/huggingface__accelerate/setup.py @@ -0,0 +1,87 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from setuptools import setup +from setuptools import find_packages + +extras = {} +extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3", "hf-doc-builder >= 0.3.0"] +extras["docs"] = [] +extras["test_prod"] = ["pytest", "pytest-xdist", "pytest-subtests", "parameterized"] +extras["test_dev"] = ["datasets", "evaluate", "transformers", "scipy", "scikit-learn", "deepspeed<0.7.0", "tqdm"] +extras["testing"] = extras["test_prod"] + extras["test_dev"] +extras["rich"] = ["rich"] + +extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"] +extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"] + +extras["sagemaker"] = [ + "sagemaker", # boto3 is a required package in sagemaker +] + +setup( + name="accelerate", + version="0.15.0.dev0", + description="Accelerate", + long_description=open("README.md", "r", encoding="utf-8").read(), + long_description_content_type="text/markdown", + keywords="deep learning", + license="Apache", + author="The HuggingFace team", + author_email="sylvain@huggingface.co", + url="https://github.com/huggingface/accelerate", + package_dir={"": "src"}, + packages=find_packages("src"), + entry_points={ + "console_scripts": [ + "accelerate=accelerate.commands.accelerate_cli:main", + "accelerate-config=accelerate.commands.config:main", + "accelerate-launch=accelerate.commands.launch:main", + ] + }, + python_requires=">=3.7.0", + install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.4.0"], + extras_require=extras, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + ], +) + +# Release checklist +# 1. Change the version in __init__.py and setup.py. +# 2. Commit these changes with the message: "Release: VERSION" +# 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' " +# Push the tag to git: git push --tags origin main +# 4. Run the following commands in the top-level directory: +# python setup.py bdist_wheel +# python setup.py sdist +# 5. Upload the package to the pypi test server first: +# twine upload dist/* -r pypitest +# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ +# 6. Check that you can install it in a virtualenv by running: +# pip install -i https://testpypi.python.org/pypi accelerate +# accelerate env +# accelerate test +# 7. Upload the final version to actual pypi: +# twine upload dist/* -r pypi +# 8. 
Add release notes to the tag in github once everything is looking hunky-dory. +# 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master diff --git a/testbed/huggingface__accelerate/src/accelerate/__init__.py b/testbed/huggingface__accelerate/src/accelerate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9129fc1a56c18127619d1257a92596ec8f07b371 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/__init__.py @@ -0,0 +1,33 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +__version__ = "0.15.0.dev0" + +from .accelerator import Accelerator +from .big_modeling import ( + cpu_offload, + disk_offload, + dispatch_model, + init_empty_weights, + init_on_device, + load_checkpoint_and_dispatch, +) +from .launchers import debug_launcher, notebook_launcher +from .utils import ( + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + FullyShardedDataParallelPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + find_executable_batch_size, + infer_auto_device_map, + is_rich_available, + load_checkpoint_in_model, + synchronize_rng_states, +) + + +if is_rich_available(): + from .utils import rich diff --git a/testbed/huggingface__accelerate/src/accelerate/accelerator.py b/testbed/huggingface__accelerate/src/accelerate/accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..7075c4dcdb69034269de78a0feba178d664b4bd5 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/accelerator.py @@ -0,0 +1,1920 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
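+
+# Canonical usage of the `Accelerator` defined below (a minimal sketch; `model`,
+# `optimizer`, `dataloader` and `loss` stand in for user-created objects):
+#
+#   accelerator = Accelerator()
+#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+#   for batch in dataloader:
+#       loss = model(**batch).loss
+#       accelerator.backward(loss)
+#       optimizer.step()
+#       optimizer.zero_grad()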
+ +import contextlib +import math +import os +import shutil +import sys +import warnings +from contextlib import contextmanager +from functools import wraps +from typing import List, Optional, Union + +import torch + +from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state +from .data_loader import DataLoaderDispatcher, prepare_data_loader +from .logging import get_logger +from .optimizer import AcceleratedOptimizer +from .scheduler import AcceleratedScheduler +from .state import AcceleratorState, GradientState, parse_flag_from_env +from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers +from .utils import ( + MODEL_NAME, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FullyShardedDataParallelPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + PrecisionType, + ProjectConfiguration, + RNGType, + compare_versions, + convert_outputs_to_fp32, + extract_model_from_parallel, + gather, + get_pretty_name, + is_bf16_available, + is_deepspeed_available, + is_megatron_lm_available, + is_torch_version, + is_tpu_available, + pad_across_processes, + recursively_apply, + reduce, + release_memory, + save, + wait_for_everyone, +) + + +if is_deepspeed_available(): + import deepspeed + + from .utils import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, + ) + +if is_megatron_lm_available(): + from .utils import ( + MegatronEngine, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + megatron_lm_initialize, + megatron_lm_prepare_data_loader, + megatron_lm_prepare_model, + megatron_lm_prepare_optimizer, + megatron_lm_prepare_scheduler, + ) + +if is_torch_version(">", "1.10.0"): + from torch.distributed.algorithms.join import Join + + +if is_tpu_available(check_device=False): + import torch_xla.distributed.xla_multiprocessing as xmp + + +if is_torch_version("<=", "1.13.5"): + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler +else: + from torch.optim.lr_scheduler import LRScheduler as LRScheduler + +logger = get_logger(__name__) + + +class Accelerator: + """ + Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training. + + Args: + device_placement (`bool`, *optional*, defaults to `True`): + Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model, + etc...). + split_batches (`bool`, *optional*, defaults to `False`): + Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If + `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a + round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set + in your script multiplied by the number of processes. + mixed_precision (`str`, *optional*): + Whether or not to use mixed precision training (fp16 or bfloat16). Choose from 'no','fp16','bf16'. Will + default to the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default + value in the accelerate config of the current system or the flag passed with the `accelerate.launch` + command. 'fp16' requires pytorch 1.6 or higher. 'bf16' requires pytorch 1.10 or higher. 
+        gradient_accumulation_steps (`int`, *optional*, defaults to 1):
+            The number of steps gradients should be accumulated over before each optimizer step. A number > 1 should
+            be combined with `Accelerator.accumulate`.
+        cpu (`bool`, *optional*):
+            Whether or not to force the script to execute on CPU. Will ignore any available GPU if set to `True` and
+            force the execution on one process only.
+        deepspeed_plugin (`DeepSpeedPlugin`, *optional*):
+            Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured
+            directly using *accelerate config*
+        fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*):
+            Tweak your FSDP related args using this argument. This argument is optional and can be configured directly
+            using *accelerate config*
+        megatron_lm_plugin (`MegatronLMPlugin`, *optional*):
+            Tweak your MegatronLM related args using this argument. This argument is optional and can be configured
+            directly using *accelerate config*
+        rng_types (list of `str` or [`~utils.RNGType`]):
+            The list of random number generators to synchronize at the beginning of each iteration in your prepared
+            dataloaders. Should be one or several of:
+
+            - `"torch"`: the base torch random number generator
+            - `"cuda"`: the CUDA random number generator (GPU only)
+            - `"xla"`: the XLA random number generator (TPU only)
+            - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+              dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+            Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
+        log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
+            A list of loggers to be set up for experiment tracking. Should be one or several of:
+
+            - `"all"`
+            - `"tensorboard"`
+            - `"wandb"`
+            - `"comet_ml"`
+            If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
+            also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
+        project_config (`ProjectConfiguration`, *optional*):
+            A configuration for how saving the state can be handled.
+        project_dir (`str`, `os.PathLike`, *optional*):
+            A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
+            checkpoints.
+        dispatch_batches (`bool`, *optional*):
+            If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process
+            and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose
+            underlying dataset is an `IterableDataset`, `False` otherwise.
+        even_batches (`bool`, *optional*, defaults to `True`):
+            If set to `True`, in cases where the total batch size across all processes does not exactly divide the
+            dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
+            all workers.
+        step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
+            Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
+            done under certain circumstances (at the end of each epoch, for instance).
+        kwargs_handlers (`List[KwargsHandler]`, *optional*):
+            A list of `KwargsHandler` to customize how the objects related to distributed training or mixed precision
+            are created. See [kwargs](kwargs) for more information.
+ dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `"no"`): + Set to one of the possible dynamo backends to optimize your training with torch dynamo. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration. + - **local_process_index** (`int`) -- The process index on the current machine. + - **mixed_precision** (`str`) -- The configured mixed precision mode. + - **num_processes** (`int`) -- The total number of processes used for training. + - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of + gradient overflow in mixed precision), in which + case the learning rate should not be changed. + - **process_index** (`int`) -- The overall index of the current process among all processes. + - **state** ([`~state.AcceleratorState`]) -- The distributed setup state. + - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes. + - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training. + """ + + def __init__( + self, + device_placement: bool = True, + split_batches: bool = False, + mixed_precision: Union[PrecisionType, str] = None, + gradient_accumulation_steps: int = 1, + cpu: bool = False, + deepspeed_plugin: DeepSpeedPlugin = None, + fsdp_plugin: FullyShardedDataParallelPlugin = None, + megatron_lm_plugin: MegatronLMPlugin = None, + rng_types: Optional[List[Union[str, RNGType]]] = None, + log_with: Optional[List[Union[str, LoggerType, GeneralTracker]]] = None, + project_dir: Optional[Union[str, os.PathLike]] = None, + project_config: Optional[ProjectConfiguration] = None, + logging_dir: Optional[Union[str, os.PathLike]] = None, + dispatch_batches: Optional[bool] = None, + even_batches: bool = True, + step_scheduler_with_optimizer: bool = True, + kwargs_handlers: Optional[List[KwargsHandler]] = None, + dynamo_backend: Union[DynamoBackend, str] = None, + ): + if project_config is not None: + self.project_configuration = project_config + else: + self.project_configuration = ProjectConfiguration(project_dir=project_dir) + + if logging_dir is not None: + warnings.warn( + "`logging_dir` is deprecated and will be removed in version 0.18.0 of 🤗 Accelerate. Use `project_dir` instead.", + FutureWarning, + ) + self.project_configuration.logging_dir = logging_dir + if project_dir is not None and self.project_dir is None: + self.project_configuration.project_dir = project_dir + if mixed_precision is not None: + mixed_precision = str(mixed_precision) + if mixed_precision not in PrecisionType: + raise ValueError( + f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}" + ) + + if dynamo_backend is not None: + dynamo_backend = DynamoBackend(dynamo_backend.upper()) + + if deepspeed_plugin is None: # init from env variables + deepspeed_plugin = ( + DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None + ) + else: + assert isinstance( + deepspeed_plugin, DeepSpeedPlugin + ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object." 
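+            # An explicitly passed plugin takes precedence over the environment, so mirror
+            # the choice into the env var for `AcceleratorState` and any child processes.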
+ os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided + if deepspeed_plugin: + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.") + if compare_versions("deepspeed", "<", "0.6.5"): + raise ImportError("DeepSpeed version must be >= 0.6.5. Please update DeepSpeed.") + + mixed_precision = ( + os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision + ) + deepspeed_plugin.set_mixed_precision(mixed_precision) + deepspeed_plugin.set_deepspeed_weakref() + + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance( + fsdp_plugin, FullyShardedDataParallelPlugin + ): + if is_torch_version("<", "1.12.0"): + raise ValueError("FSDP requires PyTorch >= 1.12.0") + + if fsdp_plugin is None: # init from env variables + fsdp_plugin = ( + FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None + ) + else: + if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin): + raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.") + os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided + + if megatron_lm_plugin is None: # init from env variables + megatron_lm_plugin = ( + MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None + ) + else: + if not isinstance(megatron_lm_plugin, MegatronLMPlugin): + raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.") + os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided + + if megatron_lm_plugin: + if not is_megatron_lm_available(): + raise ImportError("Megatron is not installed. please build it from source.") + + # Kwargs handlers + self.ddp_handler = None + self.scaler_handler = None + self.init_handler = None + if kwargs_handlers is not None: + for handler in kwargs_handlers: + assert isinstance( + handler, KwargsHandler + ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`." 
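+                # Route each handler into its dedicated slot below; passing two handlers
+                # of the same type raises an error.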
+                if isinstance(handler, DistributedDataParallelKwargs):
+                    if self.ddp_handler is not None:
+                        raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.")
+                    else:
+                        self.ddp_handler = handler
+                elif isinstance(handler, GradScalerKwargs):
+                    if self.scaler_handler is not None:
+                        raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handler`.")
+                    else:
+                        self.scaler_handler = handler
+                elif isinstance(handler, InitProcessGroupKwargs):
+                    if self.init_handler is not None:
+                        raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.")
+                    else:
+                        self.init_handler = handler
+
+        kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
+        self.state = AcceleratorState(
+            mixed_precision=mixed_precision,
+            cpu=cpu,
+            dynamo_backend=dynamo_backend,
+            deepspeed_plugin=deepspeed_plugin,
+            fsdp_plugin=fsdp_plugin,
+            megatron_lm_plugin=megatron_lm_plugin,
+            _from_accelerator=True,
+            **kwargs,
+        )
+
+        trackers = filter_trackers(log_with, self.logging_dir)
+        if len(trackers) < 1 and log_with is not None:
+            warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
+        self.log_with = trackers
+
+        if (
+            (mixed_precision != "bf16")
+            and getattr(self.state, "downcast_bfloat", False)
+            and (self.state.distributed_type != DistributedType.TPU)
+        ):
+            raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
+
+        if gradient_accumulation_steps > 1:
+            if self.state.distributed_type == DistributedType.TPU:
+                raise NotImplementedError(
+                    "Gradient accumulation on TPU is not supported. Pass in `gradient_accumulation_steps=1`"
+                )
+
+        self.gradient_accumulation_steps = gradient_accumulation_steps
+        self.device_placement = device_placement
+        self.split_batches = split_batches
+        self.dispatch_batches = dispatch_batches
+        if dispatch_batches is True and is_torch_version("<", "1.8.0"):
+            raise ImportError(
+                f"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}."
+ ) + self.even_batches = even_batches + self.step_scheduler_with_optimizer = step_scheduler_with_optimizer + + # Mixed precision attributes + self.scaler = None + self.native_amp = False + err = "{mode} mixed precision requires {requirement}" + if ( + self.state.mixed_precision == "fp16" + and self.device.type != "cpu" + and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM) + ): + self.native_amp = True + if not torch.cuda.is_available() and not parse_flag_from_env("ACCELERATE_USE_MPS_DEVICE"): + raise ValueError(err.format(mode="fp16", requirement="a GPU")) + kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {} + if self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler + + self.scaler = ShardedGradScaler(**kwargs) + else: + self.scaler = torch.cuda.amp.GradScaler(**kwargs) + elif self.state.mixed_precision == "bf16" and self.distributed_type not in ( + DistributedType.DEEPSPEED, + DistributedType.FSDP, + DistributedType.MEGATRON_LM, + ): + if self.device.type == "cpu": + self.native_amp = is_torch_version(">=", "1.10") + else: + self.native_amp = is_bf16_available(True) + if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available(): + raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device.")) + + # Only on the GPU do we care about scaling the gradients + if torch.cuda.is_available() and self.device.type != "cpu": + kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {} + self.scaler = torch.cuda.amp.GradScaler(**kwargs) + + # Start of internal step tracking + self.step = 0 + self.gradient_state = GradientState() + + # Internal references to the training objects + self._optimizers = [] + self._models = [] + self._schedulers = [] + self._dataloaders = [] + self._custom_objects = [] + + # RNG Types + self.rng_types = rng_types + if self.rng_types is None: + self.rng_types = ["generator"] + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return self.distributed_type != DistributedType.NO and self.num_processes > 1 + + @property + def distributed_type(self): + return self.state.distributed_type + + @property + def num_processes(self): + return self.state.num_processes + + @property + def process_index(self): + return self.state.process_index + + @property + def local_process_index(self): + return self.state.local_process_index + + @property + def device(self): + return self.state.device + + @property + def project_dir(self): + return self.project_configuration.project_dir + + @property + def logging_dir(self): + return self.project_configuration.logging_dir + + @property + def save_iteration(self): + return self.project_configuration.iteration + + @property + def is_main_process(self): + """True for one process only.""" + return ( + self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process + ) + + @property + def is_local_main_process(self): + """True for one process per server.""" + return ( + self.local_process_index == 0 + if self.distributed_type != DistributedType.MEGATRON_LM + else self.is_last_process + ) + + @property + def use_fp16(self): + return self.mixed_precision != "no" + + @property + def is_last_process(self): + return self.process_index == self.num_processes - 1 + + @property + def mixed_precision(self): + return self.state.mixed_precision 
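+
+    # A minimal usage sketch for the state properties above (hypothetical script code,
+    # left as a comment so nothing executes at import time):
+    #
+    #     accelerator = Accelerator()
+    #     print(f"process {accelerator.process_index}/{accelerator.num_processes} on {accelerator.device}")
+    #     if accelerator.is_main_process:
+    #         ...  # e.g. set up experiment tracking once per run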
+
+    def on_main_process(func):
+        """
+        A decorator that will run the decorated function on the main process only.
+        """
+
+        @wraps(func)
+        def wrapper(self, *args, **kwargs):
+            if self.is_main_process or not self.use_distributed:
+                return func(self, *args, **kwargs)
+
+        return wrapper
+
+    def on_local_main_process(func):
+        """
+        A decorator that will run the decorated function on the local main process only.
+        """
+
+        @wraps(func)
+        def wrapper(self, *args, **kwargs):
+            if self.is_local_main_process or not self.use_distributed:
+                return func(self, *args, **kwargs)
+
+        return wrapper
+
+    def on_last_process(func):
+        """
+        A decorator that will run the decorated function on the last process only.
+        """
+
+        @wraps(func)
+        def wrapper(self, *args, **kwargs):
+            if self.is_last_process or not self.use_distributed:
+                return func(self, *args, **kwargs)
+
+        return wrapper
+
+    def on_process(process_idx):
+        """
+        A decorator that will run the decorated function on a given process index only.
+        """
+
+        def decorator(func):
+            @wraps(func)
+            def wrapper(self, *args, **kwargs):
+                if self.process_index == process_idx or not self.use_distributed:
+                    return func(self, *args, **kwargs)
+
+            return wrapper
+
+        return decorator
+
+    def on_local_process(local_process_idx):
+        """
+        A decorator that will run the decorated function on a given local process index only.
+        """
+
+        def decorator(func):
+            @wraps(func)
+            def wrapper(self, *args, **kwargs):
+                if self.local_process_index == local_process_idx or not self.use_distributed:
+                    return func(self, *args, **kwargs)
+
+            return wrapper
+
+        return decorator
+
+    def _goes_first(self, is_main):
+        if not is_main:
+            self.wait_for_everyone()
+
+        yield
+
+        if is_main:
+            self.wait_for_everyone()
+
+    @contextmanager
+    def main_process_first(self):
+        """
+        Lets the main process go first inside a with block.
+
+        The other processes will enter the with block after the main process exits.
+        """
+        yield from self._goes_first(self.is_main_process)
+
+    @contextmanager
+    def local_main_process_first(self):
+        """
+        Lets the local main process go first inside a with block.
+
+        The other processes will enter the with block after the local main process exits.
+        """
+        yield from self._goes_first(self.is_local_main_process)
+
+    @contextmanager
+    def no_sync(self, model):
+        """
+        A context manager to disable gradient synchronizations across DDP processes by calling
+        `torch.nn.parallel.DistributedDataParallel.no_sync`.
+
+        If `model` is not in DDP, this context manager does nothing.
+
+        Args:
+            model (`torch.nn.Module`):
+                PyTorch Module that was prepared with `Accelerator.prepare`
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
+        >>> input_a = next(iter(dataloader))
+        >>> input_b = next(iter(dataloader))
+
+        >>> with accelerator.no_sync(model):
+        ...     outputs = model(input_a)
+        ...     loss = loss_func(outputs)
+        ...     accelerator.backward(loss)
+        ...     # No synchronization across processes, only accumulate gradients
+        >>> outputs = model(input_b)
+        >>> loss = loss_func(outputs)
+        >>> accelerator.backward(loss)
+        >>> # Synchronization across all processes
+        >>> optimizer.step()
+        >>> optimizer.zero_grad()
+        ```
+        """
+        context = contextlib.nullcontext
+        if self.use_distributed:
+            context = getattr(model, "no_sync", context)
+
+        with context():
+            yield
+
+    def _do_sync(self):
+        "Sets the right `sync_gradients` context and either resets or increases `self.step`"
+        if self.gradient_state.end_of_dataloader:
+            self.step = 0
+            self.gradient_state._set_sync_gradients(True)
+        else:
+            self.step += 1
+            self.gradient_state._set_sync_gradients((self.step % self.gradient_accumulation_steps) == 0)
+
+    @property
+    def sync_gradients(self):
+        return self.gradient_state.sync_gradients
+
+    @contextmanager
+    def accumulate(self, model):
+        """
+        A context manager that will lightly wrap around and perform gradient accumulation automatically
+
+        Args:
+            model (`torch.nn.Module`):
+                PyTorch Module that was prepared with `Accelerator.prepare`
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+        >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+        >>> for input, output in dataloader:
+        ...     with accelerator.accumulate(model):
+        ...         outputs = model(input)
+        ...         loss = loss_func(outputs)
+        ...         accelerator.backward(loss)
+        ...         optimizer.step()
+        ...         scheduler.step()
+        ...         optimizer.zero_grad()
+        ```
+        """
+        self._do_sync()
+        if self.sync_gradients:
+            context = contextlib.nullcontext
+        else:
+            context = self.no_sync
+
+        with context(model):
+            yield
+
+    @contextmanager
+    def join_uneven_inputs(self, joinables, even_batches=None):
+        """
+        A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
+        around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
+        length of the dataset.
+
+        Args:
+            joinables (`List[torch.distributed.algorithms.Joinable]`):
+                A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
+                PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.
+            even_batches (`bool`, *optional*):
+                If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
+                the default `Accelerator` value will be used.
+
+        `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
+        configuration, this method will have no effect.
+
+        Overriding `even_batches` will not affect iterable-style data loaders.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator(even_batches=True)
+        >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+
+        >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
+        ...     for input, output in dataloader:
+        ...         outputs = model(input)
+        ...         loss = loss_func(outputs)
+        ...         loss.backward()
+        ...         optimizer.step()
+        ...         optimizer.zero_grad()
+        ```
+        """
+        if is_torch_version("<", "1.10.0"):
+            raise ValueError(f"Joining uneven inputs requires PyTorch >= 1.10.0. You have {torch.__version__}.")
+
+        if self.distributed_type == DistributedType.MULTI_GPU:
+            dl_even_batches_values = []
+
+            if even_batches is not None:
+                iterable_dl_seen = False
+                # override value in batch sampler for map-style datasets
+                for dl_idx, dl in enumerate(self._dataloaders):
+                    if isinstance(dl, DataLoaderDispatcher):
+                        iterable_dl_seen = True
+                        continue
+                    dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
+                    dl.batch_sampler.even_batches = even_batches
+
+                if iterable_dl_seen:
+                    warnings.warn(
+                        "Overriding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
+                    )
+            else:
+                even_batches = self.even_batches
+
+            enable_join = not even_batches
+            try:
+                with Join(joinables, enable=enable_join, throw_on_early_termination=False):
+                    yield
+            finally:
+                # reset any batch samplers that have been modified
+                for dl_idx, even_batches_value in dl_even_batches_values:
+                    self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value
+        else:
+            # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs
+            if self.distributed_type != DistributedType.NO:
+                warnings.warn(
+                    "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect."
+                )
+
+            with contextlib.nullcontext(joinables):
+                yield
+
+    def print(self, *args, **kwargs):
+        """
+        Use in place of `print()` to only print once per server.
+        """
+        if self.is_local_main_process:
+            print(*args, **kwargs)
+
+    def _prepare_one(self, obj, first_pass=False, device_placement=None):
+        # First pass of preparation: DataLoader, model, optimizer
+        if first_pass:
+            if isinstance(obj, torch.utils.data.DataLoader):
+                return self.prepare_data_loader(obj, device_placement=device_placement)
+            elif isinstance(obj, torch.nn.Module):
+                return self.prepare_model(obj, device_placement=device_placement)
+            elif isinstance(obj, torch.optim.Optimizer):
+                optimizer = self.prepare_optimizer(obj, device_placement=device_placement)
+                return optimizer
+        # Second pass of preparation: LR scheduler (which needs the full list of optimizers)
+        elif isinstance(obj, LRScheduler):
+            scheduler = self.prepare_scheduler(obj)
+            return scheduler
+        # Return the unprocessed object if the previous criteria were not met
+        return obj
+
+    def _prepare_fsdp(self, *args):
+        result = []
+        for obj in args:
+            if isinstance(obj, torch.nn.Module):
+                model = obj
+                break
+        optimizers = []
+
+        self._schedulers = []
+        self._models = []
+        intermediate_result = []
+        for obj in args:
+            if isinstance(obj, torch.optim.Optimizer):
+                if len(obj.param_groups) > 1:
+                    logger.warning(
+                        "FSDP Warning: When using FSDP, several parameter groups will be conflated into "
+                        "a single one due to nested module wrapping and parameter flattening."
+ ) + try: + optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults) + except TypeError: + if "differentiable" in obj.optimizer.defaults: + # https://github.com/huggingface/accelerate/issues/801 + defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != "differentiable"} + optimizer = obj.optimizer.__class__(model.parameters(), **defaults) + else: + raise + obj = self.prepare_optimizer(optimizer) + optimizers.append(obj) + elif isinstance(obj, torch.nn.Module): + self._models.append(obj) + intermediate_result.append(obj) + + for obj in intermediate_result: + if isinstance(obj, AcceleratedScheduler): + obj.optimizer = optimizers + for i, opt in enumerate(self._optimizers): + if getattr(obj.scheduler, "optimizer", None) == opt.optimizer: + obj.scheduler.optimizer = optimizers[i] + obj.optimizers = [optimizers[i]] + break + self._schedulers.append(obj) + result.append(obj) + self._optimizers = optimizers + return tuple(result) + + def prepare(self, *args, device_placement=None): + """ + Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same + order. + + Args: + *args (list of objects): + Any of the following type of objects: + + - `torch.utils.data.DataLoader`: PyTorch Dataloader + - `torch.nn.Module`: PyTorch Module + - `torch.optim.Optimizer`: PyTorch Optimizer + - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler + + device_placement (`List[bool]`, *optional*): + Used to customize whether automatic device placement should be performed for each object passed. Needs + to be a list of the same length as `args`. + + + + You don't need to prepare a model if you only use it for inference without any kind of mixed precision + + + """ + if device_placement is None: + device_placement = [None for _ in args] + elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM): + raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.") + elif len(device_placement) != len(args): + raise ValueError( + f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)." + ) + + if self.distributed_type == DistributedType.FSDP: + model_count = 0 + optimizer_present = False + for obj in args: + if isinstance(obj, torch.nn.Module): + model_count += 1 + if isinstance(obj, torch.optim.Optimizer): + optimizer_present = True + if model_count > 1 and optimizer_present: + raise ValueError( + "For FSDP to work with multiple models (>1), " + "prepare must be called for all the models before optimizers are created. " + "Then pass the optimizers to the prepare call in the same order as corresponding models." + ) + elif model_count == 1 and optimizer_present: + logger.warning( + "FSDP Warning: When using FSDP, " + "it is efficient and recommended to call prepare for the model before creating the optimizer" + ) + + # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will + # have parameters disconnected from the model (so no training :-( ). + # If the model and optimizer have parameters on different devices we raise an error. 
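+        # The check below compares the device of one model parameter against the device of
+        # one optimizer parameter to catch that situation before training starts.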
+        if self.distributed_type == DistributedType.TPU:
+            model_device, optimizer_device = self._get_devices(*args)
+            if model_device is not None and optimizer_device is not None and model_device != optimizer_device:
+                raise ValueError(
+                    "The model and the optimizer parameters are not on the same device, which probably means you "
+                    "created an optimizer around your model **before** putting on the device. Make sure the line "
+                    "model.to(device) is before the optimizer creation in your script or remove it entirely and use "
+                    "the flag default value for `device_placement` in your `Accelerator` to let it handle that "
+                    "part for you."
+                )
+
+        # If we're dealing with device placement on TPU, the optimizer is re-pointed at the
+        # parameters created by the move, by...
+        tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU
+        if tpu_should_fix_optimizer:
+            # 1. grabbing old model parameters
+            old_named_params = self._get_named_parameters(*args)
+
+        if self.distributed_type == DistributedType.DEEPSPEED:
+            result = self._prepare_deepspeed(*args)
+        elif self.distributed_type == DistributedType.MEGATRON_LM:
+            result = self._prepare_megatron_lm(*args)
+        else:
+            result = tuple(
+                self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
+            )
+            result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
+
+        if tpu_should_fix_optimizer:
+            # 2. grabbing new model parameters
+            new_named_params = self._get_named_parameters(*result)
+            # 3. building a map from the first to the second
+            mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
+            # 4. using that map to update the parameters of the optimizer
+            for obj in result:
+                if isinstance(obj, torch.optim.Optimizer):
+                    obj._switch_parameters(mapping)
+
+        if self.distributed_type == DistributedType.FSDP and model_count == 1 and optimizer_present:
+            result = self._prepare_fsdp(*result)
+
+        return result if len(result) > 1 else result[0]
+
+    def prepare_model(self, model: torch.nn.Module, device_placement=None):
+        """
+        Prepares a PyTorch model for training in any distributed setup. It is recommended to use
+        [`Accelerator.prepare`] instead.
+
+        Args:
+            model (`torch.nn.Module`):
+                A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
+                any kind of mixed precision
+            device_placement (`bool`, *optional*):
+                Whether or not to place the model on the proper device. Will default to `self.device_placement`.
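+
+        Example (a minimal sketch; assumes `model` is an existing `torch.nn.Module`):
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> model = accelerator.prepare_model(model)
+        ```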
+ """ + if device_placement is None: + device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP + self._models.append(model) + if device_placement: + model = model.to(self.device) + if self.state.dynamo_backend != DynamoBackend.NO: + import torch._dynamo as dynamo + + model = dynamo.optimize(self.state.dynamo_backend.value.lower())(model) + if self.distributed_type == DistributedType.MULTI_GPU: + if any(p.requires_grad for p in model.parameters()): + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs + ) + elif self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + # Check if the model is already a FSDP model due to `Manual Wrapping` and if so, + # don't wrap it again + if type(model) != FSDP: + self.state.fsdp_plugin.set_auto_wrap_policy(model) + fsdp_plugin = self.state.fsdp_plugin + model = FSDP( + model, + sharding_strategy=fsdp_plugin.sharding_strategy, + cpu_offload=fsdp_plugin.cpu_offload, + auto_wrap_policy=fsdp_plugin.auto_wrap_policy, + backward_prefetch=fsdp_plugin.backward_prefetch, + mixed_precision=fsdp_plugin.mixed_precision_policy, + ignored_modules=fsdp_plugin.ignored_modules, + device_id=self.device, + limit_all_gathers=fsdp_plugin.limit_all_gathers, + ) + self._models[-1] = model + elif self.distributed_type == DistributedType.MULTI_CPU: + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + model = torch.nn.parallel.DistributedDataParallel(model, **kwargs) + if self.native_amp: + model._original_forward = model.forward + if self.mixed_precision == "fp16" and is_torch_version(">=", "1.10"): + model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward) + elif self.mixed_precision == "bf16" and self.distributed_type != DistributedType.TPU: + model.forward = torch.autocast(device_type=self.device.type, dtype=torch.bfloat16)(model.forward) + else: + model.forward = torch.cuda.amp.autocast()(model.forward) + model.forward = convert_outputs_to_fp32(model.forward) + if self.distributed_type == DistributedType.TPU and self.state.fork_launched: + model = xmp.MpModelWrapper(model).to(self.device) + return model + + def _prepare_deepspeed(self, *args): + + deepspeed_plugin = self.state.deepspeed_plugin + + if deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] == "auto": + result = [ + self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj + for obj in args + ] + + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if self.split_batches: + batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes] + if len(batch_sizes) == 0: + raise ValueError( + "When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders " + "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file" + "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." 
+ ) + + batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " + f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})." + ) + else: + batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] + result = [obj for obj in args] + + if self.gradient_accumulation_steps != deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]: + logger.info( + f"Updating DeepSpeed's gradient accumulation steps to {self.gradient_accumulation_steps} from " + f"{deepspeed_plugin.deepspeed_config['gradient_accumulation_steps']}." + ) + deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] = self.gradient_accumulation_steps + config_kwargs = { + "train_micro_batch_size_per_gpu": batch_size_per_device, + "train_batch_size": batch_size_per_device + * deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] + * self.num_processes, + "gradient_clipping": 1.0, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + + model = None + optimizer = None + scheduler = None + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)): + optimizer = obj + elif (isinstance(obj, (LRScheduler, DummyScheduler))) or ( + type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + scheduler = obj + + if optimizer is not None: + if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot specify an optimizer in the config file and in the code at the same time. " + "Please remove the optimizer from the config file or " + "create `accelerate.utils.DummyOptim` in the code." + ) + elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot create a `DummyOptim` without specifying an optimizer in the config file." + ) + + if isinstance(optimizer, (torch.optim.Optimizer)): + deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True + + if scheduler is not None: + if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You cannot specify a scheduler in the config file and in the code at the same time. " + "Please remove the scheduler from the config file or " + "create `accelerate.utils.DummyScheduler` in the code." + ) + elif "scheduler" not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You cannot create a `DummyScheduler` without specifying a scheduler in the config file." + ) + + if optimizer is not None and scheduler is not None: + if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You can only specify `accelerate.utils.DummyScheduler` in the code when using " + "`accelerate.utils.DummyOptim`." 
+ ) + + if model is not None: + if hasattr(model, "config") and hasattr(model.config, "hidden_size"): + hidden_size = model.config.hidden_size + config_kwargs.update( + { + "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, + "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, + "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, + } + ) + + if isinstance(optimizer, (DummyOptim)): + config_kwargs.update( + {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay} + ) + if isinstance(scheduler, (DummyScheduler)): + config_kwargs.update( + { + "scheduler.params.warmup_min_lr": 0, + "scheduler.params.warmup_max_lr": scheduler.optimizer.lr, + "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps, + } + ) + if scheduler.total_num_steps is not None: + config_kwargs["scheduler.params.total_num_steps"] = ( + math.ceil(scheduler.total_num_steps / self.num_processes) + if not self.split_batches + else scheduler.total_num_steps + ) + deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs) + self.deepspeed_config = deepspeed_plugin.deepspeed_config + kwargs = dict(model=model, config_params=self.deepspeed_config) + if optimizer is not None: + if isinstance(optimizer, (DummyOptim)): + kwargs["model_parameters"] = optimizer.params + else: + kwargs["optimizer"] = optimizer + if scheduler is not None: + if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES: + kwargs["lr_scheduler"] = scheduler + + engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) + if optimizer is not None: + optimizer = DeepSpeedOptimizerWrapper(optimizer) + if scheduler is not None: + if lr_scheduler is None: + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + else: + scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = engine + elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)): + result[i] = optimizer + elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or ( + type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + result[i] = scheduler + # pointing for deepspeed_engine_wrapped.backward() + self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine) + self._models.append(engine) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + if len(self._models) > 1: + raise AssertionError( + "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" + ) + return tuple(result) + + def _prepare_megatron_lm(self, *args): + megatron_lm_plugin = self.state.megatron_lm_plugin + if not megatron_lm_plugin.megatron_dataset_flag: + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if len(batch_sizes) == 0: + raise ValueError( + "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM." + ) + + micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " + f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})." 
+ ) + else: + for obj in args: + if isinstance(obj, MegatronLMDummyDataLoader): + micro_batch_size = obj.dataset_args["micro_batch_size"] + break + + dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree) + megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree) + + model = None + optimizer = None + scheduler = None + is_dummy_scheduler = False + batch_data = None + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None: + batch_data = next(iter(obj)) + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)): + scheduler = obj + + if model is not None: + megatron_lm_plugin.set_network_size_args(model, batch_data) + if optimizer is not None: + megatron_lm_plugin.set_optimizer_type(optimizer) + if scheduler is not None: + is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler) + if not is_dummy_scheduler: + raise ValueError( + "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead." + ) + megatron_lm_plugin.set_scheduler_args(scheduler) + + # initialize megatron-lm + megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args) + counter = 0 + result = [] + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader): + result.append(megatron_lm_prepare_data_loader(self, obj)) + counter += 1 + elif isinstance(obj, MegatronLMDummyDataLoader): + if counter == 0: + obj.set_megatron_data_args() + dataloaders = megatron_lm_prepare_data_loader(self, obj) + result.append(dataloaders[counter]) + counter += 1 + else: + result.append(obj) + + if model is not None: + model = megatron_lm_prepare_model(self) + if optimizer is not None: + optimizer = megatron_lm_prepare_optimizer(self, model) + if scheduler is not None: + scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler) + + if model is not None: + model = MegatronEngine(self, model, optimizer, scheduler) + if optimizer is not None: + optimizer = MegatronLMOptimizerWrapper(optimizer) + if scheduler is not None: + scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], torch.optim.Optimizer): + result[i] = optimizer + elif isinstance(result[i], MegatronLMDummyScheduler): + result[i] = scheduler + if model is not None: + self._models.append(model) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + if len(self._models) > 1: + raise AssertionError( + "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM" + ) + return tuple(result) + + def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_placement=None): + """ + Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + data_loader (`torch.utils.data.DataLoader`): + A vanilla PyTorch DataLoader to prepare + device_placement (`bool`, *optional*): + Whether or not to place the batches on the proper device in the prepared dataloader. Will default to + `self.device_placement`. 
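+
+        Example (a minimal sketch; assumes `dataset` is an existing PyTorch `Dataset`):
+
+        ```python
+        >>> import torch
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> data_loader = torch.utils.data.DataLoader(dataset, batch_size=4)
+        >>> data_loader = accelerator.prepare_data_loader(data_loader)
+        ```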
+ """ + if device_placement is None: + device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False + prepared_data_loader = prepare_data_loader( + data_loader, + self.device, + num_processes=self.num_processes, + process_index=self.process_index, + split_batches=self.split_batches, + put_on_device=device_placement, + rng_types=self.rng_types.copy(), + dispatch_batches=self.dispatch_batches, + even_batches=self.even_batches, + ) + self._dataloaders.append(prepared_data_loader) + return prepared_data_loader + + def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None): + """ + Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + optimizer (`torch.optim.Optimizer`): + A vanilla PyTorch optimizer to prepare + device_placement (`bool`, *optional*): + Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`. + """ + if device_placement is None: + device_placement = self.device_placement + optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler) + self._optimizers.append(optimizer) + return optimizer + + def prepare_scheduler(self, scheduler: LRScheduler): + """ + Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + scheduler (`torch.optim.lr_scheduler.LRScheduler`): + A vanilla PyTorch scheduler to prepare + """ + # We try to find the optimizer associated with `scheduler`, the default is the full list. + optimizer = self._optimizers + for opt in self._optimizers: + if getattr(scheduler, "optimizer", None) == opt.optimizer: + optimizer = opt + break + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + self._schedulers.append(scheduler) + return scheduler + + def backward(self, loss, **kwargs): + """ + Scales the gradients in accordance to `Accelerator.gradient_accumulation_steps` and calls the correct + `backward()` based on the configuration. + + Should be used in lieu of `loss.backward()`. + """ + if self.distributed_type != DistributedType.DEEPSPEED: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.gradient_accumulation_steps + if self.distributed_type == DistributedType.DEEPSPEED: + self.deepspeed_engine_wrapped.backward(loss, **kwargs) + elif self.distributed_type == DistributedType.MEGATRON_LM: + return + elif self.scaler is not None: + self.scaler.scale(loss).backward(**kwargs) + else: + loss.backward(**kwargs) + + def unscale_gradients(self, optimizer=None): + """ + Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings. + + Args: + optimizer (`torch.optim.Optimizer` or `List[torch.optim.Optimizer]`, *optional*): + The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers + that were passed to [`~Accelerator.prepare`]. + """ + if self.use_fp16 and self.native_amp: + if optimizer is None: + # TODO: this unscales all optimizers where we should only unscale the one where parameters are. 
+ optimizer = self._optimizers + elif not isinstance(optimizer, (tuple, list)): + optimizer = [optimizer] + for opt in optimizer: + while isinstance(opt, AcceleratedOptimizer): + opt = opt.optimizer + self.scaler.unscale_(opt) + + def clip_grad_norm_(self, parameters, max_norm, norm_type=2): + """ + Should be used in place of `torch.nn.utils.clip_grad_norm_`. + + Returns: + `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for (input, target) in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) + ... optimizer.step() + ``` + """ + if self.distributed_type == DistributedType.FSDP: + self.unscale_gradients() + parameters = [p for p in parameters] + for model in self._models: + if parameters == [p for p in model.parameters()]: + return model.clip_grad_norm_(max_norm, norm_type) + elif self.distributed_type == DistributedType.DEEPSPEED: + # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + # We cannot return the gradient norm because DeepSpeed does it. + return None + self.unscale_gradients() + return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) + + def clip_grad_value_(self, parameters, clip_value): + """ + Should be used in place of `torch.nn.utils.clip_grad_value_`. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for (input, target) in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_value_(model.parameters(), clip_value) + ... optimizer.step() + ``` + """ + if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]: + raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.") + self.unscale_gradients() + torch.nn.utils.clip_grad_value_(parameters, clip_value) + + def gather(self, tensor): + """ + Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to + regroup the predictions from all processes when doing evaluation. + + Note: + This gather happens in all processes. + + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to gather across all processes. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the + first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors. + """ + return gather(tensor) + + def gather_for_metrics(self, tensor): + """ + Gathers `tensor` and potentially drops duplicates in the last batch if on a distributed system. Should be used + for gathering the inputs and targets for metric calculation. 
+ + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors for calculating metrics across all processes. + """ + tensor = self.gather(tensor) + if self.use_distributed: + if self.gradient_state.remainder == -1: + logger.info( + "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself." + ) + return tensor + try: + # Then see if we're on the last batch of our eval dataloader + if self.gradient_state.end_of_dataloader and self.gradient_state.remainder > 0: + # Last batch needs to be truncated on distributed systems as it contains additional samples + def _adjust_samples(tensor): + return tensor[: self.gradient_state.remainder] + + return recursively_apply(_adjust_samples, tensor) + else: + # Not at the end of the dataloader, no need to adjust the tensors + return tensor + except: + # Dataset had no length or raised an error + return tensor + return tensor + + def reduce(self, tensor, reduction="sum"): + """ + Reduce the values in *tensor* across all processes based on *reduction*. + + Note: + All processes get the reduced value. + + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to reduce across all processes. + reduction (`str`, *optional*, defaults to "sum"): + A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The reduced tensor(s). + """ + return reduce(tensor, reduction) + + def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False): + """ + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so + they can safely be gathered. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather. + dim (`int`, *optional*, defaults to 0): + The dimension on which to pad. + pad_index (`int`, *optional*, defaults to 0): + The value with which to pad. + pad_first (`bool`, *optional*, defaults to `False`): + Whether to pad at the beginning or the end. + """ + return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first) + + def unwrap_model(self, model, keep_fp32_wrapper: bool = False): + """ + Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving + the model. + + Args: + model (`torch.nn.Module`): + The model to unwrap. + keep_fp32_wrapper (`bool`, *optional*, defaults to `False`): + Whether to not remove the mixed precision hook if it was added. + """ + return extract_model_from_parallel(model, keep_fp32_wrapper) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is only run in one process). Useful to do before saving a model. + """ + wait_for_everyone() + + @on_main_process + def init_trackers(self, project_name: str, config: Optional[dict] = None, init_kwargs: Optional[dict] = {}): + """ + Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations + + Args: + project_name (`str`): + The name of the project. All trackers will save their data based on this + config (`dict`, *optional*): + Optional starting configuration to be logged. + init_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. 
Should be + formatted like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + """ + self.trackers = [] + for tracker in self.log_with: + if issubclass(type(tracker), GeneralTracker): + # Custom trackers are already initialized + self.trackers.append(tracker) + else: + tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)] + if getattr(tracker_init, "requires_logging_directory"): + # We can skip this check since it was done in `__init__` + self.trackers.append( + tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})) + ) + else: + self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {}))) + if config is not None: + for tracker in self.trackers: + tracker.store_init_configuration(config) + + @on_main_process + def get_tracker(self, name: str): + """ + Returns a `tracker` from `self.trackers` based on `name` on the main process only. + + Args: + name (`str`): + The name of a tracker, corresponding to the `.name` property. + """ + for tracker in self.trackers: + if tracker.name == name: + return tracker.tracker + raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, log_kwargs: Optional[dict] = {}): + """ + Logs `values` to all stored trackers in `self.trackers` on the main process only. + + Args: + values (`dict`): + Values should be a dictionary-like object containing only types `int`, `float`, or `str`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + log_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted + like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + """ + for tracker in self.trackers: + tracker.log(values, step=step, **log_kwargs.get(tracker.name, {})) + + @on_main_process + def end_training(self): + """ + Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be + called at the end of your script if using experiment tracking. + """ + for tracker in self.trackers: + tracker.finish() + + def save(self, obj, f): + """ + Save the object passed to disk once per machine. Use in place of `torch.save`. + + Args: + obj: The object to save. + f (`str` or `os.PathLike`): + Where to save the content of `obj`. + """ + save(obj, f) + + def save_state(self, output_dir: str = None, **save_model_func_kwargs): + """ + Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder. + + If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled + then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater + than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named + `checkpoint_`. + + Otherwise they are just saved to `output_dir`. + + + + Should only be used when wanting to save a checkpoint during training and restoring the state in the same + environment. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. + save_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for saving model which can be passed to the underlying save function, such + as optional arguments for DeepSpeed's `save_checkpoint` function. 
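+
+        Example (a minimal sketch; assumes `model` and `optimizer` already exist):
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> model, optimizer = accelerator.prepare(model, optimizer)
+        >>> accelerator.save_state(output_dir="my_checkpoint")
+        ```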
+        """
+        if self.project_configuration.automatic_checkpoint_naming:
+            output_dir = os.path.join(self.project_dir, "checkpoints")
+        os.makedirs(output_dir, exist_ok=True)
+        if self.project_configuration.automatic_checkpoint_naming:
+            folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]
+            if self.project_configuration.total_limit is not None and (
+                len(folders) + 1 > self.project_configuration.total_limit
+            ):
+                folders.sort()
+                logger.warning(
+                    f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint."
+                )
+                for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]:
+                    shutil.rmtree(folder)
+            output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}")
+            if os.path.exists(output_dir):
+                raise ValueError(
+                    f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with."
+                )
+            os.makedirs(output_dir, exist_ok=True)
+        logger.info(f"Saving current state to {output_dir}")
+
+        # Save the models taking care of FSDP and DeepSpeed nuances
+        weights = []
+        for i, model in enumerate(self._models):
+            if self.distributed_type == DistributedType.FSDP:
+                logger.info("Saving FSDP model")
+                self.state.fsdp_plugin.save_model(self, model, output_dir, i)
+                logger.info(f"FSDP Model saved to output dir {output_dir}")
+            elif self.distributed_type == DistributedType.DEEPSPEED:
+                logger.info("Saving DeepSpeed Model and Optimizer")
+                ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}"
+                model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs)
+                logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}")
+            elif self.distributed_type == DistributedType.MEGATRON_LM:
+                logger.info("Saving Megatron-LM Model, Optimizer and Scheduler")
+                model.save_checkpoint(output_dir)
+                logger.info(f"Megatron-LM Model, Optimizer and Scheduler saved to output dir {output_dir}")
+            else:
+                weights.append(self.get_state_dict(model, unwrap=False))
+
+        # Save the optimizers taking care of FSDP and DeepSpeed nuances
+        optimizers = []
+        if self.distributed_type == DistributedType.FSDP:
+            for i, opt in enumerate(self._optimizers):
+                logger.info("Saving FSDP Optimizer")
+                self.state.fsdp_plugin.save_optimizer(self, opt, self._models[i], output_dir, i)
+                logger.info(f"FSDP Optimizer saved to output dir {output_dir}")
+        elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+            optimizers = self._optimizers
+
+        # Save the lr schedulers taking care of DeepSpeed nuances
+        schedulers = []
+        if self.distributed_type == DistributedType.DEEPSPEED:
+            for i, scheduler in enumerate(self._schedulers):
+                if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+                    continue
+                schedulers.append(scheduler)
+        elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+            schedulers = self._schedulers
+
+        save_location = save_accelerator_state(
+            output_dir, weights, optimizers, schedulers, self.state.process_index, self.scaler
+        )
+        for i, obj in enumerate(self._custom_objects):
+            save_custom_state(obj, output_dir, i)
+        self.project_configuration.iteration += 1
+        return save_location
+
+    def load_state(self, input_dir: str, **load_model_func_kwargs):
+        """
+        Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
+
+        Should only be used in conjunction with [`Accelerator.save_state`].
+ + + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder all relevant weights and states were saved in. + load_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for loading model which can be passed to the underlying load function, + such as optional arguments for DeepSpeed's `load_checkpoint` function. + """ + # Check if folder exists + input_dir = os.path.expanduser(input_dir) + if not os.path.isdir(input_dir): + raise ValueError(f"Tried to find {input_dir} but folder does not exist") + logger.info(f"Loading states from {input_dir}") + + # Load the models taking care of FSDP and DeepSpeed nuances + models = [] + for i, model in enumerate(self._models): + if self.distributed_type == DistributedType.FSDP: + logger.info("Loading FSDP model") + self.state.fsdp_plugin.load_model(self, model, input_dir, i) + logger.info(f"FSDP Model loaded from input dir {input_dir}") + elif self.distributed_type == DistributedType.DEEPSPEED: + logger.info("Loading DeepSpeed Model and Optimizer") + ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" + model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs) + logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}") + elif self.distributed_type == DistributedType.MEGATRON_LM: + logger.info("Loading Megatron-LM Model, Optimizer and Scheduler") + model.load_checkpoint(input_dir) + logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}") + else: + models.append(model) + + # Load the optimizers taking care of FSDP and DeepSpeed nuances + optimizers = [] + if self.distributed_type == DistributedType.FSDP: + for i, opt in enumerate(self._optimizers): + logger.info("Loading FSDP Optimizer") + self.state.fsdp_plugin.load_optimizer(self, opt, self._models[i], input_dir, i) + logger.info(f"FSDP Optimizer loaded from input dir {input_dir}") + elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + optimizers = self._optimizers + + # Load the lr schedulers taking care of DeepSpeed nuances + schedulers = [] + if self.distributed_type == DistributedType.DEEPSPEED: + for i, scheduler in enumerate(self._schedulers): + if isinstance(scheduler, DeepSpeedSchedulerWrapper): + continue + schedulers.append(scheduler) + elif self.distributed_type not in [DistributedType.MEGATRON_LM]: + schedulers = self._schedulers + + load_accelerator_state( + input_dir, models, optimizers, schedulers, self.state.process_index, self.scaler, **load_model_func_kwargs + ) + custom_checkpoints = [f for f in os.listdir(input_dir) if "custom_checkpoint" in f] + if len(custom_checkpoints) != len(self._custom_objects): + err = "Warning! Number of found checkpoints does not match the number of registered objects:" + err += f"\n\tFound checkpoints: {len(custom_checkpoints)}" + err += f"\n\tRegistered objects: {len(self._custom_objects)}\nSkipping." + logger.warning(err) + else: + logger.info(f"Loading in {len(custom_checkpoints)} custom states") + for index, obj in enumerate(self._custom_objects): + load_custom_state(obj, input_dir, index) + + def free_memory(self): + """ + Will release all references to the internal objects stored and call the garbage collector. You should call this + method between two trainings with different models/optimizers. 
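+
+        Example (a minimal sketch; assumes `model_a` and `model_b` already exist):
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> model_a = accelerator.prepare(model_a)
+        >>> # ... train `model_a`, then release it before preparing the next model
+        >>> accelerator.free_memory()
+        >>> model_b = accelerator.prepare(model_b)
+        ```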
+ """ + self._schedulers = [] + self._optimizers = [] + self._models = [] + self._dataloaders = [] + self.deepspeed_engine_wrapped = None + release_memory() + + def clear(self): + """ + Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the + garbage collector. You should call this method between two trainings with different models/optimizers. + """ + self.free_memory() + + def _get_named_parameters(self, *args): + named_parameters = {} + for obj in args: + if isinstance(obj, torch.nn.Module): + obj = extract_model_from_parallel(obj) + named_parameters.update({n: p for n, p in obj.named_parameters()}) + return named_parameters + + def _get_devices(self, *args): + model_device = None + optimizer_device = None + for obj in args: + # Loop through model parameters and stop at the first once we have its device. + if isinstance(obj, torch.nn.Module): + for param in obj.parameters(): + model_device = param.device + break + # Loop through optimizer parameters groups and stop at the first once we have its device. + if isinstance(obj, torch.optim.Optimizer): + for param_group in obj.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + return (model_device, optimizer_device) + + def get_state_dict(self, model, unwrap=True): + """ + Returns the state dictionary of a model sent through [`Accelerator.prepare`] in full precision + + Args: + model (`torch.nn.Module`): + A PyTorch model sent through [`Accelerator.prepare`] + unwrap (`bool`, *optional*, defaults to `True`): + Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict + """ + is_zero_3 = False + if self.distributed_type == DistributedType.DEEPSPEED: + is_zero_3 = self.deepspeed_config["zero_optimization"]["stage"] == 3 + + if is_zero_3: + if model.zero_gather_16bit_weights_on_model_save(): + state_dict = model._zero3_consolidated_16bit_state_dict() + else: + raise ValueError( + "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " + "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " + "set `zero3_save_16bit_model` to True when using `accelerate config`. " + "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." + ) + else: + if unwrap: + model = self.unwrap_model(model) + state_dict = model.state_dict() + + if state_dict is not None: + for k in state_dict: + if state_dict[k].dtype == torch.float16: + state_dict[k] = state_dict[k].float() + + return state_dict + + def register_for_checkpointing(self, *objects): + """ + Makes note of `objects` and will save or load them in during `save_state` or `load_state`. + + These should be utilized when the state is being loaded or saved in the same script. It is not designed to be + used in different scripts + + + + Every `object` must have a `load_state_dict` and `state_dict` function to be stored. + + + """ + invalid_objects = [] + for obj in objects: + if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"): + invalid_objects.append(obj) + if len(invalid_objects) > 0: + err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. 
The following inputs are invalid:" + for index, obj in enumerate(invalid_objects): + err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`" + raise ValueError(err) + self._custom_objects.extend(objects) + + @contextmanager + def autocast(self): + """ + Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing + different will happen otherwise. + """ + if self.native_amp: + if self.mixed_precision == "fp16" and is_torch_version(">=", "1.10"): + autocast_context = torch.cuda.amp.autocast(dtype=torch.float16) + elif self.mixed_precision == "bf16": + if self.distributed_type in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: + autocast_context = torch.autocast(dtype=torch.bfloat16, device_type=self.device.type) + else: + autocast_context = torch.cuda.amp.autocast() + + autocast_context.__enter__() + yield + autocast_context.__exit__(*sys.exc_info()) + else: + yield + + @property + def optimizer_step_was_skipped(self): + """ + Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which + case the learning rate should not be changed. + """ + for optimizer in self._optimizers: + if optimizer.step_was_skipped: + return True + return False diff --git a/testbed/huggingface__accelerate/src/accelerate/big_modeling.py b/testbed/huggingface__accelerate/src/accelerate/big_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..6777eda606c8e4eb7f7e7c80c4ef857f6c50097e --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/big_modeling.py @@ -0,0 +1,422 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from contextlib import contextmanager +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn + +from .hooks import AlignDevicesHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks +from .utils import ( + OffloadedWeightsLoader, + check_device_map, + extract_submodules_state_dict, + find_tied_parameters, + get_balanced_memory, + infer_auto_device_map, + load_checkpoint_in_model, + offload_state_dict, + retie_parameters, +) +from .utils.versions import is_torch_version + + +@contextmanager +def init_empty_weights(include_buffers: bool = False): + """ + A context manager under which models are initialized with all parameters on the meta device, therefore creating an + empty model. Useful when just initializing the model would blow the available RAM. + + Args: + include_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to also put all buffers on the meta device while initializing. + + Example: + + ```python + import torch.nn as nn + from accelerate import init_empty_weights + + # Initialize a model with 100 billions parameters in no time and without using any RAM. 
+ with init_empty_weights():
+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
+ ```
+
+
+
+ Any model created under this context manager has no weights. As such you can't do something like
+ `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
+
+
+ """
+ if not is_torch_version(">=", "1.9.0"):
+ raise NotImplementedError("Initializing empty weights to a meta device requires torch >= 1.9.0")
+ with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+ yield f
+
+
+@contextmanager
+def init_on_device(device: torch.device, include_buffers: bool = False):
+ """
+ A context manager under which models are initialized with all parameters on the specified device.
+
+ Args:
+ device (`torch.device`):
+ Device to initialize all parameters on.
+ include_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to also put all buffers on the specified device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_on_device
+
+ with init_on_device(device=torch.device("cuda")):
+ tst = nn.Linear(100, 100) # on `cuda` device
+ ```
+ """
+ old_register_parameter = nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+ def register_empty_buffer(module, name, buffer):
+ old_register_buffer(module, name, buffer)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+
+ # Patch tensor creation
+ if include_buffers:
+ tensor_constructors_to_patch = {
+ torch_function_name: getattr(torch, torch_function_name)
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
+ }
+ else:
+ tensor_constructors_to_patch = {}
+
+ def patch_tensor_constructor(fn):
+ def wrapper(*args, **kwargs):
+ kwargs["device"] = device
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ try:
+ nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+ yield
+ finally:
+ nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ nn.Module.register_buffer = old_register_buffer
+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
+
+
+def cpu_offload(
+ model: nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+):
+ """
+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
+ state dict and put on the passed execution device as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device (`torch.device`, *optional*): + The device on which the forward pass of the model will be executed (should be a GPU). Will default to the + model first parameter device. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + The state dict of the model that will be kept on CPU. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + if not is_torch_version(">=", "1.9.0"): + raise NotImplementedError("CPU offloading requires torch >= 1.9.0") + if execution_device is None: + execution_device = next(iter(model.parameters())).device + if state_dict is None: + state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()} + + add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) + attach_align_device_hook( + model, + execution_device=execution_device, + offload=True, + offload_buffers=offload_buffers, + weights_map=state_dict, + preload_module_classes=preload_module_classes, + ) + + return model + + +def disk_offload( + model: nn.Module, + offload_dir: Union[str, os.PathLike], + execution_device: Optional[torch.device] = None, + offload_buffers: bool = False, + preload_module_classes: Optional[List[str]] = None, +): + """ + Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as + memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and + put on the execution device passed as they are needed, then offloaded again. + + Args: + model (`torch.nn.Module`): The model to offload. + offload_dir (`str` or `os.PathLike`): + The folder in which to offload the model weights (or where the model weights are already offloaded). + execution_device (`torch.device`, *optional*): + The device on which the forward pass of the model will be executed (should be a GPU). Will default to the + model's first parameter device. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. 
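+
+ Example (a minimal sketch; `MyModel`, the folder name, and the device are
+ placeholders, and the top-level `accelerate` import assumes the usual re-export
+ of this function):
+
+ ```python
+ import torch
+ from accelerate import disk_offload
+
+ model = MyModel()
+ model = disk_offload(model, offload_dir="offload_folder", execution_device=torch.device("cuda"))
+ ```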
+ """ + if not is_torch_version(">=", "1.9.0"): + raise NotImplementedError("Disk offloading requires torch >= 1.9.0") + if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")): + offload_state_dict(offload_dir, model.state_dict()) + if execution_device is None: + execution_device = next(iter(model.parameters())).device + weights_map = OffloadedWeightsLoader(save_folder=offload_dir) + + add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) + attach_align_device_hook( + model, + execution_device=execution_device, + offload=True, + offload_buffers=offload_buffers, + weights_map=weights_map, + preload_module_classes=preload_module_classes, + ) + + return model + + +def dispatch_model( + model: nn.Module, + device_map: Dict[str, Union[str, int, torch.device]], + main_device: Optional[torch.device] = None, + state_dict: Optional[Dict[str, torch.Tensor]] = None, + offload_dir: Optional[Union[str, os.PathLike]] = None, + offload_index: Optional[Dict[str, str]] = None, + offload_buffers: bool = False, + preload_module_classes: Optional[List[str]] = None, +): + """ + Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on + the CPU or even the disk. + + Args: + model (`torch.nn.Module`): + The model to dispatch. + device_map (`Dict[str, Union[str, int, torch.device]]`): + A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that + `"disk"` is accepted even if it's not a proper value for `torch.device`. + main_device (`str`, `int` or `torch.device`, *optional*): + The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or + `"disk"`. + state_dict (`Dict[str, torch.Tensor]`, *optional*): + The state dict of the part of the model that will be kept on CPU. + offload_dir (`str` or `os.PathLike`): + The folder in which to offload the model weights (or where the model weights are already offloaded). + offload_index (`Dict`, *optional*): + A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default + to the index saved in `save_folder`. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + if not is_torch_version(">=", "1.9.0"): + raise NotImplementedError("Model dispatching requires torch >= 1.9.0") + # Error early if the device map is incomplete. 
+ check_device_map(model, device_map) + + if main_device is None: + if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}: + main_device = "cpu" + else: + main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0] + + if main_device != "cpu": + cpu_modules = [name for name, device in device_map.items() if device == "cpu"] + if state_dict is None and len(cpu_modules) > 0: + state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules) + + disk_modules = [name for name, device in device_map.items() if device == "disk"] + if offload_dir is None and offload_index is None and len(disk_modules) > 0: + raise ValueError( + "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules " + f"need to be offloaded: {', '.join(disk_modules)}." + ) + if ( + len(disk_modules) > 0 + and offload_index is None + and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json"))) + ): + disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules) + offload_state_dict(offload_dir, disk_state_dict) + + execution_device = { + name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items() + } + offloaded_devices = ["disk"] if main_device == "cpu" else ["cpu", "disk"] + offload = {name: device in offloaded_devices for name, device in device_map.items()} + save_folder = offload_dir if len(disk_modules) > 0 else None + if state_dict is not None or save_folder is not None or offload_index is not None: + device = main_device if offload_index is not None else None + weights_map = OffloadedWeightsLoader( + state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device + ) + else: + weights_map = None + + tied_params = find_tied_parameters(model) + attach_align_device_hook_on_blocks( + model, + execution_device=execution_device, + offload=offload, + offload_buffers=offload_buffers, + weights_map=weights_map, + preload_module_classes=preload_module_classes, + ) + # Attaching the hook may break tied weights, so we retie them + retie_parameters(model, tied_params) + model.hf_device_map = device_map + return model + + +def load_checkpoint_and_dispatch( + model: nn.Module, + checkpoint: Union[str, os.PathLike], + device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None, + max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, + no_split_module_classes: Optional[List[str]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + offload_buffers: bool = False, + dtype: Optional[Union[str, torch.dtype]] = None, + offload_state_dict: Optional[bool] = None, + preload_module_classes: Optional[List[str]] = None, +): + """ + Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are + loaded and adds the various hooks that will make this model run properly (even if split across devices). + + Args: + model (`torch.nn.Module`): The model in which we want to load a checkpoint. + checkpoint (`str` or `os.PathLike`): + The folder checkpoint to load. It can be: + - a path to a file containing a whole model state dict + - a path to a `.json` file containing the index to a sharded checkpoint + - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. + device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. 
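+ For example, a hand-written map might look like `{"transformer.h.0": 0,
+ "transformer.h.1": 1, "lm_head": "cpu"}` (the module names here are purely
+ illustrative).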
It doesn't need to be refined to each parameter/buffer + name, once a given module name is inside, every submodule of it will be sent to the same device. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more + information about each option see [here](big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU + and the available CPU RAM if unset. + no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_buffers (`bool`, *optional*, defaults to `False`): + In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as + well as the parameters. + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + offload_state_dict (`bool`, *optional*): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map + picked contains `"disk"` values. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + if not is_torch_version(">=", "1.9.0"): + raise NotImplementedError("Loading and dispatching requires torch >= 1.9.0") + if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " + "'sequential'." 
+ ) + if device_map != "sequential": + max_memory = get_balanced_memory( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + dtype=dtype, + low_zero=(device_map == "balanced_low_0"), + ) + if isinstance(device_map, str): + device_map = infer_auto_device_map( + model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype + ) + if offload_state_dict is None and "disk" in device_map.values(): + offload_state_dict = True + load_checkpoint_in_model( + model, + checkpoint, + device_map=device_map, + offload_folder=offload_folder, + dtype=dtype, + offload_state_dict=offload_state_dict, + offload_buffers=offload_buffers, + ) + if device_map is None: + return model + return dispatch_model( + model, + device_map=device_map, + offload_dir=offload_folder, + offload_buffers=offload_buffers, + preload_module_classes=preload_module_classes, + ) diff --git a/testbed/huggingface__accelerate/src/accelerate/checkpointing.py b/testbed/huggingface__accelerate/src/accelerate/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..780bcc762294893c2f12099d610ef9aed633177f --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/checkpointing.py @@ -0,0 +1,192 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random +from pathlib import Path +from typing import List + +import numpy as np +import torch +from torch.cuda.amp import GradScaler + +from .utils import ( + MODEL_NAME, + OPTIMIZER_NAME, + RNG_STATE_NAME, + SCALER_NAME, + SCHEDULER_NAME, + get_pretty_name, + is_tpu_available, + save, +) + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + +from .logging import get_logger + + +logger = get_logger(__name__) + + +def save_accelerator_state( + output_dir: str, + model_states: List[dict], + optimizers: list, + schedulers: list, + process_index: int, + scaler: GradScaler = None, +): + """ + Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. 
+ model_states (`List[torch.nn.Module]`): + A list of model states + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.cuda.amp.GradScaler`, *optional*): + An optional gradient scaler instance to save + """ + # Model states + for i, state in enumerate(model_states): + weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin" + output_model_file = os.path.join(output_dir, weights_name) + save(state, output_model_file) + logger.info(f"Model weights saved in {output_model_file}") + # Optimizer states + for i, opt in enumerate(optimizers): + state = opt.state_dict() + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + output_optimizer_file = os.path.join(output_dir, optimizer_name) + save(state, output_optimizer_file) + logger.info(f"Optimizer state saved in {output_optimizer_file}") + # Scheduler states + for i, scheduler in enumerate(schedulers): + state = scheduler.state_dict() + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + output_scheduler_file = os.path.join(output_dir, scheduler_name) + save(state, output_scheduler_file) + logger.info(f"Scheduler state saved in {output_scheduler_file}") + # GradScaler state + if scaler is not None: + state = scaler.state_dict() + output_scaler_file = os.path.join(output_dir, SCALER_NAME) + torch.save(state, output_scaler_file) + logger.info(f"Gradient scaler state saved in {output_scaler_file}") + # Random number generator states + states = {} + states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" + states["random_state"] = random.getstate() + states["numpy_random_seed"] = np.random.get_state() + states["torch_manual_seed"] = torch.get_rng_state() + states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() + # ^^ safe to call this function even if cuda is not available + if is_tpu_available(): + states["xm_seed"] = xm.get_rng_state() + output_states_file = os.path.join(output_dir, states_name) + torch.save(states, output_states_file) + logger.info(f"Random states saved in {output_states_file}") + return output_dir + + +def load_accelerator_state( + input_dir, models, optimizers, schedulers, process_index, scaler=None, **load_model_func_kwargs +): + """ + Loads states of the models, optimizers, scaler, and RNG generators from a given directory. + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder to load all relevant weights and states. + models (`List[torch.nn.Module]`): + A list of model instances + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.cuda.amp.GradScaler`, *optional*): + An optional *GradScaler* instance to load + load_model_func_kwargs (`dict`, *optional*): + Additional arguments that can be passed to the model's `load_state_dict` method. 
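+
+ Example (an illustrative sketch; the lists must line up with what
+ `save_accelerator_state` originally wrote into `input_dir`):
+
+ ```python
+ load_accelerator_state("checkpoints/checkpoint_0", [model], [optimizer], [scheduler], process_index=0)
+ ```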
+ """ + # Model states + for i, model in enumerate(models): + weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin" + input_model_file = os.path.join(input_dir, weights_name) + models[i].load_state_dict(torch.load(input_model_file, map_location="cpu"), **load_model_func_kwargs) + logger.info("All model weights loaded successfully") + + # Optimizer states + for i, opt in enumerate(optimizers): + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + input_optimizer_file = os.path.join(input_dir, optimizer_name) + optimizers[i].load_state_dict(torch.load(input_optimizer_file, map_location="cpu")) + logger.info("All optimizer states loaded successfully") + + # Scheduler states + for i, scheduler in enumerate(schedulers): + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + input_scheduler_file = os.path.join(input_dir, scheduler_name) + scheduler.load_state_dict(torch.load(input_scheduler_file)) + logger.info("All scheduler states loaded successfully") + + # GradScaler state + if scaler is not None: + input_scaler_file = os.path.join(input_dir, SCALER_NAME) + scaler.load_state_dict(torch.load(input_scaler_file)) + logger.info("GradScaler state loaded successfully") + + # Random states + try: + states = torch.load(os.path.join(input_dir, f"{RNG_STATE_NAME}_{process_index}.pkl")) + random.setstate(states["random_state"]) + np.random.set_state(states["numpy_random_seed"]) + torch.set_rng_state(states["torch_manual_seed"]) + torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) + # ^^ safe to call this function even if cuda is not available + if is_tpu_available(): + xm.set_rng_state(states["xm_seed"]) + logger.info("All random states loaded successfully") + except: + logger.info("Could not load random states") + + +def save_custom_state(obj, path, index: int = 0): + """ + Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` + """ + # Should this be the right way to get a qual_name type value from `obj`? + save_location = Path(path) / f"custom_checkpoint_{index}.pkl" + logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") + torch.save(obj.state_dict(), save_location) + + +def load_custom_state(obj, path, index: int = 0): + """ + Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl` + """ + load_location = f"{path}/custom_checkpoint_{index}.pkl" + logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") + obj.load_state_dict(torch.load(load_location)) diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/__init__.py b/testbed/huggingface__accelerate/src/accelerate/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/accelerate_cli.py b/testbed/huggingface__accelerate/src/accelerate/commands/accelerate_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..7716526c526280b9333791f94222f9458cb7ff6d --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/accelerate_cli.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from argparse import ArgumentParser
+
+from accelerate.commands.config import get_config_parser
+from accelerate.commands.env import env_command_parser
+from accelerate.commands.launch import launch_command_parser
+from accelerate.commands.test import test_command_parser
+from accelerate.commands.tpu import tpu_command_parser
+
+
+def main():
+ parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]")
+ subparsers = parser.add_subparsers(help="accelerate command helpers")
+
+ # Register commands
+ get_config_parser(subparsers=subparsers)
+ env_command_parser(subparsers=subparsers)
+ launch_command_parser(subparsers=subparsers)
+ tpu_command_parser(subparsers=subparsers)
+ test_command_parser(subparsers=subparsers)
+
+ # Let's go
+ args = parser.parse_args()
+
+ if not hasattr(args, "func"):
+ parser.print_help()
+ exit(1)
+
+ # Run
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/__init__.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b15459828a7483d6edd3b41b37aa42796e13391
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/__init__.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
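+
+# Illustrative invocations of the parsers assembled below (the `default` and
+# `update` subcommand names are assumptions based on the imported parsers):
+#
+#   accelerate config            # run the interactive questionnaire
+#   accelerate config default    # write a default config file without prompts
+#   accelerate config update     # rewrite an existing config file in place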
+ +import argparse + +from .config import config_command_parser +from .config_args import default_config_file, load_config_from_file # noqa: F401 +from .default import default_command_parser +from .update import update_command_parser + + +def get_config_parser(subparsers=None): + parent_parser = argparse.ArgumentParser(add_help=False) + # The main config parser + config_parser = config_command_parser(subparsers) + # The subparser to add commands to + subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand") + + # Then add other parsers with the parent parser + default_command_parser(subcommands, parents=[parent_parser]) + update_command_parser(subcommands, parents=[parent_parser]) + + return config_parser + + +def main(): + config_parser = get_config_parser() + args = config_parser.parse_args() + + if not hasattr(args, "func"): + config_parser.print_help() + exit(1) + + # Run + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/cluster.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..50acf55a1eecff564db36912011d464a8e383d66 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/cluster.py @@ -0,0 +1,507 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from ...utils import ( + ComputeEnvironment, + DistributedType, + DynamoBackend, + is_deepspeed_available, + is_transformers_available, +) +from ...utils.constants import ( + DEEPSPEED_MULTINODE_LAUNCHERS, + FSDP_AUTO_WRAP_POLICY, + FSDP_BACKWARD_PREFETCH, + FSDP_SHARDING_STRATEGY, + FSDP_STATE_DICT_TYPE, +) +from .config_args import ClusterConfig +from .config_utils import ( + _ask_field, + _ask_options, + _convert_distributed_mode, + _convert_dynamo_backend, + _convert_mixed_precision, + _convert_yes_no_to_bool, +) + + +def get_cluster_input(): + distributed_type = _ask_options( + "Which type of machine are you using?", + ["No distributed training", "multi-CPU", "multi-GPU", "TPU", "MPS"], + _convert_distributed_mode, + ) + + machine_rank = 0 + num_machines = 1 + num_processes = 1 + gpu_ids = None + main_process_ip = None + main_process_port = None + rdzv_backend = "static" + same_network = True + tpu_name = None + tpu_zone = None + commands = None + command_file = None + if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_CPU]: + num_machines = _ask_field( + "How many different machines will you use (use more than 1 for multi-node training)? [1]: ", + int, + default=1, + ) + if num_machines > 1: + machine_rank = _ask_options( + "What is the rank of this machine?", + list(range(num_machines)), + int, + ) + main_process_ip = _ask_field( + "What is the IP address of the machine that will host the main process? 
", + ) + main_process_port = _ask_field( + "What is the port you will use to communicate with the main process? ", + int, + ) + same_network = _ask_field( + "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + if not same_network: + rdzv_backend = _ask_field( + "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static" + ) + + if distributed_type == DistributedType.NO: + use_cpu = _ask_field( + "Do you want to run your training on CPU only (even if a GPU is available)? [yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + elif distributed_type == DistributedType.MULTI_CPU: + use_cpu = True + else: + use_cpu = False + + use_dynamo = _ask_field( + "Do you wish to optimize your script with torch dynamo?[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_dynamo: + dynamo_backend = _ask_options( + "Which dynamo backend would you like to use?", + [ + "eager", + "aot_eager", + "inductor", + "nvfuser", + "aot_nvfuser", + "aot_cudagraphs", + "ofi", + "fx2trt", + "onnxrt", + "ipex", + ], + _convert_dynamo_backend, + default=2, + ) + else: + dynamo_backend = DynamoBackend.NO + + deepspeed_config = {} + if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO]: + use_deepspeed = _ask_field( + "Do you want to use DeepSpeed? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_deepspeed: + distributed_type = DistributedType.DEEPSPEED + assert ( + is_deepspeed_available() + ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source" + + if distributed_type == DistributedType.DEEPSPEED: + use_deepspeed_config = _ask_field( + "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_deepspeed_config: + deepspeed_config["deepspeed_config_file"] = _ask_field( + "Please enter the path to the json DeepSpeed config file: ", + str, + default="none", + ) + else: + deepspeed_config["zero_stage"] = _ask_options( + "What should be your DeepSpeed's ZeRO optimization stage?", + [0, 1, 2, 3], + int, + default=2, + ) + + deepspeed_devices = ["none", "cpu", "nvme"] + if deepspeed_config["zero_stage"] >= 2: + deepspeed_config["offload_optimizer_device"] = _ask_options( + "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] + ) + deepspeed_config["offload_param_device"] = _ask_options( + "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)] + ) + deepspeed_config["gradient_accumulation_steps"] = _ask_field( + "How many gradient accumulation steps you're passing in your script? [1]: ", + int, + default=1, + ) + use_gradient_clipping = _ask_field( + "Do you want to use gradient clipping? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_gradient_clipping: + deepspeed_config["gradient_clipping"] = _ask_field( + "What is the gradient clipping value? [1.0]: ", + float, + default=1.0, + ) + if deepspeed_config["zero_stage"] == 3: + deepspeed_config["zero3_save_16bit_model"] = _ask_field( + "Do you want to save 16-bit model weights when using ZeRO Stage-3? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + deepspeed_config["zero3_init_flag"] = _ask_field( + "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if deepspeed_config["zero3_init_flag"]: + if not is_transformers_available(): + raise Exception( + "When `zero3_init_flag` is set, it requires Transformers to be installed. " + "Please run `pip3 install transformers`." + ) + + if num_machines > 1: + launcher_query = "Which Type of launcher do you want to use?" + deepspeed_config["deepspeed_multinode_launcher"] = _ask_options( + launcher_query, + DEEPSPEED_MULTINODE_LAUNCHERS, + lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)], + ) + + if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + deepspeed_config["deepspeed_hostfile"] = _ask_field( + "DeepSpeed configures multi-node compute resources with hostfile. " + "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; " + "for more information please refer official [documentation]" + "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). " + "Please specify the location of hostfile: ", + str, + ) + + is_exclusion_filter = _ask_field( + "Do you want to specify exclusion filter string? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if is_exclusion_filter: + deepspeed_config["deepspeed_exclusion_filter"] = _ask_field( + "DeepSpeed exclusion filter string: ", + str, + ) + + is_inclusion_filter = _ask_field( + "Do you want to specify inclusion filter string? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if is_inclusion_filter: + deepspeed_config["deepspeed_inclusion_filter"] = _ask_field( + "DeepSpeed inclusion filter string: ", + str, + ) + + fsdp_config = {} + if distributed_type in [DistributedType.MULTI_GPU]: + use_fsdp = _ask_field( + "Do you want to use FullyShardedDataParallel? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_fsdp: + distributed_type = DistributedType.FSDP + if distributed_type == DistributedType.FSDP: + sharding_strategy_query = "What should be your sharding strategy?" + fsdp_config["fsdp_sharding_strategy"] = _ask_options( + sharding_strategy_query, + FSDP_SHARDING_STRATEGY, + lambda x: int(x) + 1, + default=1, + ) + fsdp_config["fsdp_offload_params"] = _ask_field( + "Do you want to offload parameters and gradients to CPU? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + fsdp_wrap_query = "What should be your auto wrap policy?" + fsdp_config["fsdp_auto_wrap_policy"] = _ask_options( + fsdp_wrap_query, + FSDP_AUTO_WRAP_POLICY, + lambda x: FSDP_AUTO_WRAP_POLICY[int(x)], + ) + if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]: + fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field( + "What is the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` ...? : ", + str, + ) + elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]: + fsdp_config["fsdp_min_num_params"] = _ask_field( + "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? 
[1e8]: ", + int, + default=1e8, + ) + fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?" + fsdp_config["fsdp_backward_prefetch_policy"] = _ask_options( + fsdp_backward_prefetch_query, + FSDP_BACKWARD_PREFETCH, + lambda x: FSDP_BACKWARD_PREFETCH[int(x)], + ) + fsdp_state_dict_type_query = "What should be your FSDP's state dict type?" + fsdp_config["fsdp_state_dict_type"] = _ask_options( + fsdp_state_dict_type_query, + FSDP_STATE_DICT_TYPE, + lambda x: FSDP_STATE_DICT_TYPE[int(x)], + ) + + megatron_lm_config = {} + if distributed_type in [DistributedType.MULTI_GPU]: + use_megatron_lm = _ask_field( + "Do you want to use Megatron-LM ? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_megatron_lm: + distributed_type = DistributedType.MEGATRON_LM + if distributed_type == DistributedType.MEGATRON_LM: + prefix = "megatron_lm_" + megatron_lm_config[prefix + "tp_degree"] = _ask_field( + "What is the Tensor Parallelism degree/size? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + if megatron_lm_config[prefix + "tp_degree"] > 1: + megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field( + "Do you want to enable Sequence Parallelism? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "pp_degree"] = _ask_field( + "What is the Pipeline Parallelism degree/size? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + if megatron_lm_config[prefix + "pp_degree"] > 1: + megatron_lm_config[prefix + "num_micro_batches"] = _ask_field( + "What is the number of micro-batches? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + + megatron_lm_config[prefix + "recompute_activations"] = _ask_field( + "Do you want to enable selective activation recomputation? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field( + "Do you want to use distributed optimizer " + "which shards optimizer state and gradients across data pralellel ranks? [YES/no]: ", + _convert_yes_no_to_bool, + default=True, + error_message="Please enter yes or no.", + ) + + megatron_lm_config[prefix + "gradient_clipping"] = _ask_field( + "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ", + float, + default=1.0, + ) + + if distributed_type == DistributedType.TPU: + main_training_function = _ask_field( + "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ", + default="main", + ) + use_cluster = _ask_field( + "Are you using a TPU cluster? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_cluster: + tpu_name = _ask_field( + "What is the name of your TPU cluster? ", + default=None, + error_message="Please enter the name of your TPU cluster.", + ) + tpu_zone = _ask_field( + "What is the zone of your TPU cluster? ", + default=None, + error_message="Please enter the zone of your TPU cluster.", + ) + run_commands = _ask_field( + "Do you have code you wish to run on startup in each pod? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if run_commands: + use_command_file = _ask_field( + "Is this code located in a bash script? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_command_file: + command_file = _ask_field( + "What is the path to your bash script? ", + default=None, + error_message="Please enter the path to your bash script.", + ) + command_file = os.path.abspath(command_file) + else: + print("Please enter each command seperately you wish to run on startup in each pod.") + commands = [] + another_command = True + while another_command: + commands.append( + _ask_field( + "Please enter a single command to be ran ", + default=None, + error_message="Please enter the commands you wish to run on startup in each pod as a single string.", + ) + ) + another_command = _ask_field( + "Do you wish to add another command? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + else: + main_training_function = "main" + + if distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.TPU]: + machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "") + if machine_type == "TPU": + machine_type += " cores" + else: + machine_type += "(s)" + num_processes = _ask_field( + f"How many {machine_type} should be used for distributed training? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + num_processes = _ask_field( + "How many GPU(s) should be used for distributed training? [1]:", + int, + default=1, + error_message="Please enter an integer.", + ) + else: + num_processes = 1 + + if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_cpu: + gpu_ids = _ask_field( + "What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:", + default="all", + ) + + if distributed_type != DistributedType.TPU: + if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config: + mixed_precision = None + else: + mixed_precision = _ask_options( + "Do you wish to use FP16 or BF16 (mixed precision)?", + ["no", "fp16", "bf16"], + _convert_mixed_precision, + ) + else: + mixed_precision = "no" + + if use_dynamo and mixed_precision == "no" and not use_cpu: + print( + "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." 
+ )
+
+ downcast_bf16 = "no"
+ if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
+ downcast_bf16 = _ask_field(
+ "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
+ )
+
+ return ClusterConfig(
+ compute_environment=ComputeEnvironment.LOCAL_MACHINE,
+ distributed_type=distributed_type,
+ num_processes=num_processes,
+ gpu_ids=gpu_ids,
+ mixed_precision=mixed_precision,
+ downcast_bf16=downcast_bf16,
+ machine_rank=machine_rank,
+ num_machines=num_machines,
+ main_process_ip=main_process_ip,
+ main_process_port=main_process_port,
+ main_training_function=main_training_function,
+ deepspeed_config=deepspeed_config,
+ fsdp_config=fsdp_config,
+ megatron_lm_config=megatron_lm_config,
+ use_cpu=use_cpu,
+ rdzv_backend=rdzv_backend,
+ same_network=same_network,
+ tpu_name=tpu_name,
+ tpu_zone=tpu_zone,
+ commands=commands,
+ command_file=command_file,
+ dynamo_backend=dynamo_backend,
+ )
diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/config.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..72414f2abe62d76bd5133f4b0ed99bf34133f6f6
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/config.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+from accelerate.utils import ComputeEnvironment
+
+from .cluster import get_cluster_input
+from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
+from .sagemaker import get_sagemaker_input
+
+
+description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
+
+
+def get_user_input():
+ compute_environment = _ask_options(
+ "In which compute environment are you running?",
+ ["This machine", "AWS (Amazon SageMaker)"],
+ _convert_compute_environment,
+ )
+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ config = get_sagemaker_input()
+ else:
+ config = get_cluster_input()
+ return config
+
+
+def config_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser("config", description=description)
+ else:
+ parser = argparse.ArgumentParser("Accelerate config command", description=description)
+
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ help=(
+ "The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + if subparsers is not None: + parser.set_defaults(func=config_command) + return parser + + +def config_command(args): + config = get_user_input() + if args.config_file is not None: + config_file = args.config_file + else: + if not os.path.isdir(cache_dir): + os.makedirs(cache_dir) + config_file = default_yaml_config_file + + if config_file.endswith(".json"): + config.to_json_file(config_file) + else: + config.to_yaml_file(config_file) + print(f"accelerate configuration saved at {config_file}") + + +def main(): + parser = config_command_parser() + args = parser.parse_args() + config_command(args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/config_args.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/config_args.py new file mode 100644 index 0000000000000000000000000000000000000000..8b4a282928dcfb3a3483e958e5231cc2734f726d --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/config_args.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from dataclasses import dataclass +from enum import Enum +from typing import List, Optional, Union + +import yaml + +from ...utils import ComputeEnvironment, DistributedType, DynamoBackend, SageMakerDistributedType +from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION + + +hf_cache_home = os.path.expanduser( + os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) +) +cache_dir = os.path.join(hf_cache_home, "accelerate") +default_json_config_file = os.path.join(cache_dir, "default_config.yaml") +default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml") + +# For backward compatibility: the default config is the json one if it's the only existing file. 
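+# (i.e. prefer the yaml config file, and fall back to the json path only when a
+# json config exists and no yaml config does)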
+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file): + default_config_file = default_yaml_config_file +else: + default_config_file = default_json_config_file + + +def load_config_from_file(config_file): + config_file_exists = config_file is not None and os.path.isfile(config_file) + config_file = config_file if config_file_exists else default_config_file + with open(config_file, "r", encoding="utf-8") as f: + if config_file.endswith(".json"): + if ( + json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE) + == ComputeEnvironment.LOCAL_MACHINE + ): + config_class = ClusterConfig + else: + config_class = SageMakerConfig + return config_class.from_json_file(json_file=config_file) + else: + if ( + yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE) + == ComputeEnvironment.LOCAL_MACHINE + ): + config_class = ClusterConfig + else: + config_class = SageMakerConfig + return config_class.from_yaml_file(yaml_file=config_file) + + +@dataclass +class BaseConfig: + compute_environment: ComputeEnvironment + distributed_type: Union[DistributedType, SageMakerDistributedType] + mixed_precision: str + use_cpu: bool + dynamo_backend: DynamoBackend + + def to_dict(self): + result = self.__dict__ + # For serialization, it's best to convert Enums to strings (or their underlying value type). + for key, value in result.items(): + if isinstance(value, Enum): + result[key] = value.value + result = {k: v for k, v in result.items() if v is not None} + return result + + @classmethod + def from_json_file(cls, json_file=None): + json_file = default_json_config_file if json_file is None else json_file + with open(json_file, "r", encoding="utf-8") as f: + config_dict = json.load(f) + if "compute_environment" not in config_dict: + config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE + if "mixed_precision" not in config_dict: + config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None + if "fp16" in config_dict: # Convert the config to the new format. + del config_dict["fp16"] + if "use_cpu" not in config_dict: + config_dict["use_cpu"] = False + if "dynamo_backend" not in config_dict: + config_dict["dynamo_backend"] = DynamoBackend.NO + return cls(**config_dict) + + def to_json_file(self, json_file): + with open(json_file, "w", encoding="utf-8") as f: + content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + f.write(content) + + @classmethod + def from_yaml_file(cls, yaml_file=None): + yaml_file = default_yaml_config_file if yaml_file is None else yaml_file + with open(yaml_file, "r", encoding="utf-8") as f: + config_dict = yaml.safe_load(f) + if "compute_environment" not in config_dict: + config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE + + if "mixed_precision" not in config_dict: + config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None + if "fp16" in config_dict: # Convert the config to the new format. 
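+ # Legacy configs stored a bare `fp16` flag; it was folded into `mixed_precision`
+ # just above, so the old key can be dropped here.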
+ del config_dict["fp16"] + if "use_cpu" not in config_dict: + config_dict["use_cpu"] = False + if "dynamo_backend" not in config_dict: + config_dict["dynamo_backend"] = DynamoBackend.NO + + return cls(**config_dict) + + def to_yaml_file(self, yaml_file): + with open(yaml_file, "w", encoding="utf-8") as f: + yaml.safe_dump(self.to_dict(), f) + + def __post_init__(self): + if isinstance(self.compute_environment, str): + self.compute_environment = ComputeEnvironment(self.compute_environment) + if isinstance(self.distributed_type, str): + if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + self.distributed_type = SageMakerDistributedType(self.distributed_type) + else: + self.distributed_type = DistributedType(self.distributed_type) + if isinstance(self.dynamo_backend, str): + self.dynamo_backend = DynamoBackend(self.dynamo_backend.upper()) + + +@dataclass +class ClusterConfig(BaseConfig): + num_processes: int + machine_rank: int = 0 + num_machines: int = 1 + gpu_ids: Optional[str] = None + main_process_ip: Optional[str] = None + main_process_port: Optional[int] = None + rdzv_backend: Optional[str] = "static" + same_network: Optional[bool] = False + main_training_function: str = "main" + + # args for deepspeed_plugin + deepspeed_config: dict = None + # args for fsdp + fsdp_config: dict = None + # args for megatron_lm + megatron_lm_config: dict = None + # args for TPU + downcast_bf16: bool = False + + # args for TPU pods + tpu_name: str = None + tpu_zone: str = None + command_file: str = None + commands: List[str] = None + + def __post_init__(self): + if self.deepspeed_config is None: + self.deepspeed_config = {} + if self.fsdp_config is None: + self.fsdp_config = {} + if self.megatron_lm_config is None: + self.megatron_lm_config = {} + return super().__post_init__() + + +@dataclass +class SageMakerConfig(BaseConfig): + ec2_instance_type: str + iam_role_name: str + image_uri: str + profile: Optional[str] = None + region: str = "us-east-1" + num_machines: int = 1 + gpu_ids: str = "all" + base_job_name: str = f"accelerate-sagemaker-{num_machines}" + pytorch_version: str = SAGEMAKER_PYTORCH_VERSION + transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION + py_version: str = SAGEMAKER_PYTHON_VERSION + sagemaker_inputs_file: str = None + sagemaker_metrics_file: str = None diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/config_utils.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..736c3a3d5bb342b24117e95b3170538dffb87ad2 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/config_utils.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
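+# Shared helpers for the interactive `accelerate config` questionnaire. A minimal,
+# illustrative sketch of how the prompt helpers defined below combine (hypothetical
+# example, not part of the CLI itself):
+#
+#     num_machines = _ask_field("How many machines? [1]: ", int, default=1)
+#     env = _ask_options(
+#         "In which compute environment are you running?",
+#         ["This machine", "AWS (Amazon SageMaker)"],
+#         _convert_compute_environment,
+#     )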
+
+import argparse
+
+from ...utils.dataclasses import (
+    ComputeEnvironment,
+    DistributedType,
+    DynamoBackend,
+    PrecisionType,
+    SageMakerDistributedType,
+)
+from ..menu import BulletMenu
+
+
+DYNAMO_BACKENDS = [
+    "EAGER",
+    "AOT_EAGER",
+    "INDUCTOR",
+    "NVFUSER",
+    "AOT_NVFUSER",
+    "AOT_CUDAGRAPHS",
+    "OFI",
+    "FX2TRT",
+    "ONNXRT",
+    "IPEX",
+]
+
+
+def _ask_field(input_text, convert_value=None, default=None, error_message=None):
+    ask_again = True
+    while ask_again:
+        result = input(input_text)
+        try:
+            if default is not None and len(result) == 0:
+                return default
+            return convert_value(result) if convert_value is not None else result
+        except Exception:
+            if error_message is not None:
+                print(error_message)
+
+
+def _ask_options(input_text, options=[], convert_value=None, default=0):
+    menu = BulletMenu(input_text, options)
+    result = menu.run(default_choice=default)
+    return convert_value(result) if convert_value is not None else result
+
+
+def _convert_compute_environment(value):
+    value = int(value)
+    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
+
+
+def _convert_distributed_mode(value):
+    value = int(value)
+    return DistributedType(["NO", "MULTI_CPU", "MULTI_GPU", "TPU", "MPS"][value])
+
+
+def _convert_dynamo_backend(value):
+    value = int(value)
+    return DynamoBackend(DYNAMO_BACKENDS[value])
+
+
+def _convert_mixed_precision(value):
+    value = int(value)
+    return PrecisionType(["no", "fp16", "bf16"][value])
+
+
+def _convert_sagemaker_distributed_mode(value):
+    value = int(value)
+    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
+
+
+def _convert_yes_no_to_bool(value):
+    return {"yes": True, "no": False}[value.lower()]
+
+
+class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
+    """
+    A custom formatter that will remove the usage line from the help message for subcommands.
+    """
+
+    def _format_usage(self, usage, actions, groups, prefix):
+        usage = super()._format_usage(usage, actions, groups, prefix)
+        usage = usage.replace("<command> [<args>] ", "")
+        return usage
diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/default.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7ceb84ceaa772dda00d9e32f7a64afa3257a76e
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/default.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+
+import torch
+
+from .config_args import ClusterConfig, default_json_config_file
+from .config_utils import SubcommandHelpFormatter
+
+
+description = "Create a default config file for Accelerate with only a few flags set."
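+# A minimal usage sketch of the function below (hedged: assumes `write_basic_config`
+# is re-exported as `accelerate.utils.write_basic_config`, as recent releases do);
+# handy in notebooks where running the interactive `accelerate config` questionnaire
+# is impractical:
+#
+#     from accelerate.utils import write_basic_config
+#     write_basic_config(mixed_precision="fp16")  # returns False if a config file already exists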
+
+
+def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, dynamo_backend="no"):
+    """
+    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
+    set CPU if it is a CPU-only machine.
+
+    Args:
+        mixed_precision (`str`, *optional*, defaults to "no"):
+            Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
+        save_location (`str`, *optional*, defaults to `default_json_config_file`):
+            Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
+            location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting
+            the `HF_HOME` environment variable, followed by `accelerate/default_config.json`.
+        dynamo_backend (`str`, *optional*, defaults to "no"):
+            Torch dynamo backend to use to optimize the script with; "no" disables torch dynamo.
+    """
+    path = Path(save_location)
+    path.parent.mkdir(parents=True, exist_ok=True)
+    if path.exists():
+        print(
+            f"Configuration already exists at {save_location}, will not overwrite. Run `accelerate config` manually or pass a different `save_location`."
+        )
+        return False
+    mixed_precision = mixed_precision.lower()
+    if mixed_precision not in ["no", "fp16", "bf16"]:
+        raise ValueError(f"`mixed_precision` should be one of 'no', 'fp16', or 'bf16'. Received {mixed_precision}")
+    config = {
+        "compute_environment": "LOCAL_MACHINE",
+        "mixed_precision": mixed_precision,
+        "dynamo_backend": dynamo_backend,
+    }
+    if torch.cuda.is_available():
+        num_gpus = torch.cuda.device_count()
+        config["num_processes"] = num_gpus
+        config["use_cpu"] = False
+        if num_gpus > 1:
+            config["distributed_type"] = "MULTI_GPU"
+        else:
+            config["distributed_type"] = "NO"
+    else:
+        num_gpus = 0
+        config["use_cpu"] = True
+        config["num_processes"] = 1
+        config["distributed_type"] = "NO"
+    config = ClusterConfig(**config)
+    config.to_json_file(path)
+    return path
+
+
+def default_command_parser(parser, parents):
+    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+    parser.add_argument(
+        "--config_file",
+        default=default_json_config_file,
+        help=(
+            "The path to use to store the config file. Will default to a file named default_config.json in the cache "
+            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+            "with 'huggingface'."
+        ),
+        dest="save_location",
+    )
+
+    parser.add_argument(
+        "--mixed_precision",
+        choices=["no", "fp16", "bf16"],
+        type=str,
+        help="Whether or not to use mixed precision training. "
+        "Choose between FP16 and BF16 (bfloat16) training. "
+        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
+        default="no",
+    )
+    parser.set_defaults(func=default_config_command)
+    return parser
+
+
+def default_config_command(args):
+    config_file = write_basic_config(args.mixed_precision, args.save_location)
+    if config_file:
+        print(f"accelerate configuration saved at {config_file}")
diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/sagemaker.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/sagemaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..af4195f29cd41aad66c452956cc8ffdc76248de4
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/sagemaker.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os + +from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES +from ...utils.dataclasses import ComputeEnvironment, DynamoBackend, SageMakerDistributedType +from ...utils.imports import is_boto3_available +from .config_args import SageMakerConfig +from .config_utils import ( + _ask_field, + _ask_options, + _convert_dynamo_backend, + _convert_mixed_precision, + _convert_sagemaker_distributed_mode, + _convert_yes_no_to_bool, +) + + +if is_boto3_available(): + import boto3 # noqa: F401 + + +def _create_iam_role_for_sagemaker(role_name): + iam_client = boto3.client("iam") + + sagemaker_trust_policy = { + "Version": "2012-10-17", + "Statement": [ + {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} + ], + } + try: + # create the role, associated with the chosen trust policy + iam_client.create_role( + RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) + ) + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sagemaker:*", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetAuthorizationToken", + "cloudwatch:PutMetricData", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "s3:CreateBucket", + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:PutObject", + ], + "Resource": "*", + } + ], + } + # attach policy to role + iam_client.put_role_policy( + RoleName=role_name, + PolicyName=f"{role_name}_policy_permission", + PolicyDocument=json.dumps(policy_document, indent=2), + ) + except iam_client.exceptions.EntityAlreadyExistsException: + print(f"role {role_name} already exists. 
Using existing one") + + +def _get_iam_role_arn(role_name): + iam_client = boto3.client("iam") + return iam_client.get_role(RoleName=role_name)["Role"]["Arn"] + + +def get_sagemaker_input(): + credentials_configuration = _ask_options( + "How do you want to authorize?", + ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], + int, + ) + aws_profile = None + if credentials_configuration == 0: + aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default") + os.environ["AWS_PROFILE"] = aws_profile + else: + print( + "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," + "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" + ) + aws_access_key_id = _ask_field("AWS Access Key ID: ") + os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id + + aws_secret_access_key = _ask_field("AWS Secret Access Key: ") + os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key + + aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1") + os.environ["AWS_DEFAULT_REGION"] = aws_region + + role_management = _ask_options( + "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", + ["Provide IAM Role name", "Create new IAM role using credentials"], + int, + ) + if role_management == 0: + iam_role_name = _ask_field("Enter your IAM role name: ") + else: + iam_role_name = "accelerate_sagemaker_execution_role" + print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials') + _create_iam_role_for_sagemaker(iam_role_name) + + is_custom_docker_image = _ask_field( + "Do you want to use custom Docker image? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + docker_image = None + if is_custom_docker_image: + docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower()) + + is_sagemaker_inputs_enabled = _ask_field( + "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + sagemaker_inputs_file = None + if is_sagemaker_inputs_enabled: + sagemaker_inputs_file = _ask_field( + "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", + lambda x: str(x).lower(), + ) + + is_sagemaker_metrics_enabled = _ask_field( + "Do you want to enable SageMaker metrics? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + sagemaker_metrics_file = None + if is_sagemaker_metrics_enabled: + sagemaker_metrics_file = _ask_field( + "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", + lambda x: str(x).lower(), + ) + + distributed_type = _ask_options( + "What is the distributed mode?", + ["No distributed training", "Data parallelism"], + _convert_sagemaker_distributed_mode, + ) + use_dynamo = _ask_field( + "Do you wish to optimize your script with torch dynamo?[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_dynamo: + dynamo_backend = _ask_options( + "Which dynamo backend would you like to use?", + [ + "eager", + "aot_eager", + "inductor", + "nvfuser", + "aot_nvfuser", + "aot_cudagraphs", + "ofi", + "fx2trt", + "onnxrt", + "ipex", + ], + _convert_dynamo_backend, + default=2, + ) + else: + dynamo_backend = DynamoBackend.NO + ec2_instance_query = "Which EC2 instance type you want to use for your training?" + if distributed_type != SageMakerDistributedType.NO: + ec2_instance_type = _ask_options( + ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] + ) + else: + ec2_instance_query += "? [ml.p3.2xlarge]:" + ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge") + + num_machines = 1 + if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): + num_machines = _ask_field( + "How many machines do you want use? [1]: ", + int, + default=1, + ) + + mixed_precision = _ask_options( + "Do you wish to use FP16 or BF16 (mixed precision)?", + ["no", "fp16", "bf16"], + _convert_mixed_precision, + ) + + if use_dynamo and mixed_precision == "no": + print( + "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." + ) + + return SageMakerConfig( + image_uri=docker_image, + compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, + distributed_type=distributed_type, + use_cpu=False, + dynamo_backend=dynamo_backend, + ec2_instance_type=ec2_instance_type, + profile=aws_profile, + region=aws_region, + iam_role_name=iam_role_name, + mixed_precision=mixed_precision, + num_machines=num_machines, + sagemaker_inputs_file=sagemaker_inputs_file, + sagemaker_metrics_file=sagemaker_metrics_file, + ) diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/config/update.py b/testbed/huggingface__accelerate/src/accelerate/commands/config/update.py new file mode 100644 index 0000000000000000000000000000000000000000..5f025594b04ada3e3a78687befc5c1bc1d236adf --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/config/update.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
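+# Entry point presumably registered under `accelerate config` (the wiring lives in the
+# package __init__, not shown here). A typical invocation (hedged example, using the
+# default config location from config_args.py):
+#
+#     accelerate config update --config_file ~/.cache/huggingface/accelerate/default_config.yaml
+#
+# Round-tripping through `load_config_from_file` fills in any fields that were added to
+# the config format after the file was first written.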
+
+from pathlib import Path
+
+from .config_args import default_config_file, load_config_from_file
+from .config_utils import SubcommandHelpFormatter
+
+
+description = "Update an existing config file with the latest defaults while maintaining the old configuration."
+
+
+def update_config(args):
+    """
+    Update an existing config file with the latest defaults while maintaining the old configuration.
+    """
+    config_file = args.config_file
+    if config_file is None and Path(default_config_file).exists():
+        config_file = default_config_file
+    elif not Path(config_file).exists():
+        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
+    config = load_config_from_file(config_file)
+
+    if config_file.endswith(".json"):
+        config.to_json_file(config_file)
+    else:
+        config.to_yaml_file(config_file)
+    return config_file
+
+
+def update_command_parser(parser, parents):
+    parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+    parser.add_argument(
+        "--config_file",
+        default=None,
+        help=(
+            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
+            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+            "with 'huggingface'."
+        ),
+    )
+
+    parser.set_defaults(func=update_config_command)
+    return parser
+
+
+def update_config_command(args):
+    config_file = update_config(args)
+    print(f"Successfully updated the configuration file at {config_file}.")
diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/env.py b/testbed/huggingface__accelerate/src/accelerate/commands/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..a19c04d4a254c95ee60425a280f3d16015cc81c8
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/commands/env.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import platform
+
+import numpy as np
+import torch
+
+from accelerate import __version__ as version
+from accelerate.commands.config import default_config_file, load_config_from_file
+
+
+def env_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("env")
+    else:
+        parser = argparse.ArgumentParser("Accelerate env command")
+
+    parser.add_argument(
+        "--config_file", default=None, help="The config file to use for the default values in the launching script."
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=env_command)
+    return parser
+
+
+def env_command(args):
+    pt_version = torch.__version__
+    pt_cuda_available = torch.cuda.is_available()
+
+    accelerate_config = "Not found"
+    # Get the default from the config file.
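+    # (An explicitly passed --config_file takes precedence; otherwise fall back to the default location.)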
+ if args.config_file is not None or os.path.isfile(default_config_file): + accelerate_config = load_config_from_file(args.config_file).to_dict() + + info = { + "`Accelerate` version": version, + "Platform": platform.platform(), + "Python version": platform.python_version(), + "Numpy version": np.__version__, + "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", + } + + print("\nCopy-and-paste the text below in your GitHub issue\n") + print("\n".join([f"- {prop}: {val}" for prop, val in info.items()])) + + print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:") + accelerate_config_str = ( + "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()]) + if isinstance(accelerate_config, dict) + else f"\t{accelerate_config}" + ) + print(accelerate_config_str) + + info["`Accelerate` configs"] = accelerate_config + + return info + + +def main() -> int: + parser = env_command_parser() + args = parser.parse_args() + env_command(args) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/launch.py b/testbed/huggingface__accelerate/src/accelerate/commands/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ecd5436df5d0615edaeb044b0639378858e621 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/launch.py @@ -0,0 +1,1104 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
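+# `accelerate launch` entry point. After merging CLI flags with the saved config file,
+# `launch_command` below dispatches to one of: simple_launcher, multi_gpu_launcher,
+# deepspeed_launcher, tpu_launcher or sagemaker_launcher. A typical invocation
+# (illustrative values, not taken from the source):
+#
+#     accelerate launch --multi_gpu --num_processes 2 train.py --learning_rate 1e-4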
+ +import argparse +import importlib +import logging +import os +import subprocess +import sys +from ast import literal_eval +from pathlib import Path +from typing import Dict, List + +import torch + +import psutil +from accelerate.commands.config import default_config_file, load_config_from_file +from accelerate.commands.config.config_args import SageMakerConfig +from accelerate.commands.config.config_utils import DYNAMO_BACKENDS +from accelerate.state import get_int_from_env +from accelerate.utils import ( + ComputeEnvironment, + DistributedType, + DynamoBackend, + PrecisionType, + PrepareForLaunch, + _filter_args, + is_deepspeed_available, + is_rich_available, + is_sagemaker_available, + is_torch_version, + patch_environment, +) +from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS +from accelerate.utils.dataclasses import SageMakerDistributedType +from accelerate.utils.launch import env_var_path_add + + +if is_rich_available(): + from rich import get_console + from rich.logging import RichHandler + + FORMAT = "%(message)s" + logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) + + +logger = logging.getLogger(__name__) + +options_to_group = { + "--multi-gpu": "Distributed GPUs", + "--tpu": "TPU", + "--mps": "MPS", + "--use_deepspeed": "DeepSpeed Arguments", + "--use_fsdp": "FSDP Arguments", + "--use_megatron_lm": "Megatron-LM Arguments", +} + + +def clean_option(option): + "Finds all cases of - after the first two characters and changes them to _" + if option.startswith("--"): + return option[:3] + option[3:].replace("-", "_") + + +class _CustomHelpAction(argparse._HelpAction): + """ + This is a custom help action that will hide all arguments that are not used in the command line when the help is + called. This is useful for the case where the user is using a specific platform and only wants to see the arguments + for that platform. 
+ """ + + def __call__(self, parser, namespace, values, option_string=None): + if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: + args = sys.argv[2:] + else: + args = sys.argv[1:] + opts = parser._actions + titles = [ + "Hardware Selection Arguments", + "Resource Selection Arguments", + "Training Paradigm Arguments", + "positional arguments", + "optional arguments", + ] + if len(args) > 1: + used_platforms = [arg for arg in args if arg in options_to_group.keys()] + args = list(map(clean_option, args)) + used_titles = [options_to_group[o] for o in used_platforms] + for i, arg in enumerate(opts): + # If the argument's container is outside of the used titles, hide it + if arg.container.title not in titles + used_titles: + setattr(opts[i], "help", argparse.SUPPRESS) + # If the argument is hardware selection, but not being passed, hide it + elif arg.container.title == "Hardware Selection Arguments": + if set(arg.option_strings).isdisjoint(set(args)): + setattr(opts[i], "help", argparse.SUPPRESS) + else: + setattr(opts[i], "help", arg.help + " (currently selected)") + # If the argument is a training paradigm, but not being passed, hide it + elif arg.container.title == "Training Paradigm Arguments": + if set(arg.option_strings).isdisjoint(set(used_platforms)): + setattr(opts[i], "help", argparse.SUPPRESS) + else: + setattr(opts[i], "help", arg.help + " (currently selected)") + for i, group in enumerate(list(parser._action_groups)): + # If all arguments in the group are hidden, hide the group + if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]): + parser._action_groups.remove(group) + + super().__call__(parser, namespace, values, option_string) + + +def launch_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("launch", add_help=False) + else: + parser = argparse.ArgumentParser("Accelerate launch command", add_help=False) + + parser.register("action", "help", _CustomHelpAction) + parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") + + parser.add_argument( + "--config_file", default=None, help="The config file to use for the default values in the launching script." + ) + parser.add_argument( + "--quiet", + "-q", + action="store_true", + help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", + ) + # Hardware selection arguments + hardware_args = parser.add_argument_group( + "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." + ) + hardware_args.add_argument( + "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." + ) + hardware_args.add_argument( + "--mps", + default=False, + action="store_true", + help="Whether or not this should use MPS-enabled GPU device on MacOS machines.", + ) + hardware_args.add_argument( + "--multi_gpu", + default=False, + action="store_true", + help="Whether or not this should launch a distributed GPU training.", + ) + hardware_args.add_argument( + "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." + ) + + # Resource selection arguments + resource_args = parser.add_argument_group( + "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." 
+ ) + resource_args.add_argument( + "--dynamo_backend", + type=str, + choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], + help="Choose a backend to optimize your training with dynamo, see more at " + "https://github.com/pytorch/torchdynamo.", + ) + resource_args.add_argument( + "--mixed_precision", + type=str, + choices=["no", "fp16", "bf16"], + help="Whether or not to use mixed precision training. " + "Choose between FP16 and BF16 (bfloat16) training. " + "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", + ) + resource_args.add_argument( + "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." + ) + resource_args.add_argument( + "--num_machines", type=int, default=None, help="The total number of machines used in this training." + ) + resource_args.add_argument( + "--num_cpu_threads_per_process", + type=int, + default=None, + help="The number of CPU threads per process. Can be tuned for optimal performance.", + ) + + # Training Paradigm arguments + paradigm_args = parser.add_argument_group( + "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used." + ) + paradigm_args.add_argument( + "--use_deepspeed", + default=False, + action="store_true", + help="Whether to use deepspeed.", + ) + paradigm_args.add_argument( + "--use_fsdp", + default=False, + action="store_true", + help="Whether to use fsdp.", + ) + paradigm_args.add_argument( + "--use_megatron_lm", + default=False, + action="store_true", + help="Whether to use Megatron-LM.", + ) + + # distributed GPU training arguments + distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") + distributed_args.add_argument( + "--gpu_ids", + default=None, + help="What GPUs (by id) should be used for training on this machine as a comma-seperated list", + ) + distributed_args.add_argument( + "--same_network", + default=False, + action="store_true", + help="Whether all machines used for multinode training exist on the same local network.", + ) + distributed_args.add_argument( + "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." + ) + distributed_args.add_argument( + "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0." + ) + distributed_args.add_argument( + "--main_process_port", + type=int, + default=None, + help="The port to use to communicate with the machine of rank 0.", + ) + # Rendezvous related arguments + distributed_args.add_argument( + "--rdzv_conf", + type=str, + default="", + help="Additional rendezvous configuration (=,=,...).", + ) + distributed_args.add_argument( + "--max_restarts", + type=int, + default=0, + help="Maximum number of worker group restarts before failing.", + ) + distributed_args.add_argument( + "--monitor_interval", + type=float, + default=5, + help="Interval, in seconds, to monitor the state of workers.", + ) + parser.add_argument( + "-m", + "--module", + action="store_true", + help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", + ) + parser.add_argument( + "--no_python", + action="store_true", + help="Skip prepending the training script with 'python' - just execute it directly. 
"Useful when the script is not a Python script.",
+    )
+
+    # tpu arguments
+    tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
+    tpu_args.add_argument(
+        "--main_training_function",
+        type=str,
+        default=None,
+        help="The name of the main function to be executed in your script (only for TPU training).",
+    )
+    tpu_args.add_argument(
+        "--downcast_bf16",
+        action="store_true",
+        help="When using bf16 precision on TPUs, whether both float and double tensors are cast to bfloat16, or double tensors remain as float32.",
+    )
+
+    # DeepSpeed arguments
+    deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
+    deepspeed_args.add_argument(
+        "--deepspeed_config_file",
+        default=None,
+        type=str,
+        help="DeepSpeed config file.",
+    )
+    deepspeed_args.add_argument(
+        "--zero_stage",
+        default=None,
+        type=int,
+        help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to `2`.",
+    )
+    deepspeed_args.add_argument(
+        "--offload_optimizer_device",
+        default=None,
+        type=str,
+        help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to 'none'.",
+    )
+    deepspeed_args.add_argument(
+        "--offload_param_device",
+        default=None,
+        type=str,
+        help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to 'none'.",
+    )
+    deepspeed_args.add_argument(
+        "--gradient_accumulation_steps",
+        default=None,
+        type=int,
+        help="Number of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to `1`.",
+    )
+    deepspeed_args.add_argument(
+        "--gradient_clipping",
+        default=None,
+        type=float,
+        help="Gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
+        "If unspecified, will default to `1.0`.",
+    )
+    deepspeed_args.add_argument(
+        "--zero3_init_flag",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
+        "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
+    )
+    deepspeed_args.add_argument(
+        "--zero3_save_16bit_model",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
+        "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_hostfile",
+        default=None,
+        type=str,
+        help="DeepSpeed hostfile for configuring multi-node compute resources.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_exclusion_filter",
+        default=None,
+        type=str,
+        help="DeepSpeed exclusion filter string when using multi-node setup.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_inclusion_filter",
+        default=None,
+        type=str,
+        help="DeepSpeed inclusion filter string when using multi-node setup.",
+    )
+    deepspeed_args.add_argument(
+        "--deepspeed_multinode_launcher",
+        default=None,
+        type=str,
+        help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
+    )
+
+    # fsdp arguments
+    fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
+    fsdp_args.add_argument(
+        "--fsdp_offload_params",
+        default="false",
+        type=str,
+        help="Decides whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_min_num_params",
+        type=int,
+        default=1e8,
+        help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_sharding_strategy",
+        type=int,
+        default=1,
+        help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_auto_wrap_policy",
+        type=str,
+        default=None,
+        help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_transformer_layer_cls_to_wrap",
+        default=None,
+        type=str,
+        help="Transformer layer class name (case-sensitive) to wrap, e.g. `BertLayer`, `GPTJBlock`, `T5Block` ... "
+        "(useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_backward_prefetch_policy",
+        default=None,
+        type=str,
+        help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
+    )
+    fsdp_args.add_argument(
+        "--fsdp_state_dict_type",
+        default=None,
+        type=str,
+        help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
+    )
+
+    # megatron_lm args
+    megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
+    megatron_lm_args.add_argument(
+        "--megatron_lm_tp_degree",
+        type=int,
+        default=1,
+        help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_pp_degree",
+        type=int,
+        default=1,
+        help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_num_micro_batches",
+        type=int,
+        default=None,
+        help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_sequence_parallelism",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_recompute_activations",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to enable Selective Activation Recomputation. "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_use_distributed_optimizer",
+        default=None,
+        type=str,
+        help="Decides whether (true|false) to use distributed optimizer "
+        "which shards optimizer state and gradients across Data Parallel (DP) ranks. "
+        "(useful only when `use_megatron_lm` flag is passed).",
+    )
+    megatron_lm_args.add_argument(
+        "--megatron_lm_gradient_clipping",
+        default=1.0,
+        type=float,
+        help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). 
" + "(useful only when `use_megatron_lm` flag is passed).", + ) + + # AWS arguments + aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") + aws_args.add_argument( + "--aws_access_key_id", + type=str, + default=None, + help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", + ) + aws_args.add_argument( + "--aws_secret_access_key", + type=str, + default=None, + help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", + ) + parser.add_argument( + "--debug", + action="store_true", + help="Whether to print out the torch.distributed stack trace when something fails.", + ) + parser.add_argument( + "training_script", + type=str, + help=( + "The full path to the script to be launched in parallel, followed by all the arguments for the training " + "script." + ), + ) + + # Other arguments of the training scripts + parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") + + if subparsers is not None: + parser.set_defaults(func=launch_command) + return parser + + +def simple_launcher(args): + cmd = [] + if args.no_python and args.module: + raise ValueError("--module and --no_python cannot be used together") + if not args.no_python: + cmd.append(sys.executable) + if args.module: + cmd.append("-m") + cmd.append(args.training_script) + cmd.extend(args.training_script_args) + + current_env = os.environ.copy() + current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) + current_env["ACCELERATE_USE_MPS_DEVICE"] = str(args.mps) + if args.mps: + current_env["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + elif args.gpu_ids != "all" and args.gpu_ids is not None: + current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids + if args.num_machines > 1: + current_env["MASTER_ADDR"] = args.main_process_ip + current_env["MASTER_PORT"] = str(args.main_process_port) + elif args.num_processes > 1: + current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" + current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" + + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. 
Choose between {DYNAMO_BACKENDS}.") + current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value + + current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + + +def multi_gpu_launcher(args): + if is_torch_version(">=", "1.9.1"): + import torch.distributed.run as distrib_run + else: + raise NotImplementedError("Native multi-GPU training requires pytorch>=1.9.1") + num_processes = getattr(args, "num_processes") + num_machines = getattr(args, "num_machines") + main_process_ip = getattr(args, "main_process_ip") + main_process_port = getattr(args, "main_process_port") + if num_machines > 1: + setattr(args, "nproc_per_node", str(num_processes // num_machines)) + setattr(args, "nnodes", str(num_machines)) + setattr(args, "node_rank", int(args.machine_rank)) + if getattr(args, "same_network", False): + setattr(args, "master_addr", str(main_process_ip)) + setattr(args, "master_port", str(main_process_port)) + else: + setattr(args, "rdzv_endpoint", f"{main_process_ip}:{main_process_port}") + else: + setattr(args, "nproc_per_node", str(num_processes)) + if main_process_port is not None: + setattr(args, "master_port", str(main_process_port)) + + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + setattr(args, "module", True) + elif args.no_python: + setattr(args, "no_python", True) + + current_env = os.environ.copy() + gpu_ids = getattr(args, "gpu_ids", "all") + if gpu_ids != "all" and args.gpu_ids is not None: + current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids + mixed_precision = args.mixed_precision.lower() + try: + mixed_precision = PrecisionType(mixed_precision) + except ValueError: + raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") + + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. 
Choose between {DYNAMO_BACKENDS}.") + current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value + + if args.use_fsdp: + current_env["ACCELERATE_USE_FSDP"] = "true" + current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) + current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() + current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) + if args.fsdp_auto_wrap_policy is not None: + current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) + if args.fsdp_transformer_layer_cls_to_wrap is not None: + current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) + if args.fsdp_backward_prefetch_policy is not None: + current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch_policy) + if args.fsdp_state_dict_type is not None: + current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) + + if args.use_megatron_lm: + prefix = "MEGATRON_LM_" + current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" + current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) + current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) + current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) + if args.megatron_lm_num_micro_batches is not None: + current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) + if args.megatron_lm_sequence_parallelism is not None: + current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) + if args.megatron_lm_recompute_activations is not None: + current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) + if args.megatron_lm_use_distributed_optimizer is not None: + current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) + + current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) + + debug = getattr(args, "debug", False) + args = _filter_args(args) + with patch_environment(**current_env): + try: + distrib_run.run(args) + except: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + + +def deepspeed_launcher(args): + if is_torch_version(">=", "1.9.1"): + import torch.distributed.run as distrib_run + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") + num_processes = getattr(args, "num_processes") + num_machines = getattr(args, "num_machines") + main_process_ip = getattr(args, "main_process_ip") + main_process_port = getattr(args, "main_process_port") + + # make sure launcher is not None + if args.deepspeed_multinode_launcher is None: + # set to default pdsh + setattr(args, "deepspeed_multinode_launcher", DEEPSPEED_MULTINODE_LAUNCHERS[0]) + + if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + cmd = ["deepspeed", "--no_local_rank"] + cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)]) + if args.deepspeed_exclusion_filter is not None: + cmd.extend( + [ + "--exclude", + str(args.deepspeed_exclusion_filter), + ] + ) + elif args.deepspeed_inclusion_filter is not None: + cmd.extend( + [ + "--include", + str(args.deepspeed_inclusion_filter), + ] + ) + else: + cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) + 
cmd.extend(["--master_port", str(main_process_port)]) + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + cmd.append("--module") + elif args.no_python: + cmd.append("--no_python") + cmd.append(args.training_script) + cmd.extend(args.training_script_args) + elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: + setattr(args, "nproc_per_node", str(num_processes // num_machines)) + setattr(args, "nnodes", str(num_machines)) + setattr(args, "node_rank", int(args.machine_rank)) + if getattr(args, "same_network", False): + setattr(args, "master_addr", str(main_process_ip)) + setattr(args, "master_port", str(main_process_port)) + else: + setattr(args, "rdzv_endpoint", f"{main_process_ip}:{main_process_port}") + else: + setattr(args, "nproc_per_node", str(num_processes)) + if main_process_port is not None: + setattr(args, "master_port", str(main_process_port)) + + if args.module and args.no_python: + raise ValueError("--module and --no_python cannot be used together") + elif args.module: + setattr(args, "module", True) + elif args.no_python: + setattr(args, "no_python", True) + + current_env = os.environ.copy() + gpu_ids = getattr(args, "gpu_ids", "all") + if gpu_ids != "all" and args.gpu_ids is not None: + current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath(".")) + current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) + current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower() + current_env["ACCELERATE_USE_DEEPSPEED"] = "true" + if args.zero_stage is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage) + if args.gradient_accumulation_steps is not None: + current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps) + if args.gradient_clipping is not None: + current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower() + if args.offload_optimizer_device is not None: + current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower() + if args.offload_param_device is not None: + current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower() + if args.zero3_init_flag is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower() + if args.zero3_save_16bit_model is not None: + current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower() + if args.deepspeed_config_file is not None: + current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file) + + if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + with open(".deepspeed_env", "a") as f: + for key, value in current_env.items(): + if ";" in value or " " in value: + continue + f.write(f"{key}={value}\n") + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + else: + if is_torch_version("<", "1.9.1"): + raise 
NotImplementedError("Multi-node training requires pytorch>=1.9.1") + + debug = getattr(args, "debug", False) + args = _filter_args(args) + with patch_environment(**current_env): + try: + distrib_run.run(args) + except: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + + +def tpu_launcher(args): + import torch_xla.distributed.xla_multiprocessing as xmp + + current_env = {} + + if args.no_python: + raise ValueError("--no_python cannot be used with TPU launcher") + + if args.mixed_precision == "bf16": + if args.downcast_bf16: + current_env["XLA_USE_BF16"] = "0" + current_env["XLA_DOWNCAST_BF16"] = "1" + else: + current_env["XLA_USE_BF16"] = "1" + current_env["XLA_DOWNCAST_BF16"] = "0" + + if args.module: + mod_name = args.training_script + else: + # Import training_script as a module + script_path = Path(args.training_script) + sys.path.append(str(script_path.parent.resolve())) + mod_name = script_path.stem + + mod = importlib.import_module(mod_name) + if not hasattr(mod, args.main_training_function): + raise ValueError( + f"Your training script should have a function named {args.main_training_function}, or you should pass a " + "different value to `--main_training_function`." + ) + + # Patch sys.argv + sys.argv = [mod.__file__] + args.training_script_args + + main_function = getattr(mod, args.main_training_function) + with patch_environment(**current_env): + xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes) + + +def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]: + if len(nargs) < 0: + return {} + # helper function to infer type for argsparser + + def _infer_type(s): + try: + s = float(s) + + if s // 1 == s: + return int(s) + return s + except ValueError: + return s + + parser = argparse.ArgumentParser() + _, unknown = parser.parse_known_args(nargs) + for index, argument in enumerate(unknown): + if argument.startswith(("-", "--")): + action = None + if index + 1 < len(unknown): # checks if next index would be in list + if unknown[index + 1].startswith(("-", "--")): # checks if next element is an key + # raise an error if element is store_true or store_false + raise ValueError( + "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types" + ) + else: # raise an error if last element is store_true or store_false + raise ValueError( + "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types" + ) + # adds argument to parser based on action_store true + if action is None: + parser.add_argument(argument, type=_infer_type) + else: + parser.add_argument(argument, action=action) + + return { + key: (literal_eval(value) if value in ("True", "False") else value) + for key, value in parser.parse_args(nargs).__dict__.items() + } + + +def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): + if not is_sagemaker_available(): + raise ImportError( + "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" + ) + if args.module or args.no_python: + raise ValueError( + "SageMaker requires a python training script file and cannot be used with --module or --no_python" + ) + + from sagemaker.huggingface import HuggingFace + + # configure environment + print("Configuring Amazon SageMaker environment") + os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region + + # configure credentials + if sagemaker_config.profile is not None: + os.environ["AWS_PROFILE"] = sagemaker_config.profile + elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None: + os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id + os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key + else: + raise EnvironmentError( + "You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile" + ) + + # extract needed arguments + source_dir = os.path.dirname(args.training_script) + if not source_dir: # checks if string is empty + source_dir = "." + entry_point = os.path.basename(args.training_script) + if not entry_point.endswith(".py"): + raise ValueError(f'Your training script should be a python script and not "{entry_point}"') + + print("Converting Arguments to Hyperparameters") + hyperparameters = _convert_nargs_to_dict(args.training_script_args) + + try: + mixed_precision = PrecisionType(args.mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + try: + dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) + except ValueError: + raise ValueError(f"Unknown dynamo backend: {args.dynamo_backend.upper()}. 
Choose between {DYNAMO_BACKENDS}.") + + # Environment variables to be set for use during training job + environment = { + "ACCELERATE_USE_SAGEMAKER": "true", + "ACCELERATE_MIXED_PRECISION": str(mixed_precision), + "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value, + "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value, + } + # configure distribution set up + distribution = None + if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL: + distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} + + # configure sagemaker inputs + sagemaker_inputs = None + if sagemaker_config.sagemaker_inputs_file is not None: + print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file") + sagemaker_inputs = {} + with open(sagemaker_config.sagemaker_inputs_file) as file: + for i, line in enumerate(file): + if i == 0: + continue + l = line.split("\t") + sagemaker_inputs[l[0]] = l[1].strip() + print(f"Loaded SageMaker Inputs: {sagemaker_inputs}") + + # configure sagemaker metrics + sagemaker_metrics = None + if sagemaker_config.sagemaker_metrics_file is not None: + print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file") + sagemaker_metrics = [] + with open(sagemaker_config.sagemaker_metrics_file) as file: + for i, line in enumerate(file): + if i == 0: + continue + l = line.split("\t") + metric_dict = { + "Name": l[0], + "Regex": l[1].strip(), + } + sagemaker_metrics.append(metric_dict) + print(f"Loaded SageMaker Metrics: {sagemaker_metrics}") + + # configure session + print("Creating Estimator") + huggingface_estimator = HuggingFace( + image_uri=sagemaker_config.image_uri, + entry_point=entry_point, + source_dir=source_dir, + role=sagemaker_config.iam_role_name, + transformers_version=sagemaker_config.transformers_version, + pytorch_version=sagemaker_config.pytorch_version, + py_version=sagemaker_config.py_version, + base_job_name=sagemaker_config.base_job_name, + instance_count=sagemaker_config.num_machines, + instance_type=sagemaker_config.ec2_instance_type, + debugger_hook_config=False, + distribution=distribution, + hyperparameters=hyperparameters, + environment=environment, + metric_definitions=sagemaker_metrics, + ) + + huggingface_estimator.fit(inputs=sagemaker_inputs) + print(f"You can find your model data at: {huggingface_estimator.model_data}") + + +def launch_command(args): + # Sanity checks + if sum([args.multi_gpu, args.cpu, args.tpu, args.mps, args.use_deepspeed, args.use_fsdp]) > 1: + raise ValueError( + "You can only use one of `--cpu`, `--multi_gpu`, `--mps`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." + ) + if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): + raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") + + defaults = None + warned = [] + mp_from_config_flag = False + # Get the default from the config file. 
+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: + defaults = load_config_from_file(args.config_file) + if ( + not args.multi_gpu + and not args.tpu + and not args.mps + and not args.use_deepspeed + and not args.use_fsdp + and not args.use_megatron_lm + ): + args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED + args.multi_gpu = defaults.distributed_type == DistributedType.MULTI_GPU + args.tpu = defaults.distributed_type == DistributedType.TPU + args.use_fsdp = defaults.distributed_type == DistributedType.FSDP + args.mps = defaults.distributed_type == DistributedType.MPS + args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM + if not args.mps: + if args.gpu_ids is None: + if defaults.gpu_ids is not None: + args.gpu_ids = defaults.gpu_ids + else: + args.gpu_ids = "all" + if len(args.gpu_ids.split(",")) < 2 and args.multi_gpu and (args.gpu_ids != "all"): + args.multi_gpu = False + if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: + # Update args with the defaults + for name, attr in defaults.__dict__.items(): + if isinstance(attr, dict): + for k in defaults.deepspeed_config: + setattr(args, k, defaults.deepspeed_config[k]) + for k in defaults.fsdp_config: + arg_to_set = k + if "fsdp" not in arg_to_set: + arg_to_set = "fsdp_" + arg_to_set + setattr(args, arg_to_set, defaults.fsdp_config[k]) + for k in defaults.megatron_lm_config: + setattr(args, k, defaults.megatron_lm_config[k]) + continue + + # Those args are handled separately + if ( + name not in ["compute_environment", "mixed_precision", "distributed_type"] + and getattr(args, name, None) is None + ): + setattr(args, name, attr) + if not args.mixed_precision: + if defaults.mixed_precision is None: + args.mixed_precision = "no" + else: + args.mixed_precision = defaults.mixed_precision + mp_from_config_flag = True + + if args.dynamo_backend is None: + warned.append("\t`--dynamo_backend` was set to a value of `'no'`") + args.dynamo_backend = "no" + else: + if args.num_processes is None: + args.num_processes = torch.cuda.device_count() if args.multi_gpu else 1 + warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`") + if args.num_machines is None: + warned.append("\t`--num_machines` was set to a value of `1`") + args.num_machines = 1 + if args.mixed_precision is None: + warned.append("\t`--mixed_precision` was set to a value of `'no'`") + args.mixed_precision = "no" + if not hasattr(args, "use_cpu"): + args.use_cpu = args.cpu + if args.dynamo_backend is None: + warned.append("\t`--dynamo_backend` was set to a value of `'no'`") + args.dynamo_backend = "no" + + if args.num_cpu_threads_per_process is None: + args.num_cpu_threads_per_process = 1 + if args.use_cpu and args.num_processes > 1: + local_size = get_int_from_env( + ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1 + ) + threads_per_process = int(psutil.cpu_count(logical=False) / local_size) + if threads_per_process > 1: + args.num_cpu_threads_per_process = threads_per_process + warned.append( + f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" + ) + + if any(warned): + message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" + message += "\n".join(warned) + message += ( + "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." 
+ ) + logger.warning(message) + + # Use the proper launcher + if args.use_deepspeed and not args.cpu: + args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] + if mp_from_config_flag: + args.deepspeed_fields_from_accelerate_config.append("mixed_precision") + args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) + deepspeed_launcher(args) + elif args.use_fsdp and not args.cpu: + multi_gpu_launcher(args) + elif args.use_megatron_lm and not args.cpu: + multi_gpu_launcher(args) + elif args.multi_gpu and not args.cpu: + multi_gpu_launcher(args) + elif args.tpu and not args.cpu: + tpu_launcher(args) + elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + sagemaker_launcher(defaults, args) + else: + simple_launcher(args) + + +def main(): + parser = launch_command_parser() + args = parser.parse_args() + launch_command(args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/menu/__init__.py b/testbed/huggingface__accelerate/src/accelerate/commands/menu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec17fba58159837dfd36d11ec47cdc0e58eea2e4 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/menu/__init__.py @@ -0,0 +1,5 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all + +from .selection_menu import BulletMenu diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/menu/cursor.py b/testbed/huggingface__accelerate/src/accelerate/commands/menu/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f0bb7b68025ae4fe0c2c76c095eb36b4e64f2c --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/menu/cursor.py @@ -0,0 +1,65 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet +""" + +import os +import sys +from contextlib import contextmanager + + +# Windows only +if os.name == "nt": + import ctypes + import msvcrt # noqa + + class CursorInfo(ctypes.Structure): + # _fields is a specific attr expected by ctypes + _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] + + +def hide_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = False + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25l") + sys.stdout.flush() + + +def show_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = True + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25h") + sys.stdout.flush() + + +@contextmanager +def hide(): + "Context manager to hide the terminal cursor" + try: + hide_cursor() + yield + finally: + show_cursor() diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/menu/helpers.py b/testbed/huggingface__accelerate/src/accelerate/commands/menu/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..de46f37ddcf4591167e3e01791391e4b1729034f --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/menu/helpers.py @@ -0,0 +1,59 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +A variety of helper functions and constants when dealing with terminal menu choices, based on +https://github.com/bchao1/bullet +""" + +import enum +import shutil +import sys + + +TERMINAL_WIDTH, _ = shutil.get_terminal_size() + +CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} + + +class Direction(enum.Enum): + UP = 0 + DOWN = 1 + + +def forceWrite(content, end=""): + sys.stdout.write(str(content) + end) + sys.stdout.flush() + + +def writeColor(content, color, end=""): + forceWrite(f"\u001b[{color}m{content}\u001b[0m", end) + + +def reset_cursor(): + forceWrite("\r") + + +def move_cursor(num_lines: int, direction: str): + forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}") + + +def clear_line(): + forceWrite(" " * TERMINAL_WIDTH) + reset_cursor() + + +def linebreak(): + reset_cursor() + forceWrite("-" * TERMINAL_WIDTH) diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/menu/input.py b/testbed/huggingface__accelerate/src/accelerate/commands/menu/input.py new file mode 100644 index 0000000000000000000000000000000000000000..266f7e7dba33e045073f935fb3a16e4eef1ddf8a --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/menu/input.py @@ -0,0 +1,86 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +This file contains utilities for handling input from the user and registering specific keys to specific functions, +based on https://github.com/bchao1/bullet +""" + +from typing import List + +from .keymap import KEYMAP, get_character + + +def mark(key: str): + """ + Mark the function with the key code so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += [key] + setattr(func, "handle_key", handle) + return func + + return decorator + + +def mark_multiple(*keys: List[str]): + """ + Mark the function with the key codes so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += keys + setattr(func, "handle_key", handle) + return func + + return decorator + + +class KeyHandler(type): + """ + Metaclass that adds the key handlers to the class + """ + + def __new__(cls, name, bases, attrs): + new_cls = super().__new__(cls, name, bases, attrs) + if not hasattr(new_cls, "key_handler"): + setattr(new_cls, "key_handler", {}) + setattr(new_cls, "handle_input", KeyHandler.handle_input) + + for value in attrs.values(): + handled_keys = getattr(value, "handle_key", []) + for key in handled_keys: + new_cls.key_handler[key] = value + return new_cls + + @staticmethod + def handle_input(cls): + "Finds and returns the selected character if it exists in the handler" + char = get_character() + if char != KEYMAP["undefined"]: + char = ord(char) + handler = cls.key_handler.get(char) + if handler: + cls.current_selection = char + return handler(cls) + else: + return None + + +def register(cls): + """Adds KeyHandler metaclass to the class""" + return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy()) diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/menu/keymap.py b/testbed/huggingface__accelerate/src/accelerate/commands/menu/keymap.py new file mode 100644 index 0000000000000000000000000000000000000000..3f08236b32d3eb694a98e65de52d0699d60f6835 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/menu/keymap.py @@ -0,0 +1,133 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet +""" + + +import os +import string +import sys + + +ARROW_KEY_FLAG = 1 << 8 + +KEYMAP = { + "tab": ord("\t"), + "newline": ord("\r"), + "esc": 27, + "up": 65 + ARROW_KEY_FLAG, + "down": 66 + ARROW_KEY_FLAG, + "right": 67 + ARROW_KEY_FLAG, + "left": 68 + ARROW_KEY_FLAG, + "mod_int": 91, + "undefined": sys.maxsize, + "interrupt": 3, + "insert": 50, + "delete": 51, + "pg_up": 53, + "pg_down": 54, +} + +KEYMAP["arrow_begin"] = KEYMAP["up"] +KEYMAP["arrow_end"] = KEYMAP["left"] + +if sys.platform == "win32": + WIN_CH_BUFFER = [] + WIN_KEYMAP = { + b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, + b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, + b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, + b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, + b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, + b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, + b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, + b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, + } + +for i in range(10): + KEYMAP[str(i)] = ord(str(i)) + + +def get_raw_chars(): + "Gets raw characters from inputs" + if os.name == "nt": + import msvcrt + + encoding = "mbcs" + # Flush the keyboard buffer + while msvcrt.kbhit(): + msvcrt.getwch() + if len(WIN_CH_BUFFER) == 0: + # Read the keystroke + ch = msvcrt.getwch() + # If it is a prefix char, get second part + if ch.encode(encoding) in (b"\x00", b"\xe0"): + ch2 = ch + msvcrt.getwch() + # Translate actual Win chars to bullet char types + try: + chx = chr(WIN_KEYMAP[ch2.encode(encoding)]) + WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"])) + WIN_CH_BUFFER.append(chx) + if ord(chx) in ( + KEYMAP["insert"] - 1 << 9, + KEYMAP["delete"] - 1 << 9, + KEYMAP["pg_up"] - 1 << 9, + KEYMAP["pg_down"] - 1 << 9, + ): + WIN_CH_BUFFER.append(chr(126)) + ch = chr(KEYMAP["esc"]) + except KeyError: + ch = ch2[1] + else: + pass + else: + ch = WIN_CH_BUFFER.pop(0) + elif os.name == "posix": + import termios + import tty + + fd = sys.stdin.fileno() + old_settings = termios.tcgetattr(fd) + try: + tty.setraw(fd) + ch = sys.stdin.read(1) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + return ch + + +def get_character(): + "Gets a character from the keyboard and returns the key code" + char = get_raw_chars() + if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]: + return char + + elif ord(char) == KEYMAP["esc"]: + combo = get_raw_chars() + if ord(combo) == KEYMAP["mod_int"]: + key = get_raw_chars() + if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: + return chr(ord(key) + ARROW_KEY_FLAG) + else: + return KEYMAP["undefined"] + else: + return get_raw_chars() + + else: + if char in string.printable: + return char + else: + return KEYMAP["undefined"] diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/menu/selection_menu.py b/testbed/huggingface__accelerate/src/accelerate/commands/menu/selection_menu.py new file mode 100644 index 0000000000000000000000000000000000000000..d5a1c8e35a2e6a99294dedcbcf37ddafa86486ad --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/menu/selection_menu.py @@ -0,0 +1,125 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Main driver for the selection menu, based on https://github.com/bchao1/bullet +""" +import sys + +from . import cursor, input +from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor +from .keymap import KEYMAP + + +@input.register +class BulletMenu: + """ + A CLI menu to select a choice from a list of choices using the keyboard. + """ + + def __init__(self, prompt: str = None, choices: list = []): + self.position = 0 + self.choices = choices + self.prompt = prompt + if sys.platform == "win32": + self.arrow_char = "*" + else: + self.arrow_char = "➔ " + + def write_choice(self, index, end: str = ""): + if sys.platform != "win32": + writeColor(self.choices[index], 32, end) + else: + forceWrite(self.choices[index], end) + + def print_choice(self, index: int): + "Prints the choice at the given index" + if index == self.position: + forceWrite(f" {self.arrow_char} ") + self.write_choice(index) + else: + forceWrite(f" {self.choices[index]}") + reset_cursor() + + def move_direction(self, direction: Direction, num_spaces: int = 1): + "Should not be directly called, used to move a direction of either up or down" + old_position = self.position + if direction == Direction.DOWN: + if self.position + 1 >= len(self.choices): + return + self.position += num_spaces + else: + if self.position - 1 < 0: + return + self.position -= num_spaces + clear_line() + self.print_choice(old_position) + move_cursor(num_spaces, direction.name) + self.print_choice(self.position) + + @input.mark(KEYMAP["up"]) + def move_up(self): + self.move_direction(Direction.UP) + + @input.mark(KEYMAP["down"]) + def move_down(self): + self.move_direction(Direction.DOWN) + + @input.mark(KEYMAP["newline"]) + def select(self): + move_cursor(len(self.choices) - self.position, "DOWN") + return self.position + + @input.mark(KEYMAP["interrupt"]) + def interrupt(self): + move_cursor(len(self.choices) - self.position, "DOWN") + raise KeyboardInterrupt + + @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)]) + def select_row(self): + index = int(chr(self.current_selection)) + movement = index - self.position + if index == self.position: + return + if index < len(self.choices): + if self.position > index: + self.move_direction(Direction.UP, -movement) + elif self.position < index: + self.move_direction(Direction.DOWN, movement) + else: + return + else: + return + + def run(self, default_choice: int = 0): + "Start the menu and return the selected choice" + if self.prompt: + linebreak() + forceWrite(self.prompt, "\n") + forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") + self.position = default_choice + for i in range(len(self.choices)): + self.print_choice(i) + forceWrite("\n") + move_cursor(len(self.choices) - self.position, "UP") + with cursor.hide(): + while True: + choice = self.handle_input() + if choice is not None: + reset_cursor() + for _ in range(len(self.choices) + 1): + move_cursor(1, "UP") + clear_line() + self.write_choice(choice, "\n") + return choice diff --git 
a/testbed/huggingface__accelerate/src/accelerate/commands/test.py b/testbed/huggingface__accelerate/src/accelerate/commands/test.py new file mode 100644 index 0000000000000000000000000000000000000000..41da7559679995bfdda106750c9e8c7112010a10 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/test.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +from accelerate.test_utils import execute_subprocess_async + + +def test_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("test") + else: + parser = argparse.ArgumentParser("Accelerate test command") + + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + if subparsers is not None: + parser.set_defaults(func=test_command) + return parser + + +def test_command(args): + script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"]) + + test_args = f""" + --config_file={args.config_file} {script_name} + """.split() + cmd = ["accelerate-launch"] + test_args + result = execute_subprocess_async(cmd, env=os.environ.copy()) + if result.returncode == 0: + print("Test is a success! You are ready for your distributed training!") + + +def main(): + parser = test_command_parser() + args = parser.parse_args() + test_command(args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/commands/tpu.py b/testbed/huggingface__accelerate/src/accelerate/commands/tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..6b90770c750ec0190da72d6dff417866dd391a51 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/commands/tpu.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import argparse
+import os
+import subprocess
+
+from accelerate.commands.config.config_args import default_config_file, load_config_from_file
+from packaging.version import Version, parse
+
+
+_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
+
+
+def tpu_command_parser(subparsers=None):
+    if subparsers is not None:
+        parser = subparsers.add_parser("tpu-config", description=_description)
+    else:
+        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
+    # Core arguments
+    config_args = parser.add_argument_group(
+        "Config Arguments", "Arguments that can be configured through `accelerate config`."
+    )
+    config_args.add_argument(
+        "--config_file",
+        type=str,
+        default=None,
+        help="Path to the config file to use for accelerate.",
+    )
+    config_args.add_argument(
+        "--tpu_name",
+        default=None,
+        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
+    )
+    config_args.add_argument(
+        "--tpu_zone",
+        default=None,
+        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
+    )
+    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
+    pod_args.add_argument(
+        "--command_file",
+        default=None,
+        help="The path to the file containing the commands to run on the pod on startup.",
+    )
+    pod_args.add_argument(
+        "--command",
+        action="append",
+        nargs="+",
+        help="A command to run on the pod. Can be passed multiple times.",
+    )
+    pod_args.add_argument(
+        "--install_accelerate",
+        action="store_true",
+        help="Whether to install accelerate on the pod. Defaults to False.",
+    )
+    pod_args.add_argument(
+        "--accelerate_version",
+        default="latest",
+        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
+    )
+    pod_args.add_argument(
+        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
+    )
+
+    if subparsers is not None:
+        parser.set_defaults(func=tpu_command_launcher)
+    return parser
+
+
+def tpu_command_launcher(args):
+    defaults = None
+
+    # Get the default from the config file if it exists. 
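+    # A hypothetical invocation this resolution supports (the TPU name and zone
+    # are placeholders):
+    #   accelerate tpu-config --command "python train.py" \
+    #       --tpu_name my-tpu --tpu_zone us-central1-a --install_accelerate
+    # Anything not passed on the command line is read from the saved config file.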
+ if args.config_file is not None or os.path.isfile(default_config_file): + defaults = load_config_from_file(args.config_file) + if not args.command_file and defaults.command_file is not None and not args.command: + args.command_file = defaults.command_file + if not args.command and defaults.commands is not None: + args.command = defaults.commands + if not args.tpu_name: + args.tpu_name = defaults.tpu_name + if not args.tpu_zone: + args.tpu_zone = defaults.tpu_zone + if args.accelerate_version == "dev": + args.accelerate_version = "git+https://github.com/huggingface/accelerate.git" + elif args.accelerate_version == "latest": + args.accelerate_version = "accelerate -U" + elif isinstance(parse(args.accelerate_version), Version): + args.accelerate_version = f"accelerate=={args.accelerate_version}" + + if not args.command_file and not args.command: + raise ValueError("You must specify either a command file or a command to run on the pod.") + + if args.command_file: + with open(args.command_file, "r") as f: + args.command = [f.read().splitlines()] + + # To turn list of lists into list of strings + if isinstance(args.command[0], list): + args.command = [line for cmd in args.command for line in cmd] + # Default to the shared folder and install accelerate + new_cmd = ["cd /usr/share"] + if args.install_accelerate: + new_cmd += [f"pip install {args.accelerate_version}"] + new_cmd += args.command + args.command = "; ".join(new_cmd) + + # Then send it to gcloud + # Eventually try to use google-api-core to do this instead of subprocess + cmd = [ + "gcloud", + "compute", + "tpus", + "tpu-vm", + "ssh", + args.tpu_name, + "--zone", + args.tpu_zone, + "--command", + args.command, + "--worker", + "all", + ] + if args.debug: + print(f"Running {' '.join(cmd)}") + return + subprocess.run(cmd) + print("Successfully setup pod.") + + +def main(): + parser = tpu_command_parser() + args = parser.parse_args() + + tpu_command_launcher(args) diff --git a/testbed/huggingface__accelerate/src/accelerate/data_loader.py b/testbed/huggingface__accelerate/src/accelerate/data_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..7dafed0e5e9835b6855542ff9e3693717e5f4b74 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/data_loader.py @@ -0,0 +1,749 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from contextlib import suppress +from typing import List, Optional, Union + +import torch +from torch.utils.data import BatchSampler, DataLoader, IterableDataset + +from .logging import get_logger +from .state import AcceleratorState, DistributedType, GradientState, is_tpu_available +from .utils import ( + RNGType, + broadcast, + broadcast_object_list, + concatenate, + find_batch_size, + get_data_structure, + initialize_tensors, + is_torch_version, + send_to_device, + slice_tensors, + synchronize_rng_states, +) + + +if is_tpu_available(check_device=False): + import torch_xla.distributed.parallel_loader as xpl + + class MpDeviceLoaderWrapper(xpl.MpDeviceLoader): + """ + Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. + Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. + """ + + @property + def total_batch_size(self): + return self._loader.total_batch_size + + @property + def total_dataset_length(self): + return self._loader.total_dataset_length + + +logger = get_logger(__name__) + +# kwargs of the DataLoader in min version 1.4.0. +_PYTORCH_DATALOADER_KWARGS = { + "batch_size": 1, + "shuffle": False, + "sampler": None, + "batch_sampler": None, + "num_workers": 0, + "collate_fn": None, + "pin_memory": False, + "drop_last": False, + "timeout": 0, + "worker_init_fn": None, + "multiprocessing_context": None, + "generator": None, +} + +# kwargs added after by version +_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = { + "1.7.0": {"prefetch_factor": 2, "persistent_workers": False}, +} + +for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items(): + if is_torch_version(">=", v): + _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs) + + +class BatchSamplerShard(BatchSampler): + """ + Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will + always yield a number of batches that is a round multiple of `num_processes` and that all have the same size. + Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration + at the first batch that would be too small / not present on all processes or loop with indices from the beginning. + + Args: + batch_sampler (`torch.utils.data.sampler.BatchSampler`): + The batch sampler to split in several shards. + num_processes (`int`, *optional*, defaults to 1): + The number of processes running concurrently. + process_index (`int`, *optional*, defaults to 0): + The index of the current process. + split_batches (`bool`, *optional*, defaults to `False`): + Whether the shards should be created by splitting a batch to give a piece of it on each process, or by + yielding different full batches on each process. + + On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in: + + - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if + this argument is set to `False`. + - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]` + then `[6, 7]` if this argument is set to `True`. 
+        even_batches (`bool`, *optional*, defaults to `True`):
+            Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
+            multiple of (original batch size / number of processes).
+
+
+
+    `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
+    equal to `False`
+
+    """
+
+    def __init__(
+        self,
+        batch_sampler: BatchSampler,
+        num_processes: int = 1,
+        process_index: int = 0,
+        split_batches: bool = False,
+        even_batches: bool = True,
+    ):
+        if split_batches and batch_sampler.batch_size % num_processes != 0:
+            raise ValueError(
+                f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
+                f"needs to be a round multiple of the number of processes ({num_processes})."
+            )
+        self.batch_sampler = batch_sampler
+        self.num_processes = num_processes
+        self.process_index = process_index
+        self.split_batches = split_batches
+        self.even_batches = even_batches
+        self.batch_size = getattr(batch_sampler, "batch_size", None)
+        self.drop_last = getattr(batch_sampler, "drop_last", False)
+        if self.batch_size is None and self.even_batches:
+            raise ValueError("You need to use `even_batches=False` when the batch sampler has no batch size.")
+
+    @property
+    def total_length(self):
+        return len(self.batch_sampler)
+
+    def __len__(self):
+        if self.split_batches:
+            # Split batches does not change the length of the batch sampler
+            return len(self.batch_sampler)
+        if len(self.batch_sampler) % self.num_processes == 0:
+            # If the length is a round multiple of the number of processes, it's easy.
+            return len(self.batch_sampler) // self.num_processes
+        length = len(self.batch_sampler) // self.num_processes
+        if self.drop_last:
+            # Same if we drop the remainder.
+            return length
+        elif self.even_batches:
+            # When `even_batches=True`, we always get +1
+            return length + 1
+        else:
+            # Otherwise it depends on the process index.
+            return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
+
+    def __iter__(self):
+        return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
+
+    def _iter_with_split(self):
+        initial_data = []
+        batch_length = self.batch_sampler.batch_size // self.num_processes
+        for idx, batch in enumerate(self.batch_sampler):
+            if idx == 0:
+                initial_data = batch
+            if len(batch) == self.batch_size:
+                # If the batch is full, we yield the part of it this process is responsible for.
+                yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
+        # If drop_last is True or the last batch was full, iteration is over, otherwise...
+        if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
+            if not self.even_batches:
+                if len(batch) > batch_length * self.process_index:
+                    yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+            else:
+                # For degenerate cases where the dataset has less than num_process * batch_size samples
+                while len(initial_data) < self.batch_size:
+                    initial_data += initial_data
+                batch = batch + initial_data
+                yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
+    def _iter_with_no_split(self):
+        initial_data = []
+        batch_to_yield = []
+        for idx, batch in enumerate(self.batch_sampler):
+            # We gather the initial indices in case we need to circle back at the end. 
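+            # Worked example (illustrative): with num_processes=2 and batches
+            # [[0, 1], [2, 3], [4, 5], [6, 7]], process 0 buffers [0, 1] and
+            # process 1 buffers [2, 3]; both yield once batch index 1 (the last
+            # of the round) is seen, and likewise for [4, 5] and [6, 7].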
+            if not self.drop_last and idx < self.num_processes:
+                initial_data += batch
+            # We identify the batch to yield but wait until we are sure every process gets a full batch before actually
+            # yielding it.
+            if idx % self.num_processes == self.process_index:
+                batch_to_yield = batch
+            if idx % self.num_processes == self.num_processes - 1 and (
+                self.batch_size is None or len(batch) == self.batch_size
+            ):
+                yield batch_to_yield
+                batch_to_yield = []
+
+        # If drop_last is True, iteration is over, otherwise...
+        if not self.drop_last and len(initial_data) > 0:
+            if not self.even_batches:
+                if len(batch_to_yield) > 0:
+                    yield batch_to_yield
+            else:
+                # ... we yield the complete batch we had saved before if it has the proper length
+                if len(batch_to_yield) == self.batch_size:
+                    yield batch_to_yield
+
+                # For degenerate cases where the dataset has less than num_process * batch_size samples
+                while len(initial_data) < self.num_processes * self.batch_size:
+                    initial_data += initial_data
+
+                # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
+                if len(batch) == self.batch_size:
+                    batch = []
+                    idx += 1
+
+                # Make sure we yield a multiple of self.num_processes batches
+                cycle_index = 0
+                while idx % self.num_processes != 0 or len(batch) > 0:
+                    end_index = cycle_index + self.batch_size - len(batch)
+                    batch += initial_data[cycle_index:end_index]
+                    if idx % self.num_processes == self.process_index:
+                        yield batch
+                    cycle_index = end_index
+                    batch = []
+                    idx += 1
+
+
+class IterableDatasetShard(IterableDataset):
+    """
+    Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
+    always yield a number of samples that is a round multiple of the actual batch size (depending on the value of
+    `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
+    `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
+    be too small or loop with indices from the beginning.
+
+    Args:
+        dataset (`torch.utils.data.dataset.IterableDataset`):
+            The iterable dataset to split in several shards.
+        batch_size (`int`, *optional*, defaults to 1):
+            The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
+            `split_batches=True`).
+        drop_last (`bool`, *optional*, defaults to `False`):
+            Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
+            beginning.
+        num_processes (`int`, *optional*, defaults to 1):
+            The number of processes running concurrently.
+        process_index (`int`, *optional*, defaults to 0):
+            The index of the current process.
+        split_batches (`bool`, *optional*, defaults to `False`):
+            Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
+            yielding different full batches on each process.
+
+            On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
+
+            - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
+              argument is set to `False`.
+            - the shard on process 0 to yield `[0, 1, 4, 5]` and the shard on process 1 to yield `[2, 3, 6, 7]` if
+              this argument is set to `True`. 
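+
+    Example (an illustrative sketch; `my_dataset` stands in for any
+    `IterableDataset` and is not defined here):
+
+    ```python
+    shard = IterableDatasetShard(my_dataset, batch_size=4, num_processes=2, process_index=0)
+    for sample in shard:
+        ...  # this process sees its slice of every global batch of 8 samples
+    ```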
+    """
+
+    def __init__(
+        self,
+        dataset: IterableDataset,
+        batch_size: int = 1,
+        drop_last: bool = False,
+        num_processes: int = 1,
+        process_index: int = 0,
+        split_batches: bool = False,
+    ):
+        if split_batches and batch_size > 1 and batch_size % num_processes != 0:
+            raise ValueError(
+                f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
+                f"needs to be a round multiple of the number of processes ({num_processes})."
+            )
+        self.dataset = dataset
+        self.batch_size = batch_size
+        self.drop_last = drop_last
+        self.num_processes = num_processes
+        self.process_index = process_index
+        self.split_batches = split_batches
+
+    def __iter__(self):
+        real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
+        process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
+        process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
+
+        first_batch = None
+        current_batch = []
+        for element in self.dataset:
+            current_batch.append(element)
+            # Wait to have a full batch before yielding elements.
+            if len(current_batch) == real_batch_size:
+                for i in process_slice:
+                    yield current_batch[i]
+                if first_batch is None:
+                    first_batch = current_batch.copy()
+                current_batch = []
+
+        # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
+        if not self.drop_last and len(current_batch) > 0:
+            if first_batch is None:
+                first_batch = current_batch.copy()
+            while len(current_batch) < real_batch_size:
+                current_batch += first_batch
+            for i in process_slice:
+                yield current_batch[i]
+
+
+class DataLoaderShard(DataLoader):
+    """
+    Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
+
+    Args:
+        dataset (`torch.utils.data.dataset.Dataset`):
+            The dataset to use to build this dataloader.
+        device (`torch.device`, *optional*):
+            If passed, the device to put all batches on.
+        rng_types (list of `str` or [`~utils.RNGType`]):
+            The list of random number generators to synchronize at the beginning of each iteration. Should be one or
+            several of:
+
+            - `"torch"`: the base torch random number generator
+            - `"cuda"`: the CUDA random number generator (GPU only)
+            - `"xla"`: the XLA random number generator (TPU only)
+            - `"generator"`: an optional `torch.Generator`
+        generator (`torch.Generator`, *optional*):
+            A random number generator to keep synchronized across processes.
+        kwargs:
+            All other keyword arguments to pass to the regular `DataLoader` initialization.
+
+    **Available attributes:**
+
+        - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+            Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+            number of processes
+
+        - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. 
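+
+    Example (an illustrative sketch; instances are normally created through
+    `prepare_data_loader` rather than built by hand, and `dataset` is a placeholder):
+
+    ```python
+    dl = DataLoaderShard(dataset, device=torch.device("cuda:0"), batch_size=8)
+    for batch in dl:
+        ...  # batches arrive already placed on the requested device
+    ```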
+    """
+
+    def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, **kwargs):
+        super().__init__(dataset, **kwargs)
+        self.device = device
+        self.rng_types = rng_types
+        self.synchronized_generator = synchronized_generator
+        self.gradient_state = GradientState()
+
+    def __iter__(self):
+        if self.rng_types is not None:
+            synchronize_rng_states(self.rng_types, self.synchronized_generator)
+        self.gradient_state._set_end_of_dataloader(False)
+        # We can safely pass because the default is -1
+        with suppress(Exception):
+            length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
+            self.gradient_state._set_remainder(length % self.total_batch_size)
+        dataloader_iter = super().__iter__()
+        # We iterate one batch ahead to check when we are at the end
+        try:
+            current_batch = next(dataloader_iter)
+        except StopIteration:
+            yield
+        while True:
+            try:
+                # But we still move it to the device so it is done before `StopIteration` is reached
+                if self.device is not None:
+                    current_batch = send_to_device(current_batch, self.device)
+                next_batch = next(dataloader_iter)
+                yield current_batch
+                current_batch = next_batch
+            except StopIteration:
+                self.gradient_state._set_end_of_dataloader(True)
+                yield current_batch
+                break
+
+    @property
+    def total_batch_size(self):
+        batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
+        return (
+            batch_sampler.batch_size
+            if batch_sampler.split_batches
+            else (batch_sampler.batch_size * batch_sampler.num_processes)
+        )
+
+    @property
+    def total_dataset_length(self):
+        if hasattr(self.dataset, "total_length"):
+            return self.dataset.total_length
+        else:
+            return len(self.dataset)
+
+
+class DataLoaderDispatcher(DataLoader):
+    """
+    Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
+    process their part of the batch.
+
+    Args:
+        split_batches (`bool`, *optional*, defaults to `False`):
+            Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
+            yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of
+            `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be
+            the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
+            `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
+            size of the `dataloader` is a round multiple of `batch_size`.
+
+    **Available attributes:**
+
+        - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+            Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+            number of processes
+
+        - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
+    """
+
+    def __init__(self, dataset, split_batches: bool = False, _drop_last: bool = False, **kwargs):
+        shuffle = False
+        if is_torch_version(">=", "1.11.0"):
+            from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
+
+            # We need to save the shuffling state of the DataPipe
+            if isinstance(dataset, ShufflerIterDataPipe):
+                shuffle = dataset._shuffle_enabled
+        super().__init__(dataset, **kwargs)
+        self.split_batches = split_batches
+        if is_torch_version("<", "1.8.0"):
+            raise ImportError(
+                f"Using `DataLoaderDispatcher` requires PyTorch 1.8.0 minimum. You have {torch.__version__}." 
+ ) + if shuffle: + torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) + + self.gradient_state = GradientState() + self.state = AcceleratorState() + self._drop_last = _drop_last + # We can safely pass because the default is -1 + with suppress(Exception): + length = getattr(self.dataset, "total_dataset_length", len(self.dataset)) + self.gradient_state._set_remainder(length % self.total_batch_size) + + def _fetch_batches(self, iterator): + batches, batch = None, None + # On process 0, we gather the batch to dispatch. + if self.state.process_index == 0: + try: + if self.split_batches: + # One batch of the main iterator is dispatched and split. + batch = next(iterator) + else: + # num_processes batches of the main iterator are concatenated then dispatched and split. + # We add the batches one by one so we have the remainder available when drop_last=False. + batches = [] + for _ in range(self.state.num_processes): + batches.append(next(iterator)) + batch = concatenate(batches, dim=0) + # In both cases, we need to get the structure of the batch that we will broadcast on other + # processes to initialize the tensors with the right shape. + # data_structure, stop_iteration + batch_info = [get_data_structure(batch), False] + except StopIteration: + batch_info = [None, True] + else: + batch_info = [None, self._stop_iteration] + # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. + broadcast_object_list(batch_info) + self._stop_iteration = batch_info[1] + if self._stop_iteration: + # If drop_last is False and split_batches is False, we may have a remainder to take care of. + if not self.split_batches and not self._drop_last: + if self.state.process_index == 0 and len(batches) > 0: + batch = concatenate(batches, dim=0) + batch_info = [get_data_structure(batch), False] + else: + batch_info = [None, True] + broadcast_object_list(batch_info) + return batch, batch_info + + def __iter__(self): + self.gradient_state._set_end_of_dataloader(False) + main_iterator = None + if self.state.process_index == 0: + # We only iterate through the DataLoader on process 0. + main_iterator = super().__iter__() + stop_iteration = False + self._stop_iteration = False + first_batch = None + next_batch, next_batch_info = self._fetch_batches(main_iterator) + while not stop_iteration: + batch, batch_info = next_batch, next_batch_info + + if self.state.process_index != 0: + # Initialize tensors on other processes than process 0. + batch = initialize_tensors(batch_info[0]) + batch = send_to_device(batch, self.state.device) + # Broadcast the batch before splitting it. + batch = broadcast(batch, from_process=0) + + if not self._drop_last and first_batch is None: + # We keep at least num processes elements of the first batch to be able to complete the last batch + first_batch = slice_tensors(batch, slice(0, self.state.num_processes)) + + observed_batch_size = find_batch_size(batch) + batch_size = observed_batch_size // self.state.num_processes + + stop_iteration = self._stop_iteration + if not stop_iteration: + # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in + # the dataloader since the number of batches is a round multiple of the number of processes. + next_batch, next_batch_info = self._fetch_batches(main_iterator) + # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. 
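+                # Note (illustrative): with `drop_last=False`, a trailing partial
+                # batch makes `next_batch_info[0]` non-None here, so the loop runs
+                # one more time to dispatch that remainder before stopping.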
+ if self._stop_iteration and next_batch_info[0] is None: + stop_iteration = True + + if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: + # If the last batch is not complete, let's add the first batch to it. + batch = concatenate([batch, first_batch], dim=0) + # Batch size computation above is wrong, it's off by 1 so we fix it. + batch_size += 1 + + data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) + batch = slice_tensors(batch, data_slice) + + if stop_iteration: + self.gradient_state._set_remainder(observed_batch_size) + self.gradient_state._set_end_of_dataloader(True) + yield batch + + def __len__(self): + whole_length = super().__len__() + if self.split_batches: + return whole_length + elif self._drop_last: + return whole_length // self.state.num_processes + else: + return math.ceil(whole_length / self.state.num_processes) + + @property + def total_batch_size(self): + return ( + self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) + ) + + @property + def total_dataset_length(self): + return len(self.dataset) + + +def prepare_data_loader( + dataloader: DataLoader, + device: Optional[torch.device] = None, + num_processes: Optional[int] = None, + process_index: Optional[int] = None, + split_batches: bool = False, + put_on_device: bool = False, + rng_types: Optional[List[Union[str, RNGType]]] = None, + dispatch_batches: Optional[bool] = None, + even_batches: bool = True, +) -> DataLoader: + """ + Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. + + Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration + at the first batch that would be too small / not present on all processes or loop with indices from the beginning. + + Args: + dataloader (`torch.utils.data.dataloader.DataLoader`): + The data loader to split across several devices. + device (`torch.device`): + The target device for the returned `DataLoader`. + num_processes (`int`, *optional*): + The number of processes running concurrently. Will default to the value given by + [`~state.AcceleratorState`]. + process_index (`int`, *optional*): + The index of the current process. Will default to the value given by [`~state.AcceleratorState`]. + split_batches (`bool`, *optional*, defaults to `False`): + Whether the resulting `DataLoader` should split the batches of the original data loader across devices or + yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of + `num_processes` batches at each iteration). + + Another way to see this is that the observed batch size will be the same as the initial `dataloader` if + this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes` + otherwise. + + Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of + `batch_size`. + put_on_device (`bool`, *optional*, defaults to `False`): + Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or + dictionaries of tensors). + rng_types (list of `str` or [`~utils.RNGType`]): + The list of random number generators to synchronize at the beginning of each iteration. 
Should be one or
+            several of:
+
+            - `"torch"`: the base torch random number generator
+            - `"cuda"`: the CUDA random number generator (GPU only)
+            - `"xla"`: the XLA random number generator (TPU only)
+            - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+              dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+        dispatch_batches (`bool`, *optional*):
+            If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
+            are split and broadcast to each process. Will default to `True` when the underlying dataset is an
+            `IterableDataset`, `False` otherwise.
+        even_batches (`bool`, *optional*, defaults to `True`):
+            If set to `True`, in cases where the total batch size across all processes does not exactly divide the
+            dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
+            all workers.
+
+    Returns:
+        `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches
+
+
+
+    `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
+    equal to `False`
+
+
+    """
+    if dispatch_batches is None:
+        if is_torch_version("<", "1.8.0") or not put_on_device:
+            dispatch_batches = False
+        else:
+            dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
+
+    if dispatch_batches and not put_on_device:
+        raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
+    # Grab defaults from AcceleratorState
+    state = AcceleratorState()
+    if num_processes is None:
+        num_processes = state.num_processes
+    if process_index is None:
+        process_index = state.process_index
+
+    # Sanity check
+    if split_batches and dataloader.batch_size > 1 and dataloader.batch_size % num_processes != 0:
+        raise ValueError(
+            f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
+            f"needs to be a round multiple of the number of processes ({num_processes})."
+        )
+
+    new_dataset = dataloader.dataset
+    # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
+    new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
+    sampler_is_batch_sampler = False
+    synchronized_generator = None
+    # No change if no multiprocess
+    if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
+        if isinstance(new_dataset, IterableDataset):
+            if getattr(dataloader.dataset, "generator", None) is not None:
+                synchronized_generator = dataloader.dataset.generator
+            new_dataset = IterableDatasetShard(
+                new_dataset,
+                batch_size=dataloader.batch_size,
+                drop_last=dataloader.drop_last,
+                num_processes=num_processes,
+                process_index=process_index,
+                split_batches=split_batches,
+            )
+        else:
+            # New batch sampler for the current process. 
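+            # A dataloader can be built as `DataLoader(ds, sampler=BatchSampler(...))`,
+            # in which case the batch sampler lives in `.sampler` rather than
+            # `.batch_sampler`; the check below covers both layouts before sharding.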
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + if sampler_is_batch_sampler: + sampler = dataloader.sampler.sampler + else: + sampler = dataloader.batch_sampler.sampler + if hasattr(sampler, "generator"): + if sampler.generator is None: + sampler.generator = torch.Generator() + synchronized_generator = sampler.generator + + batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler + new_batch_sampler = BatchSamplerShard( + batch_sampler, + num_processes=num_processes, + process_index=process_index, + split_batches=split_batches, + even_batches=even_batches, + ) + + # We ignore all of those since they are all dealt with by our new_batch_sampler + ignore_kwargs = [ + "batch_size", + "shuffle", + "sampler", + "batch_sampler", + "drop_last", + ] + + if rng_types is not None and synchronized_generator is None and "generator" in rng_types: + rng_types.remove("generator") + + kwargs = { + k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) + for k in _PYTORCH_DATALOADER_KWARGS + if k not in ignore_kwargs + } + + # Need to provide batch_size as batch_sampler is None for Iterable dataset + if new_batch_sampler is None: + kwargs["drop_last"] = dataloader.drop_last + kwargs["batch_size"] = ( + dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size + ) + + if dispatch_batches: + kwargs.pop("generator") + dataloader = DataLoaderDispatcher( + new_dataset, + split_batches=split_batches, + batch_sampler=new_batch_sampler, + _drop_last=dataloader.drop_last, + **kwargs, + ) + elif sampler_is_batch_sampler: + dataloader = DataLoaderShard( + new_dataset, + device=device if put_on_device and state.distributed_type != DistributedType.TPU else None, + sampler=new_batch_sampler, + batch_size=dataloader.batch_size, + rng_types=rng_types, + synchronized_generator=synchronized_generator, + **kwargs, + ) + else: + dataloader = DataLoaderShard( + new_dataset, + device=device if put_on_device and state.distributed_type != DistributedType.TPU else None, + batch_sampler=new_batch_sampler, + rng_types=rng_types, + synchronized_generator=synchronized_generator, + **kwargs, + ) + + if state.distributed_type == DistributedType.TPU: + return MpDeviceLoaderWrapper(dataloader, device) + return dataloader diff --git a/testbed/huggingface__accelerate/src/accelerate/hooks.py b/testbed/huggingface__accelerate/src/accelerate/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..ef429a36900f09e63898792ba25f5169676f1a0f --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/hooks.py @@ -0,0 +1,494 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import functools +from typing import Dict, List, Mapping, Optional, Union + +import torch +import torch.nn as nn + +from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device + + +class ModelHook: + """ + A hook that contains callbacks to be executed just before and after the forward method of a model. The difference + with PyTorch existing hooks is that they get passed along the kwargs. + + Class attribute: + - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under + the `torch.no_grad()` context manager. + """ + + no_grad = False + + def init_hook(self, module): + """ + To be executed when the hook is attached to the module. + + Args: + module (`torch.nn.Module`): The module attached to this hook. + """ + return module + + def pre_forward(self, module, *args, **kwargs): + """ + To be executed just before the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass will be executed just after this event. + args (`Tuple[Any]`): The positional arguments passed to the module. + kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module. + + Returns: + `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`. + """ + return args, kwargs + + def post_forward(self, module, output): + """ + To be executed just after the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass been executed just before this event. + output (`Any`): The output of the module. + + Returns: + `Any`: The processed `output`. + """ + return output + + def detach_hook(self, module): + """ + To be executed when the hook is detached from a module. + + Args: + module (`torch.nn.Module`): The module detached from this hook. + """ + return module + + +class SequentialHook(ModelHook): + """ + A hook that can contain several hooks and iterates through them at each event. + """ + + def __init__(self, *hooks): + self.hooks = hooks + + def init_hook(self, module): + for hook in self.hooks: + module = hook.init_hook(module) + return module + + def pre_forward(self, module, *args, **kwargs): + for hook in self.hooks: + args, kwargs = hook.pre_forward(module, *args, **kwargs) + return args, kwargs + + def post_forward(self, module, output): + for hook in self.hooks: + output = hook.post_forward(module, output) + return output + + def detach_hook(self, module): + for hook in self.hooks: + module = hook.detach_hook(module) + return module + + +def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): + """ + Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove + this behavior and restore the original `forward` method, use `remove_hook_from_module`. + + + + If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks + together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. + + + + Args: + module (`torch.nn.Module`): The module to attach a hook to. + hook (`ModelHook`): The hook to attach. + append (`bool`, *optional*, defaults to `False`): + Whether the hook should be chained with an existing one (if module already contains a hook) or not. + + Returns: + `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can + be discarded). 
+ """ + + if append and (getattr(module, "_hf_hook", None) is not None): + old_hook = module._hf_hook + remove_hook_from_module(module) + hook = SequentialHook(old_hook, hook) + + if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): + # If we already put some hook on this module, we replace it with the new one. + old_forward = module._old_forward + else: + old_forward = module.forward + module._old_forward = old_forward + + module = hook.init_hook(module) + module._hf_hook = hook + + @functools.wraps(old_forward) + def new_forward(*args, **kwargs): + args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) + if module._hf_hook.no_grad: + with torch.no_grad(): + output = old_forward(*args, **kwargs) + else: + output = old_forward(*args, **kwargs) + return module._hf_hook.post_forward(module, output) + + module.forward = new_forward + return module + + +def remove_hook_from_module(module: nn.Module, recurse=False): + """ + Removes any hook attached to a module via `add_hook_to_module`. + + Args: + module (`torch.nn.Module`): The module to attach a hook to. + recurse (`bool`, **optional**): Whether to remove the hooks recursively + + Returns: + `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can + be discarded). + """ + + if hasattr(module, "_hf_hook"): + module._hf_hook.detach_hook(module) + delattr(module, "_hf_hook") + + if hasattr(module, "_old_forward"): + module.forward = module._old_forward + delattr(module, "_old_forward") + + if recurse: + for child in module.children(): + remove_hook_from_module(child, recurse) + + return module + + +class AlignDevicesHook(ModelHook): + """ + A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the + associated module, potentially offloading the weights after the forward pass. + + Args: + execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + io_same_device (`bool`, *optional*, defaults to `False`): + Whether or not the output should be placed on the same device as the input was. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + place_submodules (`bool`, *optional*, defaults to `False`): + Whether to place the submodules on `execution_device` during the `init_hook` event. + """ + + def __init__( + self, + execution_device: Optional[Union[int, str, torch.device]] = None, + offload: bool = False, + io_same_device: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + place_submodules: bool = False, + ): + self.execution_device = execution_device + self.offload = offload + self.io_same_device = io_same_device + self.weights_map = weights_map + self.offload_buffers = offload_buffers + self.place_submodules = place_submodules + + # Will contain the input device when `io_same_device=True`. 
+ self.input_device = None + self.param_original_devices = {} + self.buffer_original_devices = {} + + def init_hook(self, module): + if not self.offload and self.execution_device is not None: + for name, _ in named_module_tensors(module, recurse=self.place_submodules): + set_module_tensor_to_device(module, name, self.execution_device) + elif self.offload: + self.original_devices = { + name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) + } + if self.weights_map is None: + self.weights_map = { + name: param.to("cpu") + for name, param in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ) + } + + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ): + set_module_tensor_to_device(module, name, "meta") + if not self.offload_buffers and self.execution_device is not None: + for name, _ in module.named_buffers(recurse=self.place_submodules): + set_module_tensor_to_device(module, name, self.execution_device) + return module + + def pre_forward(self, module, *args, **kwargs): + if self.io_same_device: + self.input_device = find_device([args, kwargs]) + if self.offload: + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ): + set_module_tensor_to_device(module, name, self.execution_device, value=self.weights_map[name]) + + return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) + + def post_forward(self, module, output): + if self.offload: + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ): + set_module_tensor_to_device(module, name, "meta") + + if self.io_same_device and self.input_device is not None: + output = send_to_device(output, self.input_device) + + return output + + def detach_hook(self, module): + if self.offload: + for name, device in self.original_devices.items(): + if device != torch.device("meta"): + set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) + + +def attach_execution_device_hook( + module: torch.nn.Module, + execution_device: Union[int, str, torch.device], + preload_module_classes: Optional[List[str]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right + execution device + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`int`, `str` or `torch.device`): + The device on which inputs and model weights should be placed before the forward pass. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0: + add_hook_to_module(module, AlignDevicesHook(execution_device)) + + # Break the recursion if we get to a preload module. 
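+ # Illustrative note (the class name below is hypothetical): with
+ # `preload_module_classes=["DenseWithBias"]`, a `DenseWithBias` instance keeps the hook attached
+ # above, but none of its children get their own hook; the parent hook is expected to place all of
+ # the child weights at once when its forward starts.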
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
+ return
+
+ for child in module.children():
+ attach_execution_device_hook(child, execution_device, preload_module_classes=preload_module_classes)
+
+
+def attach_align_device_hook(
+ module: torch.nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload: bool = False,
+ weights_map: Optional[Mapping] = None,
+ offload_buffers: bool = False,
+ module_name: str = "",
+ preload_module_classes: Optional[List[str]] = None,
+):
+ """
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
+ buffers.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`torch.device`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ module_name (`str`, *optional*, defaults to `""`):
+ The name of the module.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ """
+ # Attach the hook on this module if it has any direct tensor.
+ directs = named_module_tensors(module)
+ full_offload = (
+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
+ )
+
+ if len(list(directs)) > 0 or full_offload:
+ if weights_map is not None:
+ prefix = f"{module_name}." if len(module_name) > 0 else ""
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
+ else:
+ prefixed_weights_map = None
+ hook = AlignDevicesHook(
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=prefixed_weights_map,
+ offload_buffers=offload_buffers,
+ place_submodules=full_offload,
+ )
+ add_hook_to_module(module, hook, append=True)
+
+ # We stop the recursion in case we hit the full offload.
+ if full_offload:
+ return
+
+ # Recurse on all children of the module.
+ for child_name, child in module.named_children():
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
+ attach_align_device_hook(
+ child,
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=child_name,
+ preload_module_classes=preload_module_classes,
+ )
+
+
+def remove_hook_from_submodules(module: nn.Module):
+ """
+ Recursively removes all hooks attached on the submodules of a given model.
+
+ Args:
+ module (`torch.nn.Module`): The module on which to remove all hooks.
+ """ + remove_hook_from_module(module) + for child in module.children(): + remove_hook_from_submodules(child) + + +def attach_align_device_hook_on_blocks( + module: nn.Module, + execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None, + offload: Union[bool, Dict[str, bool]] = False, + weights_map: Mapping = None, + offload_buffers: bool = False, + module_name: str = "", + preload_module_classes: Optional[List[str]] = None, +): + """ + Attaches `AlignDevicesHook` to all blocks of a given model as needed. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. It can be one device + for the whole module, or a dictionary mapping module name to device. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole + module, or a dictionary mapping module name to boolean. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + # If one device and one offload, we've got one hook. 
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict): + if not offload: + hook = AlignDevicesHook(execution_device=execution_device, io_same_device=True, place_submodules=True) + add_hook_to_module(module, hook) + else: + attach_align_device_hook( + module, + execution_device=execution_device, + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + ) + return + + if not isinstance(execution_device, Mapping): + execution_device = {key: execution_device for key in offload.keys()} + if not isinstance(offload, Mapping): + offload = {key: offload for key in execution_device.keys()} + + if module_name in execution_device and not offload[module_name]: + hook = AlignDevicesHook( + execution_device=execution_device[module_name], + offload_buffers=offload_buffers, + io_same_device=(module_name == ""), + place_submodules=True, + ) + add_hook_to_module(module, hook) + attach_execution_device_hook(module, execution_device[module_name]) + elif module_name in execution_device: + attach_align_device_hook( + module, + execution_device=execution_device[module_name], + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + preload_module_classes=preload_module_classes, + ) + if not hasattr(module, "_hf_hook"): + hook = AlignDevicesHook(execution_device=execution_device[module_name], io_same_device=(module_name == "")) + add_hook_to_module(module, hook) + attach_execution_device_hook( + module, execution_device[module_name], preload_module_classes=preload_module_classes + ) + elif module_name == "": + hook = AlignDevicesHook(io_same_device=True) + add_hook_to_module(module, hook) + + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook_on_blocks( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + ) diff --git a/testbed/huggingface__accelerate/src/accelerate/launchers.py b/testbed/huggingface__accelerate/src/accelerate/launchers.py new file mode 100644 index 0000000000000000000000000000000000000000..2c9db6a7595e2ecdd48d2499e21e74f29b2ee902 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/launchers.py @@ -0,0 +1,163 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import tempfile + +import torch + +from .state import AcceleratorState +from .utils import PrecisionType, PrepareForLaunch, patch_environment + + +def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"): + """ + Launches a training function, using several processes if it's possible in the current environment (TPU with + multiple cores for instance). + + Args: + function (`Callable`): + The training function to execute. 
If it accepts arguments, the first argument should be the index of the
+ process run.
+ args (`Tuple`):
+ Tuple of arguments to pass to the function (it will receive `*args`).
+ num_processes (`int`, *optional*):
+ The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to
+ the number of GPUs available otherwise.
+ mixed_precision (`str`, *optional*, defaults to `"no"`):
+ If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
+ use_port (`str`, *optional*, defaults to `"29500"`):
+ The port to use to communicate between processes when launching a multi-GPU training.
+ """
+ # Are we in a Google Colab or a Kaggle Kernel?
+ in_colab = False
+ in_kaggle = False
+ if any(key.startswith("KAGGLE") for key in os.environ.keys()):
+ in_kaggle = True
+ elif "IPython" in sys.modules:
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
+
+ try:
+ mixed_precision = PrecisionType(mixed_precision.lower())
+ except ValueError:
+ raise ValueError(
+ f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
+ )
+
+ if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
+ # TPU launch
+ import torch_xla.distributed.xla_multiprocessing as xmp
+
+ if len(AcceleratorState._shared_state) > 0:
+ raise ValueError(
+ "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
+ "your training function. Restart your notebook and make sure no cell initializes an "
+ "`Accelerator`."
+ )
+ if num_processes is None:
+ num_processes = 8
+
+ launcher = PrepareForLaunch(function, distributed_type="TPU")
+ print(f"Launching a training on {num_processes} TPU cores.")
+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
+ elif in_colab:
+ # No need for a distributed launch otherwise as it's either CPU or one GPU.
+ if torch.cuda.is_available():
+ print("Launching training on one GPU.")
+ else:
+ print("Launching training on one CPU.")
+ function(*args)
+ else:
+ if num_processes is None:
+ raise ValueError(
+ "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
+ )
+
+ if num_processes > 1:
+ # Multi-GPU launch
+ from torch.multiprocessing import start_processes
+
+ if len(AcceleratorState._shared_state) > 0:
+ raise ValueError(
+ "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
+ "inside your training function. Restart your notebook and make sure no cell initializes an "
+ "`Accelerator`."
+ )
+
+ if torch.cuda.is_initialized():
+ raise ValueError(
+ "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
+ "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
+ "function."
+ )
+
+ # torch.distributed will expect a few environment variables to be set. We set the ones common to each
+ # process here (the others will be set by the launcher).
+ with patch_environment(
+ world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
+ ):
+ launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
+
+ print(f"Launching training on {num_processes} GPUs.")
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
+
+ else:
+ # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
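+ # For reference (illustrative, names are hypothetical), this single-process path is what runs for
+ # a call such as:
+ #
+ #     notebook_launcher(training_function, args=(config,), num_processes=1)
+ #
+ # on a machine without multiple GPUs, e.g. a laptop with an Apple-silicon MPS backend.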
+ use_mps_device = "false"
+ if torch.backends.mps.is_available():
+ print("Launching training on MPS.")
+ use_mps_device = "true"
+ elif torch.cuda.is_available():
+ print("Launching training on one GPU.")
+ else:
+ print("Launching training on CPU.")
+ with patch_environment(use_mps_device=use_mps_device):
+ function(*args)
+
+
+def debug_launcher(function, args=(), num_processes=2):
+ """
+ Launches a training function using several processes on CPU for debugging purposes.
+
+ <Tip warning={true}>
+
+ This function is provided for internal testing and debugging, but it's not intended for real training runs. It
+ will only use the CPU.
+
+ </Tip>
+
+ Args:
+ function (`Callable`):
+ The training function to execute.
+ args (`Tuple`):
+ Tuple of arguments to pass to the function (it will receive `*args`).
+ num_processes (`int`, *optional*, defaults to 2):
+ The number of processes to use for training.
+ """
+ from torch.multiprocessing import start_processes
+
+ with tempfile.NamedTemporaryFile() as tmp_file:
+ # torch.distributed will expect a few environment variables to be set. We set the ones common to each
+ # process here (the others will be set by the launcher).
+ with patch_environment(
+ world_size=num_processes,
+ master_addr="127.0.0.1",
+ master_port="29500",
+ accelerate_mixed_precision="no",
+ accelerate_debug_rdv_file=tmp_file.name,
+ accelerate_use_cpu="yes",
+ ):
+ launcher = PrepareForLaunch(function, debug=True)
+ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
diff --git a/testbed/huggingface__accelerate/src/accelerate/logging.py b/testbed/huggingface__accelerate/src/accelerate/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..679f2da5c8261720216959c808e8902714d41f6b
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/logging.py
@@ -0,0 +1,85 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+
+from .state import AcceleratorState
+from .utils import DistributedType
+
+
+class MultiProcessAdapter(logging.LoggerAdapter):
+ """
+ An adapter to assist with logging in multiprocess environments.
+
+ `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
+ or only on the main process. Defaults to `main_process_only=True`.
+ """
+
+ @staticmethod
+ def _should_log(main_process_only):
+ "Check if log should be performed"
+ state = AcceleratorState()
+ if state.distributed_type != DistributedType.MEGATRON_LM:
+ process_index_flag = state.local_process_index == 0
+ else:
+ process_index_flag = state.process_index == state.num_processes - 1
+ return not main_process_only or (main_process_only and process_index_flag)
+
+ def log(self, level, msg, *args, **kwargs):
+ """
+ Delegates logger call after checking if we should log.
+
+ Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
+ or only on the main process.
Defaults to `True` if not passed.
+ """
+ main_process_only = kwargs.pop("main_process_only", True)
+ if self.isEnabledFor(level) and self._should_log(main_process_only):
+ msg, kwargs = self.process(msg, kwargs)
+ self.logger.log(level, msg, *args, **kwargs)
+
+
+def get_logger(name: str, log_level: str = None):
+ """
+ Returns a `logging.Logger` for `name` that can handle multiprocessing.
+
+ If a log should be called on all processes, pass `main_process_only=False`.
+
+ Args:
+ name (`str`):
+ The name for the logger, such as `__file__`
+ log_level (`str`, *optional*):
+ The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or
+ `INFO` if that is not set.
+
+ Example:
+
+ ```python
+ >>> from accelerate.logging import get_logger
+
+ >>> logger = get_logger(__name__)
+
+ >>> logger.info("My log", main_process_only=False)
+ >>> logger.debug("My log", main_process_only=True)
+
+ >>> logger = get_logger(__name__, log_level="DEBUG")
+ >>> logger.info("My log")
+ >>> logger.debug("My second log")
+ ```
+ """
+ if log_level is None:
+ log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
+ logger = logging.getLogger(name)
+ if log_level is not None:
+ logger.setLevel(log_level.upper())
+ return MultiProcessAdapter(logger, {})
diff --git a/testbed/huggingface__accelerate/src/accelerate/memory_utils.py b/testbed/huggingface__accelerate/src/accelerate/memory_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..eba10bb783bf0158b2126cbfbe28cf8c40db34ed
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/memory_utils.py
@@ -0,0 +1,29 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all
+
+
+import warnings
+
+
+warnings.warn(
+ "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
+ "`from accelerate import find_executable_batch_size` to avoid this warning.",
+ FutureWarning,
+)
+
+from .utils.memory import find_executable_batch_size
diff --git a/testbed/huggingface__accelerate/src/accelerate/optimizer.py b/testbed/huggingface__accelerate/src/accelerate/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5eeef99a02f6c0f7cf6bca8576a57ed31c07472
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/optimizer.py
@@ -0,0 +1,165 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings + +import torch + +from .state import AcceleratorState, GradientState +from .utils import DistributedType, honor_type, is_torch_version, is_tpu_available + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +def move_to_device(state, device): + if isinstance(state, (list, tuple)): + return honor_type(state, (move_to_device(t, device) for t in state)) + elif isinstance(state, dict): + return type(state)({k: move_to_device(v, device) for k, v in state.items()}) + elif isinstance(state, torch.Tensor): + return state.to(device) + return state + + +class AcceleratedOptimizer(torch.optim.Optimizer): + """ + Internal wrapper around a torch optimizer. + + Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient + accumulation. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + device_placement (`bool`, *optional*, defaults to `True`): + Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of + `optimizer` on the right device. + scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): + The scaler to use in the step function if training with mixed precision. + """ + + def __init__(self, optimizer, device_placement=True, scaler=None): + self.optimizer = optimizer + self.scaler = scaler + self.accelerator_state = AcceleratorState() + self.gradient_state = GradientState() + self.device_placement = device_placement + self._is_overflow = False + + # Handle device placement + if device_placement: + state_dict = self.optimizer.state_dict() + if self.accelerator_state.distributed_type == DistributedType.TPU: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + else: + state_dict = move_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + @property + def state(self): + return self.optimizer.state + + @state.setter + def state(self, state): + self.optimizer.state = state + + @property + def param_groups(self): + return self.optimizer.param_groups + + @param_groups.setter + def param_groups(self, param_groups): + self.optimizer.param_groups = param_groups + + @property + def defaults(self): + return self.optimizer.defaults + + @defaults.setter + def defaults(self, defaults): + self.optimizer.defaults = defaults + + def add_param_group(self, param_group): + self.optimizer.add_param_group(param_group) + + def load_state_dict(self, state_dict): + if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + def state_dict(self): + return self.optimizer.state_dict() + + def zero_grad(self, set_to_none=None): + if self.gradient_state.sync_gradients: + if is_torch_version("<", "1.7.0"): + if set_to_none is not None: + raise ValueError( + "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for " + f"earlier versions (found version 
{torch.__version__})."
+ )
+ self.optimizer.zero_grad()
+ else:
+ accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
+ if accept_arg:
+ if set_to_none is None:
+ set_to_none = False
+ self.optimizer.zero_grad(set_to_none=set_to_none)
+ else:
+ if set_to_none is not None:
+ raise ValueError("`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.")
+ self.optimizer.zero_grad()
+
+ def step(self, closure=None):
+ if self.gradient_state.sync_gradients:
+ if self.accelerator_state.distributed_type == DistributedType.TPU:
+ optimizer_args = {"closure": closure} if closure is not None else {}
+ xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
+ elif self.scaler is not None:
+ scale_before = self.scaler.get_scale()
+ self.scaler.step(self.optimizer, closure)
+ self.scaler.update()
+ scale_after = self.scaler.get_scale()
+ # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.
+ self._is_overflow = scale_after < scale_before
+ else:
+ self.optimizer.step(closure)
+
+ def _switch_parameters(self, parameters_map):
+ for param_group in self.optimizer.param_groups:
+ param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
+
+ @property
+ def is_overflow(self):
+ """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
+ warnings.warn(
+ "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate. Use "
+ "`optimizer.step_was_skipped` instead.",
+ FutureWarning,
+ )
+ return self._is_overflow
+
+ @property
+ def step_was_skipped(self):
+ """Whether or not the optimizer step was skipped."""
+ return self._is_overflow
+
+ def __getstate__(self):
+ return self.__dict__.copy()
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
diff --git a/testbed/huggingface__accelerate/src/accelerate/scheduler.py b/testbed/huggingface__accelerate/src/accelerate/scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a88b1ea47c28dc9133b9b37a3a9c50fdcd1cb85
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/scheduler.py
@@ -0,0 +1,96 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
+
+import warnings
+
+from .state import AcceleratorState, GradientState
+
+
+warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
+
+
+class AcceleratedScheduler:
+ """
+ A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
+ to avoid making a scheduler step too fast when gradients overflowed and there was no training step (in mixed
+ precision training).
+
+ When performing gradient accumulation, the scheduler lengths should not be changed accordingly; Accelerate will always
+ + Args: + scheduler (`torch.optim.lr_scheduler._LRScheduler`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + The optimizers used. + step_with_optimizer (`bool`, *optional*, defaults to `True`): + Whether or not the scheduler should be stepped at each optimizer step. + split_batches (`bool`, *optional*, defaults to `False`): + Whether or not the dataloaders split one batch across the different processes (so batch size is the same + regardless of the number of processes) or create batches on each process (so batch size is the original + batch size multiplied by the number of processes). + """ + + def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False): + self.scheduler = scheduler + self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers] + self.split_batches = split_batches + self.step_with_optimizer = step_with_optimizer + self.gradient_state = GradientState() + + def step(self, *args, **kwargs): + if not self.step_with_optimizer: + # No link between scheduler and optimizer -> just step + self.scheduler.step(*args, **kwargs) + return + + # Otherwise, first make sure the optimizer was stepped. + if not self.gradient_state.sync_gradients: + return + + for opt in self.optimizers: + if opt.step_was_skipped: + return + if self.split_batches: + # Split batches -> the training dataloader batch size is not changed so one step per training step + self.scheduler.step(*args, **kwargs) + else: + # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do + # num_processes steps per training step + num_processes = AcceleratorState().num_processes + for _ in range(num_processes): + # Special case when using OneCycle and `drop_last` was not used + if hasattr(self.scheduler, "total_steps"): + if self.scheduler._step_count <= self.scheduler.total_steps: + self.scheduler.step(*args, **kwargs) + else: + self.scheduler.step(*args, **kwargs) + + # Passthroughs + def get_last_lr(self): + return self.scheduler.get_last_lr() + + def state_dict(self): + return self.scheduler.state_dict() + + def load_state_dict(self, state_dict): + self.scheduler.load_state_dict(state_dict) + + def get_lr(self): + return self.scheduler.get_lr() + + def print_lr(self, *args, **kwargs): + return self.scheduler.print_lr(*args, **kwargs) diff --git a/testbed/huggingface__accelerate/src/accelerate/state.py b/testbed/huggingface__accelerate/src/accelerate/state.py new file mode 100644 index 0000000000000000000000000000000000000000..d97327de42dad179edcb76204dcfbb05030b752b --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/state.py @@ -0,0 +1,353 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
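+
+# NOTE: illustrative sketch of how the singleton state below is normally reached (not part of the
+# original patch):
+#
+#     from accelerate import Accelerator
+#     from accelerate.state import AcceleratorState
+#
+#     accelerator = Accelerator()  # initializes the shared state
+#     state = AcceleratorState()   # any later instance shares the same `_shared_state`
+#     assert state.device == accelerator.device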
+ +import os +import warnings + +import torch + +from .utils import ( + DistributedType, + DynamoBackend, + get_ccl_version, + get_int_from_env, + is_ccl_available, + is_deepspeed_available, + is_tpu_available, + parse_choice_from_env, + parse_flag_from_env, +) +from .utils.dataclasses import SageMakerDistributedType + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +def is_initialized() -> bool: + """ + Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`, + but works as a module method. + """ + return AcceleratorState._shared_state != {} + + +# Inspired by Alex Martelli's 'Borg'. +class AcceleratorState: + """ + Singleton class that has information about the current training environment. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently + in use. + - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. + - **local_process_index** (`int`) -- The index of the current process on the current server. + - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type + of mixed precision being performed. + - **num_processes** (`int`) -- The number of processes currently launched in parallel. + - **process_index** (`int`) -- The index of the current process. + """ + + _shared_state = {} + + def __init__( + self, + mixed_precision: str = None, + cpu: bool = False, + dynamo_backend=None, + deepspeed_plugin=None, + fsdp_plugin=None, + megatron_lm_plugin=None, + _from_accelerator: bool = False, + **kwargs, + ): + self.__dict__ = self._shared_state + if parse_flag_from_env("ACCELERATE_USE_CPU"): + cpu = True + self._check_initialized(mixed_precision, cpu) + if not self.initialized: + self.backend = None + self.deepspeed_plugin = None + mixed_precision = ( + parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") + if mixed_precision is None + else mixed_precision.lower() + ) + dynamo_backend = ( + parse_choice_from_env("ACCELERATE_DYNAMO_BACKEND", "no") if dynamo_backend is None else dynamo_backend + ) + self.dynamo_backend = DynamoBackend(dynamo_backend.upper()) + if not _from_accelerator: + raise ValueError( + "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " + "before using any functionality from the `accelerate` library." 
+ ) + if ( + os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" + and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO + and not cpu + ): + if os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL: + self.distributed_type = DistributedType.MULTI_GPU + import smdistributed.dataparallel.torch.torch_smddp # noqa + + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend="smddp") + self.backend = "smddp" + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + self.device = torch.device("cuda", self.local_process_index) + torch.cuda.set_device(self.device) + self._mixed_precision = mixed_precision + elif is_tpu_available() and not cpu: + self.distributed_type = DistributedType.TPU + self.num_processes = xm.xrt_world_size() + self.process_index = xm.get_ordinal() + self.local_process_index = xm.get_local_ordinal() + self.device = xm.xla_device() + if mixed_precision == "bf16": + if os.environ.get("ACCELERATE_DOWNCAST_BF16"): + os.environ["XLA_USE_BF16"] = str(0) + os.environ["XLA_DOWNCAST_BF16"] = str(1) + self.downcast_bfloat = True + else: + os.environ["XLA_USE_BF16"] = str(1) + os.environ["XLA_DOWNCAST_BF16"] = str(0) + self.downcast_bfloat = False + self._mixed_precision = mixed_precision + elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: + assert ( + is_deepspeed_available() + ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" + self.distributed_type = DistributedType.DEEPSPEED + if not torch.distributed.is_initialized(): + from .utils import compare_versions + + self.backend = "nccl" + if compare_versions("deepspeed", ">", "0.6.5"): + from deepspeed import comm as dist + + dist.init_distributed(dist_backend=self.backend) + else: + torch.distributed.init_process_group(backend="nccl", **kwargs) + + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + self.device = torch.device("cuda", self.local_process_index) + torch.cuda.set_device(self.device) + self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config + self.deepspeed_plugin = deepspeed_plugin + elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu: + self.distributed_type = DistributedType.MULTI_GPU + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend="nccl", **kwargs) + self.backend = "nccl" + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + self.device = torch.device("cuda", self.local_process_index) + torch.cuda.set_device(self.device) + self._mixed_precision = mixed_precision + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no": + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": + self.distributed_type = DistributedType.MEGATRON_LM + megatron_lm_plugin.set_mixed_precision(self._mixed_precision) + self.megatron_lm_plugin = megatron_lm_plugin + elif get_int_from_env(["PMI_SIZE", 
"OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1: + self.distributed_type = DistributedType.MULTI_CPU + if is_ccl_available() and get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0: + if get_ccl_version() >= "1.12": + import oneccl_bindings_for_pytorch # noqa: F401 + else: + import torch_ccl # noqa: F401 + backend = "ccl" + elif torch.distributed.is_mpi_available(): + backend = "mpi" + else: + backend = "gloo" + # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH + rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) + size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) + local_rank = get_int_from_env( + ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 + ) + local_size = get_int_from_env( + ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1 + ) + self.local_process_index = local_rank + os.environ["RANK"] = str(rank) + os.environ["WORLD_SIZE"] = str(size) + os.environ["LOCAL_RANK"] = str(local_rank) + if not os.environ.get("MASTER_PORT", None): + os.environ["MASTER_PORT"] = "29500" + if not os.environ.get("MASTER_ADDR", None): + if local_size != size and backend != "mpi": + raise ValueError( + "Looks like distributed multinode run but MASTER_ADDR env not set, " + "please try exporting rank 0's hostname as MASTER_ADDR" + ) + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend, rank=rank, world_size=size, **kwargs) + self.backend = backend + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = local_rank + self.device = torch.device("cpu") + self._mixed_precision = mixed_precision + else: + self.distributed_type = DistributedType.NO + self.num_processes = 1 + self.process_index = self.local_process_index = 0 + if parse_flag_from_env("ACCELERATE_USE_MPS_DEVICE") and not cpu: + if not torch.backends.mps.is_available(): + if not torch.backends.mps.is_built(): + raise AssertionError( + "MPS not available because the current PyTorch install was not " + "built with MPS enabled. Please install torch version >=1.12.0 on " + "your Apple silicon Mac running macOS 12.3 or later with a native " + "version (arm64) of Python" + ) + else: + raise AssertionError( + "MPS not available because the current MacOS version is not 12.3+ " + "and/or you do not have an MPS-enabled device on this machine." + ) + else: + from .utils import is_torch_version + + if not is_torch_version(">", "1.12.0"): + warnings.warn( + "We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. " + "It has major fixes related to model correctness and performance improvements for transformer based models. " + "Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details." 
+ ) + self.device = torch.device("mps") + elif cpu or not torch.cuda.is_available(): + self.device = torch.device("cpu") + else: + self.device = torch.device("cuda") + self._mixed_precision = mixed_precision + + if ( + self.dynamo_backend != DynamoBackend.NO + and self._mixed_precision == "no" + and self.device.type == "cuda" + ): + torch.backends.cuda.matmul.allow_tf32 = True + + self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) + + def __repr__(self): + repr = ( + f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" + f"Num processes: {self.num_processes}\n" + f"Process index: {self.process_index}\n" + f"Local process index: {self.local_process_index}\n" + f"Device: {self.device}\n" + f"Mixed precision type: {self.mixed_precision}\n" + ) + if self.distributed_type == DistributedType.DEEPSPEED: + repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" + return repr + + # For backward compatibility + @property + def use_fp16(self): + return self._mixed_precision != "no" + + @property + def mixed_precision(self): + if self.distributed_type == DistributedType.DEEPSPEED: + config = self.deepspeed_plugin.deepspeed_config + if config.get("fp16", {}).get("enabled", False): + mixed_precision = "fp16" + elif config.get("bf16", {}).get("enabled", False): + mixed_precision = "bf16" + else: + mixed_precision = "no" + else: + mixed_precision = self._mixed_precision + return mixed_precision + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + AcceleratorState._shared_state = {} + + @property + def initialized(self) -> bool: + "Returns whether the `AcceleratorState` has been initialized" + return self._shared_state != {} + + def _check_initialized(self, mixed_precision=None, cpu=None): + "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" + if self.initialized: + err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerate()`." + if cpu and self.device.type != "cpu": + raise ValueError(err.format(flag="cpu=True")) + if mixed_precision is not None and mixed_precision != self._mixed_precision: + raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) + + +class GradientState: + """ + Singleton class that has information related to gradient synchronization for gradient accumulation + + **Available attributes:** + + - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader + - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader + - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices + """ + + _shared_state = {} + + def __init__(self): + self.__dict__ = self._shared_state + if not self.initialized: + self.sync_gradients = True + self.end_of_dataloader = False + self.remainder = -1 + + @property + def initialized(self) -> bool: + "Returns whether the `GradientState` has been initialized" + return GradientState._shared_state != {} + + def __repr__(self): + return ( + f"Sync Gradients: {self.sync_gradients}\n" + f"At end of current dataloader: {self.end_of_dataloader}\n" + f"Extra samples added: {self.remainder}" + ) + + def _set_sync_gradients(self, sync_gradients): + "Private function that sets whether gradients should be synchronized. Users should not have to call this." 
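+ # E.g. (illustrative) with `Accelerator(gradient_accumulation_steps=4)`, this is flipped to `True`
+ # on roughly every fourth batch, which is what lets the optimizer and scheduler wrappers in this
+ # package skip the intermediate micro-batches.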
+ self.sync_gradients = sync_gradients
+
+ def _set_end_of_dataloader(self, end_of_dataloader):
+ "Private function that sets whether the end of the current dataloader has been reached. Users should not have to call this."
+ self.end_of_dataloader = end_of_dataloader
+
+ def _set_remainder(self, remainder):
+ "Private function that sets the number of remaining samples at the end of the dataloader. Users should not have to call this."
+ self.remainder = remainder
diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/__init__.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b58b932b16b30a42626ab548cb5f79f1e06ddd15
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/__init__.py
@@ -0,0 +1,22 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all.
+
+from .testing import (
+ are_the_same_tensors,
+ execute_subprocess_async,
+ require_cpu,
+ require_cuda,
+ require_huggingface_suite,
+ require_mps,
+ require_multi_gpu,
+ require_single_gpu,
+ require_torch_min_version,
+ require_tpu,
+ skip,
+ slow,
+)
+from .training import RegressionDataset, RegressionModel
+
+
+from .scripts import test_script, test_sync # isort:skip
diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/examples.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/examples.py
new file mode 100644
index 0000000000000000000000000000000000000000..f459e03c4feb3895be159f84eb55297f95fda85d
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/examples.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python

+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each
+`examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the
+others are used to either get the code that matters, or to preprocess it (such as stripping comments).
+"""
+
+import os
+from typing import List
+
+
+def get_function_contents_by_name(lines: List[str], name: str):
+ """
+ Extracts a function from `lines` of segmented source code with the name `name`.
+
+ Args:
+ lines (`List[str]`):
+ Source code of a script separated by line.
+ name (`str`):
+ The name of the function to extract. Should be either `training_function` or `main`.
+ """
+ if name != "training_function" and name != "main":
+ raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
+ good_lines, found_start = [], False
+ for line in lines:
+ if not found_start and f"def {name}" in line:
+ found_start = True
+ good_lines.append(line)
+ continue
+ if found_start:
+ if name == "training_function" and "def main" in line:
+ return good_lines
+ if name == "main" and "if __name__" in line:
+ return good_lines
+ good_lines.append(line)
+
+
+def clean_lines(lines: List[str]):
+ """
+ Filters `lines` and removes any entries that start with a comment ('#') or are just a newline ('\n')
+
+ Args:
+ lines (`List[str]`):
+ Source code of a script separated by line.
+ """
+ return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"]
+
+
+def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):
+ """
+ Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be
+ used when testing to see if `complete_*_.py` examples have all of the implementations from each of the
+ `examples/by_feature/*` scripts.
+
+ It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code
+ is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the
+ `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.
+
+ Args:
+ base_filename (`str` or `os.PathLike`):
+ The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py`
+ feature_filename (`str` or `os.PathLike`):
+ The filepath of a single feature example script. The contents of this script are checked to see if they
+ exist in `base_filename`
+ parser_only (`bool`):
+ Whether to compare only the `main()` sections in both files, or to compare the contents of
+ `training_function()`
+ secondary_filename (`str`, *optional*):
+ A potential secondary filepath that should be included in the check. This function extracts the base
+ functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than
+ `complete_nlp_example.py`, the template script should be included here.
such as `examples/cv_example.py`. + """ + with open(base_filename, "r") as f: + base_file_contents = f.readlines() + with open(os.path.abspath(os.path.join("examples", "nlp_example.py")), "r") as f: + full_file_contents = f.readlines() + with open(feature_filename, "r") as f: + feature_file_contents = f.readlines() + if secondary_filename is not None: + with open(secondary_filename, "r") as f: + secondary_file_contents = f.readlines() + + # `nlp_example.py` is our base; we remove its code from `base_filename` and `feature_filename` to find the new content + if parser_only: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main")) + if secondary_filename is not None: + secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main")) + else: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function")) + if secondary_filename is not None: + secondary_file_func = clean_lines( + get_function_contents_by_name(secondary_file_contents, "training_function") + ) + + _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n" + + # Specific code in our script that differs from the full version, aka what is new + new_feature_code = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + it = iter(feature_file_func) + for i in range(len(feature_file_func) - 1): + if i not in passed_idxs: + line = next(it) + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_feature_code.append(line) + passed_idxs.append(i) + else: + # Skip over the `config['num_epochs'] = 2` statement + _ = next(it) + + # Extract out just the new parts from the base file's function + new_full_example_parts = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + for i, line in enumerate(base_file_func): + if i not in passed_idxs: + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_full_example_parts.append(line) + passed_idxs.append(i) + + # Finally, get the overall diff + diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts] + if secondary_filename is not None: + diff_from_two = [line for line in full_file_contents if line not in secondary_file_func] + diff_from_example = [line for line in diff_from_example if line not in diff_from_two] + + return diff_from_example diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/__init__.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/__init__.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..cde602dfa63e130aacbf85aba2712ef8af6eb8fb --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_checkpointing.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + The name of the model to use. + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels', which is the name the models of the transformers library expect + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders.
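+ # Training shuffles each epoch, while evaluation keeps a fixed order and uses the larger EVAL_BATCH_SIZE (no gradients are stored during evaluation, so bigger eval batches fit in memory).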
+ train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def evaluation_loop(accelerator, model, eval_dataloader, metric): + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. + batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + return eval_metric["accuracy"] + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also controls new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method.
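+ # For example, `accelerator.prepare(a, b)` hands back the prepared `(a, b)` in that same order, so swapping the names on the left-hand side would silently mix the objects up.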
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the starting epoch so files are named properly + starting_epoch = 0 + metric = evaluate.load("glue", "mrpc") + ending_epoch = num_epochs + + if args.partial_train_epoch is not None: + ending_epoch = args.partial_train_epoch + + if args.resume_from_checkpoint: + accelerator.load_state(args.resume_from_checkpoint) + epoch_string = args.resume_from_checkpoint.split("epoch_")[1] + state_epoch_num = "" + for char in epoch_string: + if char.isdigit(): + state_epoch_num += char + else: + break + starting_epoch = int(state_epoch_num) + 1 + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + accelerator.print("resumed checkpoint performance:", accuracy) + accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0]) + accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"]) + with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f: + resumed_state = json.load(f) + assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" + assert ( + resumed_state["lr"] == lr_scheduler.get_lr()[0] + ), "Scheduler learning rate mismatch, loading from checkpoint failed" + assert ( + resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] + ), "Optimizer learning rate mismatch, loading from checkpoint failed" + assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" + return + + # Now we train the model + state = {} + for epoch in range(starting_epoch, ending_epoch): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + output_dir = f"epoch_{epoch}" + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + state["accuracy"] = accuracy + state["lr"] = lr_scheduler.get_lr()[0] + state["optimizer_lr"] = optimizer.param_groups[0]["lr"] + state["epoch"] = epoch + state["step"] = overall_step + accelerator.print(f"epoch {epoch}:", state) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f: + json.dump(state, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of a training script that saves and resumes training from checkpoints.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--partial_train_epoch", + type=int, + default=None, + help="If passed, the training will stop after this number of epochs.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=2, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..c984b369a48d8cb72145f8ec265b2f2022775681 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_metrics.py @@ -0,0 +1,177 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from copy import deepcopy + +import torch +from torch.utils.data import DataLoader + +import datasets +import evaluate +import transformers +from accelerate import Accelerator +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import is_tpu_available, set_seed +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer + + +def get_basic_setup(accelerator, num_samples=82, batch_size=16): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=num_samples) + dataloader = DataLoader(dset, batch_size=batch_size) + model.to(accelerator.device) + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + return model, ddp_model, dataloader + + +def get_dataloader(accelerator: Accelerator, use_longest=False): + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased") + dataset = load_dataset("glue", "mrpc", split="validation") + + def tokenize_function(examples): + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + with accelerator.main_process_first(): + tokenized_datasets = dataset.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + if use_longest: + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + + return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16) + + +def 
get_mrpc_setup(dispatch_batches, split_batches): + accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches) + dataloader = get_dataloader(accelerator, not dispatch_batches) + model = AutoModelForSequenceClassification.from_pretrained( + "hf-internal-testing/mrpc-bert-base-cased", return_dict=True + ) + ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader) + return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator + + +def generate_predictions(model, dataloader, accelerator): + logits_and_targets = [] + for batch in dataloader: + input, target = batch.values() + with torch.no_grad(): + logit = model(input) + logit, target = accelerator.gather_for_metrics((logit, target)) + logits_and_targets.append((logit, target)) + logits, targs = [], [] + for (logit, targ) in logits_and_targets: + logits.append(logit) + targs.append(targ) + logits, targs = torch.cat(logits), torch.cat(targs) + return logits, targs + + +def test_torch_metrics( + accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16 +): + model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size) + logits, targs = generate_predictions(ddp_model, dataloader, accelerator) + assert ( + len(logits) == num_samples + ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}" + + +def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False): + metric = evaluate.load("glue", "mrpc") + setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches) + # First do baseline + model, dataloader, device = setup["no"] + model.to(device) + model.eval() + for batch in dataloader: + batch.to(device) + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + metric.add_batch(predictions=preds, references=batch["labels"]) + baseline = metric.compute() + + # Then do distributed + model, dataloader, device = setup["ddp"] + model.eval() + for batch in dataloader: + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + references = batch["labels"] + preds, references = accelerator.gather_for_metrics((preds, references)) + metric.add_batch(predictions=preds, references=references) + distributed = metric.compute() + + for key in "accuracy f1".split(): + assert math.isclose( + baseline[key], distributed[key] + ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" + + +def main(): + accelerator = Accelerator(split_batches=False, dispatch_batches=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + # These are a bit slower so they should only be run on the GPU or TPU + if torch.cuda.is_available() or is_tpu_available(): + if accelerator.is_local_main_process: + print("**Testing gather_for_metrics**") + for split_batches in [True, False]: + for dispatch_batches in [True, False]: + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`") + test_mrpc(dispatch_batches, split_batches) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test torch metrics**") + for split_batches in [True, False]: 
for dispatch_batches in [True, False]: + accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches) + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99") + test_torch_metrics(accelerator, 99) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test last batch is not dropped when perfectly divisible**") + accelerator = Accelerator() + test_torch_metrics(accelerator, 512) + accelerator.state._reset_state() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb5ca3bf415a2c0ae4d33357a53be44e5547281 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py @@ -0,0 +1,258 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import gc +import json +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +# Converting Bytes to Megabytes +def b2mb(x): + return int(x / 2**20) + + +# This context manager is used to track the peak memory usage of the process +class TorchTracemalloc: + def __enter__(self): + gc.collect() + torch.cuda.empty_cache() + torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.cuda.memory_allocated() + return self + + def __exit__(self, *exc): + gc.collect() + torch.cuda.empty_cache() + self.end = torch.cuda.memory_allocated() + self.peak = torch.cuda.max_memory_allocated() + self.used = b2mb(self.end - self.begin) + self.peaked = b2mb(self.peak - self.begin) + # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") + + +def get_dataloaders( + accelerator: Accelerator, + batch_size: int = 16, + model_name: str = "bert-base-cased", + n_train: int = 320, + n_val: int = 160, +): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + The name of the model to use. + n_train (`int`, *optional*): + The number of training examples to use. + n_val (`int`, *optional*): + The number of validation examples to use. 
+ """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset( + "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} + ) + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the starting epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + train_total_peak_memory = {} + for epoch in range(starting_epoch, num_epochs): + with TorchTracemalloc() as tracemalloc: + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + + # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage + accelerator.print("Memory before entering the train: {}".format(b2mb(tracemalloc.begin))) + accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) + accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) + accelerator.print( + "Total Peak Memory consumed during the train (max): {}".format( + tracemalloc.peaked + b2mb(tracemalloc.begin) + ) + ) + train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin) + if args.peak_memory_upper_bound is not None: + assert ( + train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound + ), "Peak memory usage exceeded the upper bound" + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f: + json.dump(train_total_peak_memory, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of a training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--peak_memory_upper_bound", + type=float, + default=None, + help="The upper bound of peak memory usage in MB. 
If set, the training will throw an error if the peak memory usage exceeds this value.", + ) + parser.add_argument( + "--n_train", + type=int, + default=320, + help="Number of training examples to use.", + ) + parser.add_argument( + "--n_val", + type=int, + default=160, + help="Number of validation examples to use.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=1, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..324a1854ecb6f749772946b5d4268227ddc4020f --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py @@ -0,0 +1,231 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os + +import torch +from torch.optim import AdamW +from torch.utils.data import DataLoader + +import evaluate +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler +from datasets import load_dataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + The name of the model to use. + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels', which is the name the models of the transformers library expect + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow.
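+ # (XLA compiles a separate graph per input shape, so variable-length batches would trigger a recompilation on nearly every step; fixed-length padding avoids that.)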
+ if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also controls new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the starting epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + metric = evaluate.load("glue", "mrpc") + best_performance = 0 + performance_metric = {} + for epoch in range(starting_epoch, num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`.
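+ # (A dataloader returned by `prepare` already yields batches on the right device, so the explicit move below is a defensive no-op in that case.)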
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] + + if best_performance < eval_metric["accuracy"]: + best_performance = eval_metric["accuracy"] + + if args.performance_lower_bound is not None: + assert ( + args.performance_lower_bound <= best_performance + ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: + json.dump(performance_metric, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--performance_lower_bound", + type=float, + default=None, + help="Optional lower bound for the performance metric. 
If set, the training will throw an error when the performance metric drops below this value.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=3, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_cli.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..491410e5fc33e663d977d70fdb6aef168ddcffc7 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_cli.py @@ -0,0 +1,13 @@ +import torch + + +def main(): + if torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + else: + num_gpus = 0 + print(f"Successfully ran on {num_gpus} GPUs") + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..6576e0358eb24ecfde0c61c6ba1c7cbfb74ec26e --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
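+ +# This script checks the `even_batches` behaviour of prepared dataloaders and +# `Accelerator.join_uneven_inputs` on a two-process setup; `main` below runs each check in turn.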
+ + +import warnings +from typing import List +from unittest.mock import Mock + +import torch +from torch.utils.data import DataLoader, IterableDataset, TensorDataset + +from accelerate.accelerator import Accelerator +from accelerate.utils.dataclasses import DistributedType + + +class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __iter__(self): + for element in self.data: + yield element + + +def create_accelerator(even_batches=True): + accelerator = Accelerator(even_batches=even_batches) + assert accelerator.num_processes == 2, "this script expects that two GPUs are available" + return accelerator + + +def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False): + """ + Create a simple DataLoader to use during the test cases + """ + if iterable: + dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size))) + else: + dataset = TensorDataset(torch.as_tensor(range(dataset_size))) + + dl = DataLoader(dataset, batch_size=batch_size) + dl = accelerator.prepare(dl) + + return dl + + +def verify_dataloader_batch_sizes( + accelerator: Accelerator, + dataset_size: int, + batch_size: int, + process_0_expected_batch_sizes: List[int], + process_1_expected_batch_sizes: List[int], +): + """ + A helper function for verifying the batch sizes coming from a prepared dataloader in each process + """ + dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size) + + batch_sizes = [len(batch[0]) for batch in dl] + + if accelerator.process_index == 0: + assert batch_sizes == process_0_expected_batch_sizes + elif accelerator.process_index == 1: + assert batch_sizes == process_1_expected_batch_sizes + + +def test_default_ensures_even_batch_sizes(): + + accelerator = create_accelerator() + + # without padding, we would expect a different number of batches + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1, 1], + ) + + # without padding, we would expect the same number of batches, but different sizes + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 2], + ) + + +def test_can_disable_even_batches(): + accelerator = create_accelerator(even_batches=False) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1], + ) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 1], + ) + + +def test_can_join_uneven_inputs(): + accelerator = create_accelerator(even_batches=False) + + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + + dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + batch_idxs = [] + with accelerator.join_uneven_inputs([ddp_model]): + for batch_idx, batch in enumerate(dl): + output = ddp_model(batch[0].float()) + loss = output.sum() + loss.backward() + batch_idxs.append(batch_idx) + + accelerator.wait_for_everyone() + + if accelerator.process_index == 0: + assert batch_idxs == [0, 1] + elif accelerator.process_index == 1: + assert batch_idxs == [0] + + +def test_join_raises_warning_for_non_ddp_distributed(accelerator): + + with warnings.catch_warnings(record=True) as w: + with 
accelerator.join_uneven_inputs([Mock()]): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for multi-GPU" in str(w[-1].message) + + +def test_join_can_override_even_batches(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + train_dl_overridden_value = train_dl.batch_sampler.even_batches + valid_dl_overridden_value = valid_dl.batch_sampler.even_batches + + assert train_dl_overridden_value == overridden_even_batches + assert valid_dl_overridden_value == overridden_even_batches + assert train_dl.batch_sampler.even_batches == default_even_batches + assert valid_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_can_override_for_mixed_type_dataloaders(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + batch_dl_overridden_value = batch_dl.batch_sampler.even_batches + except AttributeError: + # ensure attribute error is not raised when processing iterable dl + raise AssertionError + + assert batch_dl_overridden_value == overridden_even_batches + assert batch_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_raises_warning_for_iterable_when_overriding_even_batches(): + accelerator = create_accelerator() + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for map-style datasets" in str(w[-1].message) + + +def main(): + accelerator = create_accelerator() + + accelerator.print("Test that even_batches variable ensures uniform batches across processes") + test_default_ensures_even_batch_sizes() + + accelerator.print("Run tests with even_batches disabled") + test_can_disable_even_batches() + + accelerator.print("Test joining uneven inputs") + test_can_join_uneven_inputs() + + accelerator.print("Test overriding even_batches when joining uneven inputs") + test_join_can_override_even_batches() + + accelerator.print("Test overriding even_batches for mixed dataloader types") + test_join_can_override_for_mixed_type_dataloaders() + + accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") + test_join_raises_warning_for_iterable_when_overriding_even_batches() + + accelerator.print("Test join with non DDP distributed raises warning") + original_state = accelerator.state.distributed_type + accelerator.state.distributed_type = DistributedType.FSDP + test_join_raises_warning_for_non_ddp_distributed(accelerator) + 
accelerator.state.distributed_type = original_state + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_script.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_script.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb053a8a2e74667e5c5be1e67603dfd85e62194 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_script.py @@ -0,0 +1,336 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from torch.utils.data import DataLoader + +from accelerate import Accelerator +from accelerate.data_loader import prepare_data_loader +from accelerate.state import AcceleratorState +from accelerate.test_utils import RegressionDataset, RegressionModel, are_the_same_tensors +from accelerate.utils import ( + DistributedType, + gather, + is_bf16_available, + is_torch_version, + set_seed, + synchronize_rng_states, +) + + +def init_state_check(): + # Test we can instantiate this twice in a row. + state = AcceleratorState() + if state.local_process_index == 0: + print("Testing, testing. 1, 2, 3.") + print(state) + + +def rng_sync_check(): + state = AcceleratorState() + synchronize_rng_states(["torch"]) + assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU." + if state.distributed_type == DistributedType.MULTI_GPU: + synchronize_rng_states(["cuda"]) + assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU." + generator = torch.Generator() + synchronize_rng_states(["generator"], generator=generator) + assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator." + + if state.local_process_index == 0: + print("All RNG states are properly synced.") + + +def dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + + print(state.process_index, result, type(dl)) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
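+ # (With `split_batches=True` each fetched batch of 8 is split across the processes, so every process sees the same number of smaller batches and `gather` reassembles them in order.)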
+ + if state.process_index == 0: + print("Non-shuffled dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + if state.local_process_index == 0: + print("Shuffled dataloader passing.") + + +def central_dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + if state.process_index == 0: + print("Non-shuffled central dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." 
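+ # (With `dispatch_batches=True` only process 0 iterates the underlying dataloader and slices of each batch are broadcast to the other processes, which is what "central" refers to here.)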
+ + if state.local_process_index == 0: + print("Shuffled central dataloader passing.") + + +def mock_training(length, batch_size, generator): + set_seed(42) + generator.manual_seed(42) + train_set = RegressionDataset(length=length) + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + for epoch in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + loss.backward() + optimizer.step() + return train_set, model + + +def training_check(): + state = AcceleratorState() + generator = torch.Generator() + batch_size = 8 + length = batch_size * 4 * state.num_processes + + train_set, old_model = mock_training(length, batch_size * state.num_processes, generator) + assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes." + assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes." + + accelerator = Accelerator() + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for epoch in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.") + + accelerator = Accelerator(split_batches=True) + train_dl = DataLoader(train_set, batch_size=batch_size * state.num_processes, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." 
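+ # (The runs match because both reseed the global RNG and the dataloader generator with 42, and `split_batches=True` is paired with a dataloader batch size scaled by `num_processes`.)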
+ + accelerator.print("Training yielded the same results on one CPU or distributed setup with batch split.") + + if torch.cuda.is_available(): + # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16 + print("FP16 training check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="fp16") + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + # BF16 support is only for CPU + TPU, and some GPUs + if is_bf16_available(): + # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16 + print("BF16 training check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="bf16") + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Initialization**") + init_state_check() + + if state.local_process_index == 0: + print("\n**Test random number generator synchronization**") + rng_sync_check() + + if state.local_process_index == 0: + print("\n**DataLoader integration test**") + dl_preparation_check() + if state.distributed_type != DistributedType.TPU and is_torch_version(">=", "1.8.0"): + central_dl_preparation_check() + + # Trainings are not exactly the same in DeepSpeed and CPU mode + if state.distributed_type == DistributedType.DEEPSPEED: + return + + if state.local_process_index == 0: + print("\n**Training integration test**") + training_check() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_sync.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..3e73409c90c0d6a846f0223933efd32e6d5cadfa --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/scripts/test_sync.py @@ -0,0 +1,276 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy + +import torch +import torch.nn.functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from torch.utils.data import DataLoader + +from accelerate import Accelerator +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import DistributedType, set_seed + + +def check_model_parameters(model_a, model_b, did_step, iteration): + for param, grad_param in zip(model_a.parameters(), model_b.parameters()): + if not param.requires_grad: + continue + if not did_step: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, grad_param.grad) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, grad_param.grad) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" + + +def step_model(model, input, target, accelerator, do_backward=True): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + if not do_backward: + loss /= accelerator.gradient_accumulation_steps + loss.backward() + else: + accelerator.backward(loss) + + +def get_training_setup(accelerator, sched=False): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=80) + dataloader = DataLoader(dset, batch_size=16) + model.to(accelerator.device) + if sched: + opt = AdamW(params=model.parameters(), lr=1e-3) + ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) + sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) + ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) + # Make a copy of `model` + if sched: + ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) + else: + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + if sched: + return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) + return model, ddp_model, dataloader + + +def test_noop_sync(accelerator): + # Test when on a single CPU or GPU that the context manager does nothing + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, 
ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync + check_model_parameters(model, ddp_model, True, iteration) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + assert torch.allclose( + param.grad, ddp_param.grad + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync(accelerator): + # Test on distributed setup that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if iteration % 2 == 0: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_gradient_accumulation(split_batches=False, dispatch_batches=False): + accelerator = Accelerator( + gradient_accumulation_steps=2, split_batches=split_batches, dispatch_batches=dispatch_batches + ) + # Test that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator, False) + # Do "gradient accumulation" (noop) + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1): + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), 
f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + else: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False): + accelerator = Accelerator( + gradient_accumulation_steps=2, split_batches=split_batches, dispatch_batches=dispatch_batches + ) + # Test that context manager behaves properly + model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + model.train() + ddp_model.train() + step_model(model, input, target, accelerator, False) + opt.step() + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)): + if split_batches: + sched.step() + else: + for _ in range(accelerator.num_processes): + sched.step() + opt.zero_grad() + # Perform gradient accumulation under wrapper + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + ddp_opt.step() + ddp_sched.step() + ddp_opt.zero_grad() + + # Learning rates should be the same + assert ( + opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] + ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' + did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) + if accelerator.num_processes > 1: + check_model_parameters(model, ddp_model, did_step, iteration) + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.distributed_type == DistributedType.NO: + if state.local_process_index == 0: + print("**Test NOOP `no_sync` context manager**") + test_noop_sync(accelerator) + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager**") + test_distributed_sync(accelerator) + if state.distributed_type == DistributedType.MULTI_GPU: + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", + ) + test_gradient_accumulation(split_batch, dispatch_batches) + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + "`split_batches=False`, `dispatch_batches=False`**", + ) + test_gradient_accumulation_with_opt_and_scheduler() + if state.distributed_type == DistributedType.MULTI_GPU: + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + if not split_batch and not dispatch_batches: + continue + if state.local_process_index 
== 0:
+                    print(
+                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
+                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
+                    )
+                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
+
+
+def _mp_fn(index):
+    # For xla_spawn (TPUs)
+    main()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/testing.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a681bf32b992f3c51be68b4a32d8cab628d6d5d
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/testing.py
@@ -0,0 +1,363 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import asyncio
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+from distutils.util import strtobool
+from functools import partial
+from pathlib import Path
+from typing import List, Union
+from unittest import mock
+
+import torch
+
+from ..state import AcceleratorState
+from ..utils import (
+    gather,
+    is_comet_ml_available,
+    is_datasets_available,
+    is_deepspeed_available,
+    is_tensorboard_available,
+    is_torch_version,
+    is_tpu_available,
+    is_transformers_available,
+    is_wandb_available,
+)
+
+
+def parse_flag_from_env(key, default=False):
+    try:
+        value = os.environ[key]
+    except KeyError:
+        # KEY isn't set, default to `default`.
+        _value = default
+    else:
+        # KEY is set, convert it to True or False.
+        try:
+            _value = strtobool(value)
+        except ValueError:
+            # More values are supported, but let's keep the message simple.
+            raise ValueError(f"If set, {key} must be yes or no.")
+    return _value
+
+
+_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
+
+
+def skip(test_case):
+    "Decorator that skips a test unconditionally"
+    return unittest.skip("Test was skipped")(test_case)
+
+
+def slow(test_case):
+    """
+    Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a
+    truthy value to run them.
+    """
+    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
+
+
+def require_cpu(test_case):
+    """
+    Decorator marking a test that must only be run on the CPU. These tests are skipped when a GPU is available.
+    """
+    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
+
+
+def require_cuda(test_case):
+    """
+    Decorator marking a test that requires CUDA. These tests are skipped when there is no GPU available.
+    """
+    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
+
+
+def require_mps(test_case):
+    """
+    Decorator marking a test that requires an MPS backend. These tests are skipped when torch doesn't support the
+    `mps` backend.
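+    (MPS support first shipped in PyTorch 1.12; on older versions `torch.backends.mps` does not exist, so the
+    `hasattr` check below evaluates to `False` and the test is skipped.)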
+ """ + is_mps_supported = hasattr(torch.backends, "mps") and torch.backends.mps.is_available() + return unittest.skipUnless(is_mps_supported, "test requires a `mps` backend support in `torch`")(test_case) + + +def require_huggingface_suite(test_case): + """ + Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not. + """ + return unittest.skipUnless( + is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite" + )(test_case) + + +def require_tpu(test_case): + """ + Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available. + """ + return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case) + + +def require_single_gpu(test_case): + """ + Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU + available or number of GPUs is more than one. + """ + return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case) + + +def require_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple + GPUs. + """ + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_deepspeed(test_case): + """ + Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed + """ + return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case) + + +def require_fsdp(test_case): + """ + Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed + """ + return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case) + + +def require_torch_min_version(test_case=None, version=None): + """ + Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an + installed torch version is less than the required one. + """ + if test_case is None: + return partial(require_torch_min_version, version=version) + return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case) + + +def require_tensorboard(test_case): + """ + Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't + installed + """ + return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case) + + +def require_wandb(test_case): + """ + Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed + """ + return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case) + + +def require_comet_ml(test_case): + """ + Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed + """ + return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case) + + +_atleast_one_tracker_available = ( + any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() +) + + +def require_trackers(test_case): + """ + Decorator marking that a test requires at least one tracking library installed. 
These tests are skipped when none
+    are installed.
+    """
+    return unittest.skipUnless(
+        _atleast_one_tracker_available,
+        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
+    )(test_case)
+
+
+class TempDirTestCase(unittest.TestCase):
+    """
+    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
+    data at the start of a test, and then destroys it at the end of the TestCase.
+
+    Useful for when a class or API requires a single constant folder throughout its use, such as Weights and Biases.
+
+    The temporary directory location will be stored in `self.tmpdir`.
+    """
+
+    clear_on_setup = True
+
+    @classmethod
+    def setUpClass(cls):
+        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
+        cls.tmpdir = tempfile.mkdtemp()
+
+    @classmethod
+    def tearDownClass(cls):
+        "Remove `cls.tmpdir` after test suite has finished"
+        if os.path.exists(cls.tmpdir):
+            shutil.rmtree(cls.tmpdir)
+
+    def setUp(self):
+        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`"
+        if self.clear_on_setup:
+            for path in Path(self.tmpdir).glob("**/*"):
+                if path.is_file():
+                    path.unlink()
+                elif path.is_dir():
+                    shutil.rmtree(path)
+
+
+class MockingTestCase(unittest.TestCase):
+    """
+    A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the
+    behavior of a class-wide mock when defining one normally will not do.
+
+    Useful when a mock requires specific information that is only available after `TestCase.setUpClass` has run, such
+    as setting an environment variable with that information.
+
+    The `add_mocks` function should be run at the end of a `TestCase`'s `setUp` function, after a call to
+    `super().setUp()` such as:
+    ```python
+    def setUp(self):
+        super().setUp()
+        mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR": "SOME_VALUE"})
+        self.add_mocks(mocks)
+    ```
+    """
+
+    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
+        """
+        Add custom mocks for tests that should be repeated on each test. Should be called during
+        `MockingTestCase.setUp`, after `super().setUp()`.
+ + Args: + mocks (`mock.Mock` or list of `mock.Mock`): + Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run + """ + self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks] + for m in self.mocks: + m.start() + self.addCleanup(m.stop) + + +def are_the_same_tensors(tensor): + state = AcceleratorState() + tensor = tensor[None].clone().to(state.device) + tensors = gather(tensor).cpu() + tensor = tensor[0].cpu() + for i in range(tensors.shape[0]): + if not torch.equal(tensors[i], tensor): + return False + return True + + +class _RunOutput: + def __init__(self, returncode, stdout, stderr): + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +async def _read_stream(stream, callback): + while True: + line = await stream.readline() + if line: + callback(line) + else: + break + + +async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: + if echo: + print("\nRunning: ", " ".join(cmd)) + + p = await asyncio.create_subprocess_exec( + cmd[0], + *cmd[1:], + stdin=stdin, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe + # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait + # + # If it starts hanging, will need to switch to the following code. The problem is that no data + # will be seen until it's done and if it hangs for example there will be no debug info. + # out, err = await p.communicate() + # return _RunOutput(p.returncode, out, err) + + out = [] + err = [] + + def tee(line, sink, pipe, label=""): + line = line.decode("utf-8").rstrip() + sink.append(line) + if not quiet: + print(label, line, file=pipe) + + # XXX: the timeout doesn't seem to make any difference here + await asyncio.wait( + [ + _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")), + _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")), + ], + timeout=timeout, + ) + return _RunOutput(await p.wait(), out, err) + + +def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: + + loop = asyncio.get_event_loop() + result = loop.run_until_complete( + _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) + ) + + cmd_str = " ".join(cmd) + if result.returncode > 0: + stderr = "\n".join(result.stderr) + raise RuntimeError( + f"'{cmd_str}' failed with returncode {result.returncode}\n\n" + f"The combined stderr from workers follows:\n{stderr}" + ) + + return result + + +class SubprocessCallException(Exception): + pass + + +def run_command(command: List[str], return_stdout=False): + """ + Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. 
Will also properly capture
+    any error that occurred while running `command`.
+    """
+    try:
+        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
+        if return_stdout:
+            if hasattr(output, "decode"):
+                output = output.decode("utf-8")
+        return output
+    except subprocess.CalledProcessError as e:
+        raise SubprocessCallException(
+            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
+        ) from e
diff --git a/testbed/huggingface__accelerate/src/accelerate/test_utils/training.py b/testbed/huggingface__accelerate/src/accelerate/test_utils/training.py
new file mode 100644
index 0000000000000000000000000000000000000000..7345b93cb819a1262df45339fafd84ebd56be56c
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/test_utils/training.py
@@ -0,0 +1,88 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader
+
+from accelerate.utils.dataclasses import DistributedType
+
+
+class RegressionDataset:
+    def __init__(self, a=2, b=3, length=64, seed=None):
+        if seed is not None:
+            np.random.seed(seed)
+        self.length = length
+        self.x = np.random.normal(size=(length,)).astype(np.float32)
+        self.y = a * self.x + b + np.random.normal(scale=0.1, size=(length,)).astype(np.float32)
+
+    def __len__(self):
+        return self.length
+
+    def __getitem__(self, i):
+        return {"x": self.x[i], "y": self.y[i]}
+
+
+class RegressionModel(torch.nn.Module):
+    def __init__(self, a=0, b=0, double_output=False):
+        super().__init__()
+        self.a = torch.nn.Parameter(torch.tensor(a).float())
+        self.b = torch.nn.Parameter(torch.tensor(b).float())
+        self.first_batch = True
+
+    def forward(self, x=None):
+        if self.first_batch:
+            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}.
Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a + self.b + + +def mocked_dataloaders(accelerator, batch_size: int = 16): + from datasets import load_dataset + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} + datasets = load_dataset("csv", data_files=data_files) + label_list = datasets["train"].unique("label") + + label_to_id = {v: i for i, v in enumerate(label_list)} + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer( + examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" + ) + if "label" in examples: + outputs["labels"] = [label_to_id[l] for l in examples["label"]] + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["sentence1", "sentence2", "label"], + ) + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2) + eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) + + return train_dataloader, eval_dataloader diff --git a/testbed/huggingface__accelerate/src/accelerate/tracking.py b/testbed/huggingface__accelerate/src/accelerate/tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..379d6454ed95ce8651294f170382cd58484abfca --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/tracking.py @@ -0,0 +1,604 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Expectation: +# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`} + +import json +import os +import time +from abc import ABCMeta, abstractmethod, abstractproperty +from typing import Any, Dict, List, Optional, Union + +import yaml + +from .logging import get_logger +from .utils import ( + LoggerType, + is_aim_available, + is_comet_ml_available, + is_mlflow_available, + is_tensorboard_available, + is_wandb_available, +) + + +_available_trackers = [] + +if is_tensorboard_available(): + try: + from torch.utils import tensorboard + except ModuleNotFoundError: + import tensorboardX as tensorboard + + _available_trackers.append(LoggerType.TENSORBOARD) + +if is_wandb_available(): + import wandb + + _available_trackers.append(LoggerType.WANDB) + +if is_comet_ml_available(): + from comet_ml import Experiment + + _available_trackers.append(LoggerType.COMETML) + +if is_aim_available(): + from aim import Run + + _available_trackers.append(LoggerType.AIM) + +if is_mlflow_available(): + import mlflow + + _available_trackers.append(LoggerType.MLFLOW) + +logger = get_logger(__name__) + + +def get_available_trackers(): + "Returns a list of all supported available trackers in the system" + return _available_trackers + + +class GeneralTracker(object, metaclass=ABCMeta): + """ + A base Tracker class to be used for all logging integration implementations. + + Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to + [`Accelerator`] + """ + + @abstractproperty + def name(self): + "String representation of the python class name" + pass + + @abstractproperty + def requires_logging_directory(self): + """ + Whether the logger requires a directory to store their logs. Should either return `True` or `False`. + """ + pass + + @abstractmethod + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration + functionality of a tracking API. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + pass + + @abstractmethod + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with + special behavior for the `step parameter. + + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + pass + + def finish(self): + """ + Should run any finalizing functions within the tracking API. If the API should not have one, just don't + overwrite that method. + """ + pass + + @abstractproperty + def tracker(self): + """ + Should return internal tracking mechanism used by a tracker class (such as the `run` for wandb) + """ + pass + + +class TensorBoardTracker(GeneralTracker): + """ + A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run + logging_dir (`str`, `os.PathLike`): + Location for TensorBoard logs to be stored. + kwargs: + Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method. 
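+
+    Example (a minimal usage sketch; the run name, directory, and logged values are illustrative):
+
+    ```python
+    from accelerate.tracking import TensorBoardTracker
+
+    tracker = TensorBoardTracker(run_name="my_run", logging_dir="runs")
+    tracker.store_init_configuration({"lr": 1e-4, "batch_size": 16})
+    tracker.log({"train_loss": 0.42}, step=1)
+    tracker.finish()
+    ```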
+ """ + + name = "tensorboard" + requires_logging_directory = True + + def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = None, **kwargs): + self.run_name = run_name + self.logging_dir = os.path.join(logging_dir, run_name) + self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs) + logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.add_hparams(values, metric_dict={}) + self.writer.flush() + project_run_name = time.time() + dir_name = os.path.join(self.logging_dir, str(project_run_name)) + os.makedirs(dir_name, exist_ok=True) + with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile: + try: + yaml.dump(values, outfile) + except yaml.representer.RepresenterError: + logger.error("Serialization to store hyperparameters failed") + raise + logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file") + + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `SummaryWriter.add_scaler`, + `SummaryWriter.add_text`, or `SummaryWriter.add_scalers` method based on the contents of `values`. + """ + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.add_scalar(k, v, global_step=step, **kwargs) + elif isinstance(v, str): + self.writer.add_text(k, v, global_step=step, **kwargs) + elif isinstance(v, dict): + self.writer.add_scalars(k, v, global_step=step, **kwargs) + self.writer.flush() + logger.debug("Successfully logged to TensorBoard") + + def finish(self): + """ + Closes `TensorBoard` writer + """ + self.writer.close() + logger.debug("TensorBoard writer closed") + + +class WandBTracker(GeneralTracker): + """ + A `Tracker` class that supports `wandb`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + kwargs: + Additional key word arguments passed along to the `wandb.init` method. + """ + + name = "wandb" + requires_logging_directory = False + + def __init__(self, run_name: str, **kwargs): + self.run_name = run_name + self.run = wandb.init(project=self.run_name, **kwargs) + logger.debug(f"Initialized WandB project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.run + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. 
Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + wandb.config.update(values) + logger.debug("Stored initial configuration hyperparameters to WandB") + + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + self.run.log(values, step=step, **kwargs) + logger.debug("Successfully logged to WandB") + + def finish(self): + """ + Closes `wandb` writer + """ + self.run.finish() + logger.debug("WandB run closed") + + +class CometMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script. + + API keys must be stored in a Comet config file. + + Args: + run_name (`str`): + The name of the experiment run. + kwargs: + Additional key word arguments passed along to the `Experiment.__init__` method. + """ + + name = "comet_ml" + requires_logging_directory = False + + def __init__(self, run_name: str, **kwargs): + self.run_name = run_name + self.writer = Experiment(project_name=run_name, **kwargs) + logger.debug(f"Initialized CometML project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.log_parameters(values) + logger.debug("Stored initial configuration hyperparameters to CometML") + + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`, + or `Experiment.log_metrics` method based on the contents of `values`. 
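+                Note that string values are passed to `Experiment.log_other` without the `step` argument, since
+                `log_other` does not accept one; the step is instead applied up front via `Experiment.set_step`.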
+ """ + if step is not None: + self.writer.set_step(step) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.log_metric(k, v, step=step, **kwargs) + elif isinstance(v, str): + self.writer.log_other(k, v, **kwargs) + elif isinstance(v, dict): + self.writer.log_metrics(v, step=step, **kwargs) + logger.debug("Successfully logged to CometML") + + def finish(self): + """ + Closes `comet-ml` writer + """ + self.writer.end() + logger.debug("CometML run closed") + + +class AimTracker(GeneralTracker): + """ + A `Tracker` class that supports `aim`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + kwargs: + Additional key word arguments passed along to the `Run.__init__` method. + """ + + name = "aim" + requires_logging_directory = True + + def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs): + self.run_name = run_name + self.writer = Run(repo=logging_dir, **kwargs) + self.writer.name = self.run_name + logger.debug(f"Initialized Aim project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + self.writer["hparams"] = values + + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `Run.track` method. + """ + # Note: replace this with the dictionary support when merged + for key, value in values.items(): + self.writer.track(value, name=key, step=step, **kwargs) + + def finish(self): + """ + Closes `aim` writer + """ + self.writer.close() + + +class MLflowTracker(GeneralTracker): + """ + A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script. + + Args: + experiment_name (`str`, *optional*): + Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument. + logging_dir (`str` or `os.PathLike`, defaults to `"."`): + Location for mlflow logs to be stored. + run_id (`str`, *optional*): + If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s + end time is unset and its status is set to running, but the run’s other attributes (source_version, + source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument. + tags (`Dict[str, str]`, *optional*): + An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a + run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are + set on the new run. Environment variable MLFLOW_TAGS has priority over this argument. + nested_run (`bool`, *optional*, defaults to `False`): + Controls whether run is nested in parent run. True creates a nested run. Environment variable + MLFLOW_NESTED_RUN has priority over this argument. + run_name (`str`, *optional*): + Name of new run (stored as a mlflow.runName tag). 
Used only when `run_id` is unspecified. + description (`str`, *optional*): + An optional string that populates the description box of the run. If a run is being resumed, the + description is set on the resumed run. If a new run is being created, the description is set on the new + run. + """ + + name = "mlflow" + requires_logging_directory = True + + def __init__( + self, + experiment_name: str = None, + logging_dir: Optional[Union[str, os.PathLike]] = ".", + run_id: Optional[str] = None, + tags: Optional[Union[Dict[str, Any], str]] = None, + nested_run: Optional[bool] = False, + run_name: Optional[str] = None, + description: Optional[str] = None, + ): + experiment_name = os.getenv("MLFLOW_EXPERIMENT_NAME", experiment_name) + run_id = os.getenv("MLFLOW_RUN_ID", run_id) + tags = os.getenv("MLFLOW_TAGS", tags) + if isinstance(tags, str): + tags = json.loads(tags) + + nested_run = os.getenv("MLFLOW_NESTED_RUN", nested_run) + + experiment_id = mlflow.create_experiment( + name=experiment_name, + artifact_location=logging_dir, + tags=tags, + ) + + self.active_run = mlflow.start_run( + run_id=run_id, + experiment_id=experiment_id, + run_name=run_name, + nested=nested_run, + tags=tags, + description=description, + ) + + logger.debug(f"Initialized mlflow experiment {experiment_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.active_run + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + + for name, value in list(values.items()): + # internally, all values are converted to str in MLflow + if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH: + logger.warning( + f'Trainer is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s' + f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute." + ) + del values[name] + + values_list = list(values.items()) + + # MLflow cannot log more than 100 values in one go, so we have to split it + for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH): + mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH])) + + logger.debug("Stored initial configuration hyperparameters to MLflow") + + def log(self, values: dict, step: Optional[int]): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + metrics = {} + for k, v in values.items(): + if isinstance(v, (int, float)): + metrics[k] = v + else: + logger.warning( + f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. ' + "MLflow's log_metric() only accepts float and int types so we dropped this attribute." + ) + + mlflow.log_metrics(metrics, step=step) + logger.debug("Successfully logged to mlflow") + + def finish(self): + """ + End the active MLflow run. 
+ """ + mlflow.end_run() + + +LOGGER_TYPE_TO_CLASS = { + "aim": AimTracker, + "comet_ml": CometMLTracker, + "mlflow": MLflowTracker, + "tensorboard": TensorBoardTracker, + "wandb": WandBTracker, +} + + +def filter_trackers( + log_with: List[Union[str, LoggerType, GeneralTracker]], logging_dir: Union[str, os.PathLike] = None +): + """ + Takes in a list of potential tracker types and checks that: + - The tracker wanted is available in that environment + - Filters out repeats of tracker types + - If `all` is in `log_with`, will return all trackers in the environment + - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` + + Args: + log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): + A list of loggers to be setup for experiment tracking. Should be one or several of: + + - `"all"` + - `"tensorboard"` + - `"wandb"` + - `"comet_ml"` + - `"mlflow"` + If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can + also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. + logging_dir (`str`, `os.PathLike`, *optional*): + A path to a directory for storing logs of locally-compatible loggers. + """ + loggers = [] + if log_with is not None: + if not isinstance(log_with, (list, tuple)): + log_with = [log_with] + if "all" in log_with or LoggerType.ALL in log_with: + loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers() + else: + for log_type in log_with: + if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker): + raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}") + if issubclass(type(log_type), GeneralTracker): + loggers.append(log_type) + else: + log_type = LoggerType(log_type) + if log_type not in loggers: + if log_type in get_available_trackers(): + tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)] + if getattr(tracker_init, "requires_logging_directory"): + if logging_dir is None: + raise ValueError( + f"Logging with `{log_type}` requires a `logging_dir` to be passed in." + ) + loggers.append(log_type) + else: + logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.") + + return loggers diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/__init__.py b/testbed/huggingface__accelerate/src/accelerate/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a8a579570199a655d5ef3252c5e5bf9c8feae68b --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/__init__.py @@ -0,0 +1,134 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. 
So, don't check this module at all + +from .constants import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS +from .dataclasses import ( + ComputeEnvironment, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FullyShardedDataParallelPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + PrecisionType, + ProjectConfiguration, + RNGType, + SageMakerDistributedType, + TensorInformation, +) +from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env +from .imports import ( + get_ccl_version, + is_aim_available, + is_apex_available, + is_bf16_available, + is_boto3_available, + is_ccl_available, + is_comet_ml_available, + is_datasets_available, + is_deepspeed_available, + is_megatron_lm_available, + is_mlflow_available, + is_rich_available, + is_safetensors_available, + is_sagemaker_available, + is_tensorboard_available, + is_tpu_available, + is_transformers_available, + is_wandb_available, +) +from .modeling import ( + check_device_map, + compute_module_sizes, + convert_file_size_to_int, + dtype_byte_size, + find_tied_parameters, + get_balanced_memory, + get_max_layer_size, + get_max_memory, + infer_auto_device_map, + load_checkpoint_in_model, + load_offloaded_weights, + named_module_tensors, + retie_parameters, + set_module_tensor_to_device, +) +from .offload import ( + OffloadedWeightsLoader, + PrefixedDataset, + extract_submodules_state_dict, + load_offloaded_weight, + offload_state_dict, + offload_weight, + save_offload_index, +) +from .operations import ( + broadcast, + broadcast_object_list, + concatenate, + convert_outputs_to_fp32, + convert_to_fp32, + find_batch_size, + find_device, + gather, + gather_object, + get_data_structure, + honor_type, + initialize_tensors, + is_tensor_information, + is_torch_tensor, + pad_across_processes, + recursively_apply, + reduce, + send_to_device, + slice_tensors, +) +from .versions import compare_versions, is_torch_version + + +if is_deepspeed_available(): + from .deepspeed import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, + HfDeepSpeedConfig, + ) + +from .launch import PrepareForLaunch, _filter_args, get_launch_prefix +from .megatron_lm import ( + AbstractTrainStep, + BertTrainStep, + GPTTrainStep, + MegatronEngine, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + T5TrainStep, + avg_losses_across_data_parallel_group, + gather_across_data_parallel_groups, +) +from .megatron_lm import initialize as megatron_lm_initialize +from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader +from .megatron_lm import prepare_model as megatron_lm_prepare_model +from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer +from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler +from .memory import find_executable_batch_size, release_memory +from .other import ( + extract_model_from_parallel, + get_pretty_name, + patch_environment, + save, + wait_for_everyone, + write_basic_config, +) +from .random import set_seed, synchronize_rng_state, synchronize_rng_states +from .torch_xla import install_xla +from .tqdm import tqdm diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/constants.py b/testbed/huggingface__accelerate/src/accelerate/utils/constants.py new file mode 100644 index 
0000000000000000000000000000000000000000..f31027ee11de8c64397f8dad4c5e0ec9b148d32a --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/constants.py @@ -0,0 +1,62 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import operator as op + + +SCALER_NAME = "scaler.pt" +MODEL_NAME = "pytorch_model" +RNG_STATE_NAME = "random_states" +OPTIMIZER_NAME = "optimizer" +SCHEDULER_NAME = "scheduler" +SAGEMAKER_PYTORCH_VERSION = "1.10.2" +SAGEMAKER_PYTHON_VERSION = "py38" +SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0" +SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"] +FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD"] +FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"] +FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"] +FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"] +DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"] + +STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} + +# These are the args for `torch.distributed.launch` for pytorch < 1.9 +TORCH_LAUNCH_PARAMS = [ + "nnodes", + "nproc_per_node", + "rdzv_backend", + "rdzv_endpoint", + "rdzv_id", + "rdzv_conf", + "standalone", + "max_restarts", + "monitor_interval", + "start_method", + "role", + "module", + "m", + "no_python", + "run_path", + "log_dir", + "r", + "redirects", + "t", + "tee", + "node_rank", + "master_addr", + "master_port", +] + +CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"] diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/dataclasses.py b/testbed/huggingface__accelerate/src/accelerate/utils/dataclasses.py new file mode 100644 index 0000000000000000000000000000000000000000..01f174a10bbd2cf02896939dc55bcd3787cd80f2 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/dataclasses.py @@ -0,0 +1,1226 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +General namespace and dataclass related classes +""" + +import argparse +import copy +import enum +import functools +import os +import typing +import warnings +from contextlib import contextmanager +from dataclasses import dataclass, field +from datetime import timedelta +from distutils.util import strtobool +from typing import Any, Callable, Dict, Iterable, List, Optional + +import torch + +from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE, MODEL_NAME, OPTIMIZER_NAME +from .versions import is_torch_version + + +class KwargsHandler: + """ + Internal mixin that implements a `to_kwargs()` method for a dataclass. + """ + + def to_dict(self): + return copy.deepcopy(self.__dict__) + + def to_kwargs(self): + """ + Returns a dictionary containing the attributes with values different from the default of this class. + """ + default_dict = self.__class__().to_dict() + this_dict = self.to_dict() + return {k: v for k, v in this_dict.items() if default_dict[k] != v} + + +@dataclass +class DistributedDataParallelKwargs(KwargsHandler): + """ + Use this object in your [`Accelerator`] to customize how your model is wrapped in a + `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this + [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more + information on each argument. + + + + `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions. + + `static_graph` is only available in PyTorch 1.11.0 and later versions. + + + + Example: + + ```python + from accelerate import Accelerator + from accelerate.utils import DistributedDataParallelKwargs + + kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) + accelerator = Accelerator(kwargs_handlers=[kwargs]) + ``` + """ + + dim: int = 0 + broadcast_buffers: bool = True + bucket_cap_mb: int = 25 + find_unused_parameters: bool = False + check_reduction: bool = False + gradient_as_bucket_view: bool = False + static_graph: bool = False + + +@dataclass +class GradScalerKwargs(KwargsHandler): + """ + Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the + `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this + [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument. + + + + `GradScaler` is only available in PyTorch 1.5.0 and later versions. + + + + Example: + + ```python + from accelerate import Accelerator + from accelerate.utils import GradScalerKwargs + + kwargs = GradScalerKwargs(backoff_filter=0.25) + accelerator = Accelerator(kwargs_handlers=[kwargs]) + ``` + """ + + init_scale: float = 65536.0 + growth_factor: float = 2.0 + backoff_factor: float = 0.5 + growth_interval: int = 2000 + enabled: bool = True + + +@dataclass +class InitProcessGroupKwargs(KwargsHandler): + """ + Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer + to the documentation of this + [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more + information on each argument. 
+ + ```python + from datetime import timedelta + from accelerate import Accelerator + from accelerate.utils import InitProcessGroupKwargs + + kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800)) + accelerator = Accelerator(kwargs_handlers=[kwargs]) + ``` + """ + + init_method: Optional[str] = None + timeout: timedelta = timedelta(seconds=1800) + + +class DistributedType(str, enum.Enum): + """ + Represents a type of distributed environment. + + Values: + + - **NO** -- Not a distributed environment, just a single process. + - **MULTI_CPU** -- Distributed on multiple CPU nodes. + - **MULTI_GPU** -- Distributed on multiple GPUs. + - **DEEPSPEED** -- Using DeepSpeed. + - **TPU** -- Distributed on TPUs. + """ + + # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box. + NO = "NO" + MULTI_CPU = "MULTI_CPU" + MULTI_GPU = "MULTI_GPU" + DEEPSPEED = "DEEPSPEED" + FSDP = "FSDP" + TPU = "TPU" + MPS = "MPS" + MEGATRON_LM = "MEGATRON_LM" + + +class SageMakerDistributedType(str, enum.Enum): + """ + Represents a type of distributed environment. + + Values: + + - **NO** -- Not a distributed environment, just a single process. + - **DATA_PARALLEL** -- using sagemaker distributed data parallelism. + - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism. + """ + + # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box. + NO = "NO" + DATA_PARALLEL = "DATA_PARALLEL" + MODEL_PARALLEL = "MODEL_PARALLEL" + + +class ComputeEnvironment(str, enum.Enum): + """ + Represents a type of the compute environment. + + Values: + + - **LOCAL_MACHINE** -- private/custom cluster hardware. + - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment. + """ + + # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box. + LOCAL_MACHINE = "LOCAL_MACHINE" + AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER" + + +class DynamoBackend(str, enum.Enum): + """ + Represents a dynamo backend (see https://github.com/pytorch/torchdynamo). + + Values: + + - **NO** -- Do not use torch dynamo. + - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo + issues. + - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's + extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups. + - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton + kernels. [Read + more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747) + - **NVFUSER** -- nvFuser with TorchScript. [Read + more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593) + - **AOT_NVFUSER** -- nvFuser with AotAutograd. [Read + more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593) + - **AOT_CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read + more](https://github.com/pytorch/torchdynamo/pull/757) + - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read + more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html) + - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. 
[Read + more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst) + - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/) + - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read + more](https://github.com/intel/intel-extension-for-pytorch). + + """ + + # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box. + NO = "NO" + EAGER = "EAGER" + AOT_EAGER = "AOT_EAGER" + INDUCTOR = "INDUCTOR" + NVFUSER = "NVFUSER" + AOT_NVFUSER = "AOT_NVFUSER" + AOT_CUDAGRAPHS = "AOT_CUDAGRAPHS" + OFI = "OFI" + FX2TRT = "FX2TRT" + ONNXRT = "ONNXRT" + IPEX = "IPEX" + + +class EnumWithContains(enum.EnumMeta): + "A metaclass that adds the ability to check if `self` contains an item with the `in` operator" + + def __contains__(cls, item): + try: + cls(item) + except ValueError: + return False + return True + + +class BaseEnum(enum.Enum, metaclass=EnumWithContains): + "An enum class that can get the value of an item with `str(Enum.key)`" + + def __str__(self): + return self.value + + @classmethod + def list(cls): + "Method to list all the possible items in `cls`" + return list(map(str, cls)) + + +class LoggerType(BaseEnum): + """Represents a type of supported experiment tracker + + Values: + + - **ALL** -- all available trackers in the environment that are supported + - **TENSORBOARD** -- TensorBoard as an experiment tracker + - **WANDB** -- wandb as an experiment tracker + - **COMETML** -- comet_ml as an experiment tracker + """ + + ALL = "all" + AIM = "aim" + TENSORBOARD = "tensorboard" + WANDB = "wandb" + COMETML = "comet_ml" + MLFLOW = "mlflow" + + +class PrecisionType(BaseEnum): + """Represents a type of precision used on floating point values + + Values: + + - **NO** -- using full precision (FP32) + - **FP16** -- using half precision + - **BF16** -- using brain floating point precision + """ + + NO = "no" + FP16 = "fp16" + BF16 = "bf16" + + +class RNGType(BaseEnum): + TORCH = "torch" + CUDA = "cuda" + XLA = "xla" + GENERATOR = "generator" + + +# data classes + + +@dataclass +class TensorInformation: + shape: torch.Size + dtype: torch.dtype + + +@dataclass +class ProjectConfiguration: + """ + Configuration for the Accelerator object based on inner-project needs. + """ + + project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."}) + logging_dir: str = field( + default=None, + metadata={ + "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`." + }, + ) + automatic_checkpoint_naming: bool = field( + default=False, + metadata={"help": "Whether saved states should be automatically iteratively named."}, + ) + + total_limit: int = field( + default=None, + metadata={"help": "The maximum number of total saved states to keep."}, + ) + + iteration: int = field( + default=0, + metadata={"help": "The current save iteration."}, + ) + + def __post_init__(self): + if self.logging_dir is None: + self.logging_dir = self.project_dir + + +@dataclass +class DeepSpeedPlugin: + """ + This plugin is used to integrate DeepSpeed. + """ + + hf_ds_config: Any = field( + default=None, + metadata={ + "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`." 
+ }, + ) + gradient_accumulation_steps: int = field( + default=None, metadata={"help": "Number of steps to accumulate gradients before updating optimizer states"} + ) + gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"}) + zero_stage: int = field( + default=None, + metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"}, + ) + is_train_batch_min: str = field( + default=True, + metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"}, + ) + offload_optimizer_device: bool = field( + default=None, + metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."}, + ) + offload_param_device: bool = field( + default=None, + metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."}, + ) + zero3_init_flag: bool = field( + default=None, + metadata={ + "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models." + "Only applicable with ZeRO Stage-3." + }, + ) + zero3_save_16bit_model: bool = field( + default=None, + metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."}, + ) + + def __post_init__(self): + from .deepspeed import HfDeepSpeedConfig + + if self.gradient_accumulation_steps is None: + self.gradient_accumulation_steps = int(os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", 1)) + + if self.gradient_clipping is None: + gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none") + if gradient_clipping != "none": + self.gradient_clipping = float(gradient_clipping) + + if self.zero_stage is None: + self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2)) + + if self.offload_optimizer_device is None: + self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none") + + if self.offload_param_device is None: + self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none") + + if self.zero3_save_16bit_model is None: + self.zero3_save_16bit_model = ( + os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true" + ) + + if self.hf_ds_config is None: + self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none") + if ( + isinstance(self.hf_ds_config, dict) + or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none") + or isinstance(self.hf_ds_config, HfDeepSpeedConfig) + ): + if not isinstance(self.hf_ds_config, HfDeepSpeedConfig): + self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config) + if "gradient_accumulation_steps" not in self.hf_ds_config.config: + self.hf_ds_config.config["gradient_accumulation_steps"] = 1 + if "zero_optimization" not in self.hf_ds_config.config: + raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.") + + self._deepspeed_config_checks() + kwargs = { + "gradient_accumulation_steps": self.gradient_accumulation_steps, + "gradient_clipping": self.gradient_clipping if self.gradient_clipping else 1.0, + "zero_optimization.stage": self.zero_stage, + "zero_optimization.offload_optimizer.device": self.offload_optimizer_device, + "zero_optimization.offload_param.device": self.offload_param_device, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model, + } + for key in kwargs.keys(): + self.fill_match(key, **kwargs, must_match=False) + 
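+            # With the `auto` placeholders above resolved against the provided kwargs,
+            # recompute the cached ZeRO stage and offload flags from the final values.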
self.hf_ds_config.set_stage_and_offload() + else: + config = { + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": self.gradient_accumulation_steps, + "zero_optimization": { + "stage": self.zero_stage, + "offload_optimizer": { + "device": self.offload_optimizer_device, + }, + "offload_param": { + "device": self.offload_param_device, + }, + "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model, + }, + } + if self.gradient_clipping: + config["gradient_clipping"] = self.gradient_clipping + self.hf_ds_config = HfDeepSpeedConfig(config) + + self.deepspeed_config = self.hf_ds_config.config + self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout + if self.zero3_init_flag is None: + self.zero3_init_flag = ( + strtobool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1 + ) + if self.zero3_init_flag and not self.hf_ds_config.is_zero3(): + warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.") + self.zero3_init_flag = False + + def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs): + mismatches = [] if mismatches is None else mismatches + config, ds_key = self.hf_ds_config.find_config_node(ds_key_long) + if config is None: + return + + if config.get(ds_key) == "auto": + if ds_key_long in kwargs: + config[ds_key] = kwargs[ds_key_long] + return + else: + raise ValueError( + f"`{ds_key_long}` not found in kwargs. " + f"Please specify `{ds_key_long}` without `auto`(set to correct value) in the DeepSpeed config file or " + "pass it in kwargs." + ) + + if not must_match: + return + + ds_val = config.get(ds_key) + if ds_val is not None and ds_key_long in kwargs: + if ds_val != kwargs[ds_key_long]: + mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}") + + def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): + """Process the DeepSpeed config with the values from the kwargs.""" + mismatches = [] if mismatches is None else mismatches + if config is None: + config = self.deepspeed_config + for key, value in config.items(): + if isinstance(value, dict): + self.deepspeed_config_process( + prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs + ) + else: + self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) + if len(mismatches) > 0 and prefix == "": + mismatches_msg = "\n".join(mismatches) + raise ValueError( + "Please correct the following DeepSpeed config values that mismatch kwargs " + f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'." + ) + + def set_mixed_precision(self, mixed_precision): + ds_config = self.deepspeed_config + kwargs = { + "fp16.enabled": mixed_precision == "fp16", + "bf16.enabled": mixed_precision == "bf16", + } + if mixed_precision == "fp16": + if "fp16" not in ds_config: + ds_config["fp16"] = {"enabled": True, "auto_cast": True} + elif mixed_precision == "bf16": + if "bf16" not in ds_config: + ds_config["bf16"] = {"enabled": True} + + if mixed_precision != "no": + diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16" + if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true": + raise ValueError( + f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file." 
+ ) + for dtype in ["fp16", "bf16"]: + if dtype not in ds_config: + ds_config[dtype] = {"enabled": False} + self.fill_match("fp16.enabled", must_match=False, **kwargs) + self.fill_match("bf16.enabled", must_match=False, **kwargs) + + def set_deepspeed_weakref(self): + from .imports import is_transformers_available + + if self.zero3_init_flag: + if not is_transformers_available(): + raise Exception( + "When `zero3_init_flag` is set, it requires Transformers to be installed. " + "Please run `pip install transformers`." + ) + ds_config = copy.deepcopy(self.deepspeed_config) + if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto": + ds_config["gradient_accumulation_steps"] = 1 + if ( + "train_micro_batch_size_per_gpu" not in ds_config + or ds_config["train_micro_batch_size_per_gpu"] == "auto" + ): + ds_config["train_micro_batch_size_per_gpu"] = 1 + if ds_config["train_batch_size"] == "auto": + del ds_config["train_batch_size"] + + from transformers.deepspeed import HfDeepSpeedConfig + + self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa + + def is_zero3_init_enabled(self): + return self.zero3_init_flag + + @contextmanager + def zero3_init_context_manager(self, enable=False): + old = self.zero3_init_flag + if old == enable: + yield + else: + self.zero3_init_flag = enable + self.dschf = None + self.set_deepspeed_weakref() + yield + self.zero3_init_flag = old + self.dschf = None + self.set_deepspeed_weakref() + + def _deepspeed_config_checks(self): + env_variable_names_to_ignore = [ + "ACCELERATE_GRADIENT_ACCUMULATION_STEPS", + "ACCELERATE_GRADIENT_CLIPPING", + "ACCELERATE_DEEPSPEED_ZERO_STAGE", + "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", + "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", + "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", + "ACCELERATE_MIXED_PRECISION", + ] + env_variable_names_to_ignore = [ + name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore + ] + + deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",") + + if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config): + raise ValueError( + f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n" + "Please specify them appropriately in the DeepSpeed config file.\n" + "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n" + "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n" + "It will only ask for the necessary config variables when using `deepspeed_config_file`." + ) + + +@dataclass +class FullyShardedDataParallelPlugin: + """ + This plugin is used to enable fully sharded data parallelism. + """ + + sharding_strategy: "typing.Any" = field( + default=None, + metadata={ + "help": "FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`" + }, + ) + backward_prefetch: "typing.Any" = field( + default=None, + metadata={ + "help": "FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`" + }, + ) + mixed_precision_policy: "typing.Any" = field( + default=None, + metadata={ + "help": "A config to enable mixed precision training with FullyShardedDataParallel. " + "The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. 
" + "Each flag expects `torch.dtype` as the value. " + "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`." + }, + ) + auto_wrap_policy: Optional[Callable] = field( + default=None, + metadata={"help": "A callable specifying a policy to recursively wrap layers with FSDP"}, + ) + cpu_offload: "typing.Any" = field( + default=None, + metadata={ + "help": "Decides Whether to offload parameters and gradients to CPU. " + "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`." + }, + ) + ignored_modules: Optional[Iterable[torch.nn.Module]] = field( + default=None, + metadata={"help": "A list of modules to ignore for FSDP."}, + ) + + state_dict_type: "typing.Any" = field( + default=None, + metadata={ + "help": "FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`" + }, + ) + + state_dict_config: "typing.Any" = field( + default=None, + metadata={ + "help": "FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`" + }, + ) + + limit_all_gathers: bool = field( + default=False, + metadata={ + "help": "If False, then FSDP allows the CPU thread to schedule all-gathers " + "without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent " + "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. " + "Enabling this can help lower the number of CUDA malloc retries." + }, + ) + + def __post_init__(self): + from torch.distributed.fsdp.fully_sharded_data_parallel import ( + BackwardPrefetch, + CPUOffload, + FullStateDictConfig, + ShardingStrategy, + StateDictType, + ) + + if self.sharding_strategy is None: + self.sharding_strategy = ShardingStrategy(int(os.environ.get("FSDP_SHARDING_STRATEGY", 1))) + + if self.cpu_offload is None: + if os.environ.get("FSDP_OFFLOAD_PARAMS", "false") == "true": + self.cpu_offload = CPUOffload(offload_params=True) + else: + self.cpu_offload = CPUOffload(offload_params=False) + + if self.backward_prefetch is None: + prefetch_policy = os.environ.get("FSDP_BACKWARD_PREFETCH", "NO_PREFETCH") + if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]: + self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1) + + if self.state_dict_type is None: + state_dict_type_policy = os.environ.get("FSDP_STATE_DICT_TYPE", "FULL_STATE_DICT") + self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1) + + if self.state_dict_type == StateDictType.FULL_STATE_DICT and self.state_dict_config is None: + self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) + + @staticmethod + def get_module_class_from_name(module, name): + """ + Gets a class from a module by its name. + + Args: + module (`torch.nn.Module`): The module to get the class from. + name (`str`): The name of the class. 
+ """ + modules_children = list(module.children()) + if module.__class__.__name__ == name: + return module.__class__ + elif len(modules_children) == 0: + return + else: + for child_module in modules_children: + module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name) + if module_class is not None: + return module_class + + def set_auto_wrap_policy(self, model): + from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy + + if self.auto_wrap_policy is None: + auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP") + if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]: + transformer_cls_to_wrap = os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "") + transformer_cls_to_wrap = FullyShardedDataParallelPlugin.get_module_class_from_name( + model, transformer_cls_to_wrap + ) + if transformer_cls_to_wrap is None: + raise Exception("Could not find the transformer layer class to wrap in the model.") + self.auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + # Transformer layer class to wrap + transformer_layer_cls={transformer_cls_to_wrap}, + ) + elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]: + min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0)) + if min_num_params > 0: + self.auto_wrap_policy = functools.partial( + size_based_auto_wrap_policy, min_num_params=min_num_params + ) + + def set_mixed_precision(self, mixed_precision): + if mixed_precision == "fp16": + dtype = torch.float16 + elif mixed_precision == "bf16": + dtype = torch.bfloat16 + else: + raise ValueError(f"Unknown mixed precision value: {mixed_precision}") + from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision + + if self.mixed_precision_policy is None: + self.mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) + + def save_model(self, accelerator, model, output_dir, model_index=0): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType + + if is_torch_version("<=", "1.13.5"): + with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config): + state_dict = model.state_dict() + else: + FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config) + state_dict = model.state_dict() + + if self.state_dict_type == StateDictType.FULL_STATE_DICT: + weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin" + output_model_file = os.path.join(output_dir, weights_name) + if accelerator.process_index == 0: + print(f"Saving model to {output_model_file}") + torch.save(state_dict, output_model_file) + print(f"Model saved to {output_model_file}") + else: + weights_name = ( + f"{MODEL_NAME}_rank{accelerator.process_index}.bin" + if model_index == 0 + else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" + ) + output_model_file = os.path.join(output_dir, weights_name) + print(f"Saving model to {output_model_file}") + torch.save(state_dict, output_model_file) + print(f"Model saved to {output_model_file}") + + def load_model(self, accelerator, model, input_dir, model_index=0): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType + + accelerator.wait_for_everyone() + + if self.state_dict_type == StateDictType.FULL_STATE_DICT: + weights_name = 
f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin" + input_model_file = os.path.join(input_dir, weights_name) + accelerator.print(f"Loading model from {input_model_file}") + state_dict = torch.load(input_model_file) + accelerator.print(f"Model loaded from {input_model_file}") + else: + weights_name = ( + f"{MODEL_NAME}_rank{accelerator.process_index}.bin" + if model_index == 0 + else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" + ) + input_model_file = os.path.join(input_dir, weights_name) + print(f"Loading model from {input_model_file}") + state_dict = torch.load(input_model_file) + print(f"Model loaded from {input_model_file}") + + if is_torch_version("<=", "1.13.5"): + with FSDP.state_dict_type(model, self.state_dict_type, self.state_dict_config): + model.load_state_dict(state_dict) + else: + FSDP.set_state_dict_type(model, self.state_dict_type, self.state_dict_config) + model.load_state_dict(state_dict) + + def save_optimizer(self, accelerator, optimizer, model, output_dir, optimizer_index=0, optim_input=None): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + optim_state = FSDP.full_optim_state_dict(model, optimizer, optim_input=optim_input) + if accelerator.process_index == 0: + optim_state_name = ( + f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" + ) + output_optimizer_file = os.path.join(output_dir, optim_state_name) + print(f"Saving Optimizer state to {output_optimizer_file}") + torch.save(optim_state, output_optimizer_file) + print(f"Optimizer state saved in {output_optimizer_file}") + + def load_optimizer(self, accelerator, optimizer, model, input_dir, optimizer_index=0): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + accelerator.wait_for_everyone() + full_osd = None + if accelerator.process_index == 0: + optimizer_name = ( + f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin" + ) + input_optimizer_file = os.path.join(input_dir, optimizer_name) + print(f"Loading Optimizer state from {input_optimizer_file}") + full_osd = torch.load(input_optimizer_file) + print(f"Optimizer state loaded from {input_optimizer_file}") + # called from all ranks, though only rank0 has a valid param for full_osd + sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, model) + optimizer.load_state_dict(sharded_osd) + + +@dataclass +class MegatronLMPlugin: + """ + Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective + activation recomputation and optimized fused kernels. 
+ """ + + tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."}) + pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."}) + num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."}) + gradient_clipping: float = field( + default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"} + ) + sequence_parallelism: bool = field( + default=None, + metadata={"help": "enable sequence parallelism"}, + ) + recompute_activation: bool = field( + default=None, + metadata={"help": "enable selective activation recomputation"}, + ) + use_distributed_optimizer: bool = field( + default=None, + metadata={"help": "enable distributed optimizer"}, + ) + pipeline_model_parallel_split_rank: int = field( + default=None, metadata={"help": "Rank where encoder and decoder should be split."} + ) + num_layers_per_virtual_pipeline_stage: int = field( + default=None, metadata={"help": "Number of layers per virtual pipeline stage."} + ) + is_train_batch_min: str = field( + default=True, + metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"}, + ) + train_iters: int = field( + default=None, + metadata={ + "help": "Total number of iterations to train over all training runs. " + "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" + }, + ) + train_samples: int = field( + default=None, + metadata={ + "help": "Total number of samples to train over all training runs. " + "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" + }, + ) + weight_decay_incr_style: str = field( + default="constant", + metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '}, + ) + start_weight_decay: float = field( + default=None, + metadata={"help": "Initial weight decay coefficient for L2 regularization."}, + ) + end_weight_decay: float = field( + default=None, + metadata={"help": "End of run weight decay coefficient for L2 regularization."}, + ) + lr_decay_style: str = field( + default="linear", + metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."}, + ) + lr_decay_iters: int = field( + default=None, + metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."}, + ) + lr_decay_samples: int = field( + default=None, + metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."}, + ) + lr_warmup_iters: int = field( + default=None, + metadata={"help": "number of iterations to linearly warmup learning rate over."}, + ) + lr_warmup_samples: int = field( + default=None, + metadata={"help": "number of samples to linearly warmup learning rate over."}, + ) + lr_warmup_fraction: float = field( + default=None, + metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."}, + ) + min_lr: float = field( + default=0, + metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."}, + ) + consumed_samples: List[int] = field( + default=None, + metadata={ + "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call." 
+ }, + ) + no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."}) + scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."}) + lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."}) + megatron_dataset_flag: bool = field( + default=False, + metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."}, + ) + seq_length: int = field( + default=None, + metadata={"help": "Maximum sequence length to process."}, + ) + encoder_seq_length: int = field( + default=None, + metadata={"help": "Maximum sequence length to process for the encoder."}, + ) + decoder_seq_length: int = field( + default=None, + metadata={"help": "Maximum sequence length to process for the decoder."}, + ) + tensorboard_dir: str = field( + default=None, + metadata={"help": "Path to save tensorboard logs."}, + ) + set_all_logging_options: bool = field( + default=False, + metadata={"help": "Whether to set all logging options."}, + ) + eval_iters: int = field( + default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."} + ) + eval_interval: int = field( + default=1000, metadata={"help": "Interval between running evaluation on validation set."} + ) + return_logits: bool = field( + default=False, + metadata={"help": "Whether to return logits from the model."}, + ) + + # custom train step args + custom_train_step_class: Optional[Any] = field( + default=None, + metadata={"help": "Custom train step class."}, + ) + custom_train_step_kwargs: Optional[Dict[str, Any]] = field( + default=None, + metadata={"help": "Custom train step kwargs."}, + ) + + # custom model args + custom_model_provider_function: Optional[Callable] = field( + default=None, + metadata={"help": "Custom model provider function."}, + ) + custom_prepare_model_function: Optional[Callable] = field( + default=None, + metadata={"help": "Custom prepare model function."}, + ) + + # remaining args such as enabling Alibi/ROPE positional embeddings, + # wandb logging, Multi-Query Attention, etc. + other_megatron_args: Optional[Dict[str, Any]] = field( + default=None, + metadata={"help": "Other Megatron-LM arguments. 
Please refer Megatron-LM"}, + ) + + def __post_init__(self): + prefix = "MEGATRON_LM_" + if self.tp_degree is None: + self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1)) + if self.pp_degree is None: + self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1)) + if self.num_micro_batches is None: + self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1)) + if self.gradient_clipping is None: + self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0)) + if self.recompute_activation is None: + self.recompute_activation = strtobool(os.environ.get(prefix + "RECOMPUTE_ACTIVATION", "False")) == 1 + if self.use_distributed_optimizer is None: + self.use_distributed_optimizer = ( + strtobool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1 + ) + if self.sequence_parallelism is None: + self.sequence_parallelism = strtobool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1 + + if self.pp_degree > 1 or self.use_distributed_optimizer: + self.DDP_impl = "local" + else: + self.DDP_impl = "torch" + + if self.consumed_samples is not None: + if len(self.consumed_samples) == 1: + self.consumed_samples.extend([0, 0]) + elif len(self.consumed_samples) == 2: + self.consumed_samples.append(0) + + self.megatron_lm_default_args = { + "tensor_model_parallel_size": self.tp_degree, + "pipeline_model_parallel_size": self.pp_degree, + "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank, + "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage, + "DDP_impl": self.DDP_impl, + "use_distributed_optimizer": self.use_distributed_optimizer, + "sequence_parallel": self.sequence_parallelism, + "clip_grad": self.gradient_clipping, + "num_micro_batches": self.num_micro_batches, + "consumed_samples": self.consumed_samples, + "no_wd_decay_cond": self.no_wd_decay_cond, + "scale_lr_cond": self.scale_lr_cond, + "lr_mult": self.lr_mult, + "megatron_dataset_flag": self.megatron_dataset_flag, + "eval_iters": self.eval_iters, + "eval_interval": self.eval_interval, + } + if self.recompute_activation: + self.megatron_lm_default_args["recompute_granularity"] = "selective" + if self.tensorboard_dir is not None: + self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir + if self.set_all_logging_options: + self.set_tensorboard_logging_options() + if self.other_megatron_args is not None: + self.megatron_lm_default_args.update(self.other_megatron_args) + + def set_network_size_args(self, model, batch_data=None): + # Check if the model is either BERT, GPT or T5 else raise error + # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings' + if "megatron-bert" in model.config.model_type.lower(): + model_type_name = "bert" + num_layers = model.config.num_hidden_layers + hidden_size = model.config.hidden_size + num_attention_heads = model.config.num_attention_heads + max_position_embeddings = model.config.max_position_embeddings + num_labels = model.config.num_labels + orig_vocab_size = model.config.vocab_size + if "maskedlm" in model.__class__.__name__.lower(): + pretraining_flag = True + if self.seq_length is not None: + if self.encoder_seq_length is not None: + warnings.warn("Both `seq_length` and `encoder_seq_length` are set. 
Using `encoder_seq_length`.") + self.seq_length = self.encoder_seq_length + elif self.encoder_seq_length is not None: + self.seq_length = self.encoder_seq_length + elif batch_data is not None: + self.seq_length = batch_data["input_ids"].shape[1] + else: + self.seq_length = max_position_embeddings + self.megatron_lm_default_args["seq_length"] = self.seq_length + elif "gpt2" in model.config.model_type.lower(): + model_type_name = "gpt" + num_layers = model.config.n_layer + hidden_size = model.config.n_embd + num_attention_heads = model.config.n_head + max_position_embeddings = model.config.n_positions + orig_vocab_size = model.config.vocab_size + pretraining_flag = True + if self.seq_length is not None: + if self.decoder_seq_length is not None: + warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.") + self.seq_length = self.decoder_seq_length + elif self.decoder_seq_length is not None: + self.seq_length = self.decoder_seq_length + elif batch_data is not None: + self.seq_length = batch_data["input_ids"].shape[1] + else: + self.seq_length = max_position_embeddings + self.megatron_lm_default_args["seq_length"] = self.seq_length + self.megatron_lm_default_args["return_logits"] = self.return_logits + self.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer" + elif "t5" in model.config.model_type.lower(): + model_type_name = "t5" + num_layers = model.config.num_layers + hidden_size = model.config.d_model + num_attention_heads = model.config.num_heads + max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024 + orig_vocab_size = model.config.vocab_size + pretraining_flag = True + if self.encoder_seq_length is None: + if batch_data is not None: + self.encoder_seq_length = batch_data["input_ids"].shape[1] + else: + self.encoder_seq_length = max_position_embeddings + if self.decoder_seq_length is None: + if batch_data is not None: + self.decoder_seq_length = batch_data["labels"].shape[1] + else: + self.decoder_seq_length = max_position_embeddings + + self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length + self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length + else: + raise ValueError( + "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. " + "Please check the model you are using is one of those." 
+ ) + + self.megatron_lm_default_args["model_type_name"] = model_type_name + self.megatron_lm_default_args["num_layers"] = num_layers + self.megatron_lm_default_args["hidden_size"] = hidden_size + self.megatron_lm_default_args["num_attention_heads"] = num_attention_heads + self.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings + self.megatron_lm_default_args["pretraining_flag"] = pretraining_flag + self.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size + self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict + if model_type_name == "bert": + self.megatron_lm_default_args["num_labels"] = num_labels + + def set_mixed_precision(self, mixed_precision): + if mixed_precision == "fp16": + self.megatron_lm_default_args["fp16"] = True + elif mixed_precision == "bf16": + self.megatron_lm_default_args["bf16"] = True + self.DDP_impl = "local" + self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl + + def set_training_args(self, micro_batch_size, dp_degree): + self.data_parallel_size = dp_degree + self.micro_batch_size = micro_batch_size + self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches + self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size + self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size + self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size + + def set_optimizer_type(self, optimizer): + optimizer_name = optimizer.__class__.__name__.lower() + if "adam" in optimizer_name: + self.megatron_lm_default_args["optimizer"] = "adam" + self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0] + self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1] + self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"] + elif "sgd" in optimizer_name: + self.megatron_lm_default_args["optimizer"] = "sgd" + self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"] + else: + raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM") + + self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"] + self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"] + + def set_scheduler_args(self, scheduler): + if self.train_iters is None: + self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"] + if self.train_samples is not None: + self.train_samples = None + warnings.warn( + "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training." + ) + if self.lr_warmup_iters is None: + self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"] + if self.lr_warmup_samples is not None: + warnings.warn( + "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training." 
+ ) + self.lr_warmup_samples = 0 + + self.megatron_lm_default_args["train_iters"] = self.train_iters + self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters + self.megatron_lm_default_args["train_samples"] = self.train_samples + self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples + self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters + self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples + self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction + self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style + self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style + self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay + self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay + self.megatron_lm_default_args["min_lr"] = self.min_lr + + def set_tensorboard_logging_options(self): + from megatron.arguments import _add_logging_args + + parser = argparse.ArgumentParser() + parser = _add_logging_args(parser) + logging_args = parser.parse_known_args() + self.dataset_args = vars(logging_args[0]) + for key, value in self.dataset_args.items(): + if key.startswith("log_"): + self.megatron_lm_default_args[key] = True + elif key.startswith("no_log_"): + self.megatron_lm_default_args[key.replace("no_", "")] = True diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/deepspeed.py b/testbed/huggingface__accelerate/src/accelerate/utils/deepspeed.py new file mode 100644 index 0000000000000000000000000000000000000000..892718990256615693fbe76f8f56ed60f791139e --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/deepspeed.py @@ -0,0 +1,267 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import io +import json +import os +from copy import deepcopy + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler + + +class HfDeepSpeedConfig: + """ + This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. + + A `weakref` of this object is stored in the module's globals to be able to access the config from areas where + things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore + it's important that this object remains alive while the program is still running. + + [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration + with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic + the DeepSpeed configuration is not modified in any way. + + Args: + config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. 
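+
+    Example (a short sketch using a hand-written illustrative config):
+
+    ```python
+    from accelerate.utils.deepspeed import HfDeepSpeedConfig
+
+    ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
+    hf_ds_config = HfDeepSpeedConfig(ds_config)
+    assert hf_ds_config.is_zero3() and hf_ds_config.is_offload()
+    assert hf_ds_config.get_value("zero_optimization.stage") == 3
+    ```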
+ + """ + + def __init__(self, config_file_or_dict): + + if isinstance(config_file_or_dict, dict): + # Don't modify user's data should they want to reuse it (e.g. in tests), because once we + # modified it, it will not be accepted here again, since `auto` values would have been overridden + config = deepcopy(config_file_or_dict) + elif os.path.exists(config_file_or_dict): + with io.open(config_file_or_dict, "r", encoding="utf-8") as f: + config = json.load(f) + else: + try: + config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") + config = json.loads(config_decoded) + except (UnicodeDecodeError, AttributeError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config}" + ) + + self.config = config + + self.set_stage_and_offload() + + def set_stage_and_offload(self): + # zero stage - this is done as early as possible, before model is created, to allow + # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object + # during ``zero.Init()`` which needs to know the dtype, and some other hparams. + self._stage = self.get_value("zero_optimization.stage", -1) + + # offload + self._offload = False + if self.is_zero2() or self.is_zero3(): + offload_devices_valid = set(["cpu", "nvme"]) + offload_devices = set( + [ + self.get_value("zero_optimization.offload_optimizer.device"), + self.get_value("zero_optimization.offload_param.device"), + ] + ) + if len(offload_devices & offload_devices_valid) > 0: + self._offload = True + + def find_config_node(self, ds_key_long): + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + ds_key = nodes.pop() + for node in nodes: + config = config.get(node) + if config is None: + return None, ds_key + + return config, ds_key + + def get_value(self, ds_key_long, default=None): + """ + Returns the set value or `default` if no value is set + """ + config, ds_key = self.find_config_node(ds_key_long) + if config is None: + return default + return config.get(ds_key, default) + + def del_config_sub_tree(self, ds_key_long, must_exist=False): + """ + Deletes a sub-section of the config file if it's found. + + Unless `must_exist` is `True` the section doesn't have to exist. + """ + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + for node in nodes: + parent_config = config + config = config.get(node) + if config is None: + if must_exist: + raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") + else: + return + + # if found remove it + if parent_config is not None: + parent_config.pop(node) + + def is_true(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). + + """ + value = self.get_value(ds_key_long) + return False if value is None else bool(value) + + def is_false(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). 
+ """ + value = self.get_value(ds_key_long) + return False if value is None else not bool(value) + + def is_zero2(self): + return self._stage == 2 + + def is_zero3(self): + return self._stage == 3 + + def is_offload(self): + return self._offload + + +class DeepSpeedEngineWrapper: + """ + Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop. + + Args: + engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap + """ + + def __init__(self, engine): + self.engine = engine + + def backward(self, loss): + # runs backpropagation and handles mixed precision + self.engine.backward(loss) + + # Deepspeed's `engine.step` performs the following operations: + # - gradient accumulation check + # - gradient clipping + # - optimizer step + # - zero grad + # - checking overflow + # - lr_scheduler step (only if engine.lr_scheduler is not None) + self.engine.step() + # and this plugin overrides the above calls with no-ops when Accelerate runs under + # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple + # training loop that works transparently under many training regimes. + + +class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): + """ + Internal wrapper around a deepspeed optimizer. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + """ + + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + + def zero_grad(self, set_to_none=None): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + return self.optimizer.overflow + + +class DeepSpeedSchedulerWrapper(AcceleratedScheduler): + """ + Internal wrapper around a deepspeed scheduler. + + Args: + scheduler (`torch.optim.lr_scheduler.LambdaLR`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + """ + + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + +class DummyOptim: + """ + Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training + loop when optimizer config is specified in the deepspeed config file. + + Args: + lr (float): + Learning rate. + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + weight_decay (float): + Weight decay. + **kwargs: + Other arguments. + """ + + def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): + self.params = params + self.lr = lr + self.weight_decay = weight_decay + self.kwargs = kwargs + + +class DummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int): + Total number of steps. + warmup_num_steps (int): + Number of steps for warmup. + **kwargs: + Other arguments. 
+ """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.kwargs = kwargs diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/environment.py b/testbed/huggingface__accelerate/src/accelerate/utils/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..9247bb4fedad7a5de341015af9fd6dfc380dd0ce --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/environment.py @@ -0,0 +1,36 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from distutils.util import strtobool + + +def get_int_from_env(env_keys, default): + """Returns the first positive env value found in the `env_keys` list or the default.""" + for e in env_keys: + val = int(os.environ.get(e, -1)) + if val >= 0: + return val + return default + + +def parse_flag_from_env(key, default=False): + """Returns truthy value for `key` from the env if available else the default.""" + value = os.environ.get(key, str(default)) + return strtobool(value) == 1 # As its name indicates `strtobool` actually returns an int... + + +def parse_choice_from_env(key, default="no"): + value = os.environ.get(key, str(default)) + return value diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/imports.py b/testbed/huggingface__accelerate/src/accelerate/utils/imports.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd35f62b73afa1598206aecf3a900ca1142fa9b --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/imports.py @@ -0,0 +1,158 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import os +import sys +import warnings +from distutils.util import strtobool +from functools import lru_cache + +import torch + +from packaging.version import parse + +from .environment import parse_flag_from_env +from .versions import compare_versions, is_torch_version + + +# The package importlib_metadata is in a different place, depending on the Python version. 
+if sys.version_info < (3, 8): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata + + +try: + import torch_xla.core.xla_model as xm # noqa: F401 + + _tpu_available = True +except ImportError: + _tpu_available = False + + +def is_ccl_available(): + return ( + importlib.util.find_spec("torch_ccl") is not None + or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None + ) + + +def get_ccl_version(): + return importlib_metadata.version("oneccl_bind_pt") + + +def is_apex_available(): + return importlib.util.find_spec("apex") is not None + + +@lru_cache() +def is_tpu_available(check_device=True): + "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" + if _tpu_available and check_device: + try: + # Will raise a RuntimeError if no XLA configuration is found + _ = xm.xla_device() + return True + except RuntimeError: + return False + return _tpu_available + + +def is_deepspeed_available(): + package_exists = importlib.util.find_spec("deepspeed") is not None + # Check we're not importing a "deepspeed" directory somewhere but the actual library by trying to grab the version + # AND checking it has an author field in the metadata that is HuggingFace. + if package_exists: + try: + _ = importlib_metadata.metadata("deepspeed") + return True + except importlib_metadata.PackageNotFoundError: + return False + + +def is_bf16_available(ignore_tpu=False): + "Checks if bf16 is supported, optionally ignoring the TPU" + if is_tpu_available(): + return not ignore_tpu + if is_torch_version(">=", "1.10"): + if torch.cuda.is_available(): + return torch.cuda.is_bf16_supported() + return True + return False + + +def is_megatron_lm_available(): + if strtobool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1: + package_exists = importlib.util.find_spec("megatron") is not None + if package_exists: + megatron_version = parse(importlib_metadata.version("megatron-lm")) + return compare_versions(megatron_version, ">=", "2.2.0") + return False + + +def is_safetensors_available(): + return importlib.util.find_spec("safetensors") is not None + + +def is_transformers_available(): + return importlib.util.find_spec("transformers") is not None + + +def is_datasets_available(): + return importlib.util.find_spec("datasets") is not None + + +def is_aim_available(): + return importlib.util.find_spec("aim") is not None + + +def is_tensorboard_available(): + return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None + + +def is_wandb_available(): + return importlib.util.find_spec("wandb") is not None + + +def is_comet_ml_available(): + return importlib.util.find_spec("comet_ml") is not None + + +def is_boto3_available(): + return importlib.util.find_spec("boto3") is not None + + +def is_rich_available(): + if importlib.util.find_spec("rich") is not None: + if parse_flag_from_env("DISABLE_RICH"): + warnings.warn( + "The `DISABLE_RICH` flag is deprecated and will be removed in version 0.17.0 of 🤗 Accelerate. 
Use `ACCELERATE_DISABLE_RICH` instead.", + FutureWarning, + ) + return not parse_flag_from_env("DISABLE_RICH") + return not parse_flag_from_env("ACCELERATE_DISABLE_RICH") + return False + + +def is_sagemaker_available(): + return importlib.util.find_spec("sagemaker") is not None + + +def is_tqdm_available(): + return importlib.util.find_spec("tqdm") is not None + + +def is_mlflow_available(): + return importlib.util.find_spec("mlflow") is not None diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/launch.py b/testbed/huggingface__accelerate/src/accelerate/utils/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..078dc14116cee2c5ebb136feca19b219f3072010 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/launch.py @@ -0,0 +1,97 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +import torch + +from ..utils import is_torch_version +from .dataclasses import DistributedType + + +def get_launch_prefix(): + """ + Grabs the correct launcher for starting a distributed command, such as either `torchrun`, `python -m + torch.distributed.run`, etc + """ + if is_torch_version(">=", "1.10.0"): + cmd = ["torchrun"] + elif is_torch_version(">=", "1.9.0"): + cmd = [sys.executable, "-m", "torch.distributed.run"] + else: + cmd = [sys.executable, "-m", "torch.distributed.launch", "--use_env"] + return cmd + + +def _filter_args(args): + """ + Filters out all `accelerate` specific args + """ + if is_torch_version(">=", "1.9.1"): + import torch.distributed.run as distrib_run + distrib_args = distrib_run.get_args_parser() + new_args, _ = distrib_args.parse_known_args() + + for key, value in vars(args).items(): + if key in vars(new_args).keys(): + setattr(new_args, key, value) + return new_args + + +def env_var_path_add(env_var_name, path_to_add): + """ + Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the + caller to set it in os.environ. + """ + paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0] + paths.append(str(path_to_add)) + return ":".join(paths) + + +class PrepareForLaunch: + """ + Prepare a function that will launched in a distributed setup. + + Args: + launcher (`Callable`): + The function to launch. + distributed_type ([`~state.DistributedType`]): + The distributed type to prepare for. + debug (`bool`, *optional*, defaults to `False`): + Whether or not this is a debug launch. 
+ """ + + def __init__(self, launcher, distributed_type="NO", debug=False): + self.launcher = launcher + self.distributed_type = DistributedType(distributed_type) + self.debug = debug + + def __call__(self, index, *args): + if self.debug: + world_size = int(os.environ.get("WORLD_SIZE")) + rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE") + torch.distributed.init_process_group( + "gloo", + rank=index, + store=torch.distributed.FileStore(rdv_file, world_size), + world_size=world_size, + ) + elif self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): + # Prepare the environment for torch.distributed + os.environ["LOCAL_RANK"] = str(index) + os.environ["RANK"] = str(index) + + os.environ["FORK_LAUNCHED"] = str(1) + self.launcher(*args) diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/megatron_lm.py b/testbed/huggingface__accelerate/src/accelerate/utils/megatron_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..ab3bd8c370ca99e9324ed2f073570ca29a91a770 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/megatron_lm.py @@ -0,0 +1,1441 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import math +from abc import ABC +from functools import partial + +import torch +import torch.nn.functional as F +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler +from .imports import is_megatron_lm_available, is_transformers_available +from .operations import recursively_apply, send_to_device + + +if is_transformers_available(): + from transformers.modeling_outputs import ( + CausalLMOutputWithCrossAttentions, + Seq2SeqLMOutput, + SequenceClassifierOutput, + ) + + +if is_megatron_lm_available(): + from megatron import ( + get_args, + get_num_microbatches, + get_tensorboard_writer, + get_timers, + get_tokenizer, + mpu, + print_rank_0, + print_rank_last, + ) + from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args + from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint + from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler + from megatron.global_vars import set_global_variables + from megatron.initialize import ( + _compile_dependencies, + _init_autoresume, + _set_random_seed, + set_jit_fusion_options, + write_args_to_tensorboard, + ) + from megatron.model import BertModel + from megatron.model import DistributedDataParallel as LocalDDP + from megatron.model import Float16Module, GPTModel, ModelType, T5Model + from megatron.model.classification import Classification + from megatron.optimizer import get_megatron_optimizer + from megatron.schedules import get_forward_backward_func + from megatron.text_generation.communication import 
broadcast_int_list, broadcast_tensor + from megatron.text_generation.generation import ( + beam_search_and_return_on_first_stage, + generate_tokens_probs_and_return_on_first_stage, + ) + from megatron.tokenizer.tokenizer import _vocab_size_with_padding + from megatron.training import get_model, get_optimizer_param_scheduler, training_log + from megatron.utils import ( + average_losses_across_data_parallel_group, + calc_params_l2_norm, + get_ltor_masks_and_position_ids, + unwrap_model, + ) + + +# model utilities +def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): + """Build the model.""" + args = get_args() + mode = "pre-training" if args.pretraining_flag else "fine-tuning" + if args.rank == 0: + print(f"Building {args.model_type_name} model in the {mode} mode.") + print( + "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " + "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." + ) + if args.model_type_name == "bert": + if args.pretraining_flag: + num_tokentypes = 2 if args.bert_binary_head else 0 + model = BertModel( + num_tokentypes=num_tokentypes, + add_binary_head=args.bert_binary_head, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + ) + else: + + model = Classification( + num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process + ) + elif args.model_type_name == "gpt": + model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process) + elif args.model_type_name == "t5": + model = T5Model( + num_tokentypes=0, + parallel_output=True, + pre_process=pre_process, + post_process=post_process, + add_encoder=add_encoder, + add_decoder=add_decoder, + ) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + return model + + +def prepare_model(accelerator): + accelerator.print("Preparing model") + args = get_args() + if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None: + if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None: + raise ValueError( + "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`." + ) + custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function + model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func) + else: + if args.model_type_name in ("bert", "gpt"): + model_type = ModelType.encoder_or_decoder + elif args.model_type_name == "t5": + model_type = ModelType.encoder_and_decoder + if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1: + args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2 + model = get_model(model_provider_func, model_type) + return model + + +# dataloader utilities +class MegatronLMDummyDataLoader: + """ + Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training + + Args: + **dataset_kwargs: Megatron data arguments. 
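+
+    Example (a hedged sketch; the keyword names mirror Megatron-LM's data arguments and the data prefix is a
+    placeholder):
+
+    ```python
+    >>> from accelerate.utils import MegatronLMDummyDataLoader
+
+    >>> megatron_dataloader = MegatronLMDummyDataLoader(
+    ...     data_path=["my-gpt2_text_document"], seq_length=1024, micro_batch_size=2
+    ... )
+    ```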
+ """ + + def __init__(self, **dataset_kwargs): + parser = argparse.ArgumentParser() + parser = _add_data_args(parser) + parser = _add_validation_args(parser) + data_args = parser.parse_known_args() + self.dataset_args = vars(data_args[0]) + self.dataset_args.update(dataset_kwargs) + self.dataset_args["megatron_dataset_flag"] = True + + def set_megatron_data_args(self): + args = get_args() + for key, value in self.dataset_args.items(): + setattr(args, key, value) + + def get_train_valid_test_datasets_provider(self): + def train_valid_test_datasets_provider(train_val_test_num_samples): + """Build train, valid, and test datasets.""" + args = get_args() + dataset_args = { + "data_prefix": args.data_path, + "data_impl": args.data_impl, + "splits_string": args.split, + "train_valid_test_num_samples": train_val_test_num_samples, + "skip_warmup": (not args.mmap_warmup), + "seed": args.seed, + } + if args.model_type_name == "bert": + dataset_args.update( + { + "max_seq_length": args.seq_length, + "masked_lm_prob": args.mask_prob, + "short_seq_prob": args.short_seq_prob, + "binary_head": args.bert_binary_head, + } + ) + elif args.model_type_name == "gpt": + dataset_args.update( + { + "seq_length": args.seq_length, + } + ) + elif args.model_type_name == "t5": + dataset_args.update( + { + "max_seq_length": args.encoder_seq_length, + "max_seq_length_dec": args.decoder_seq_length, + "masked_lm_prob": args.mask_prob, + "short_seq_prob": args.short_seq_prob, + "dataset_type": "t5", + } + ) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + if args.model_type_name == "gpt": + from megatron.data.gpt_dataset import build_train_valid_test_datasets + else: + from megatron.data.dataset_utils import build_train_valid_test_datasets + train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args) + return train_ds, valid_ds, test_ds + + return train_valid_test_datasets_provider + + def build_pretraining_data_loader(self, dataset, consumed_samples): + if dataset is None: + return None + args = get_args() + micro_batch_size = args.micro_batch_size * args.num_micro_batches + + # Megatron sampler + if args.dataloader_type == "single": + batch_sampler = MegatronPretrainingSampler( + total_samples=len(dataset), + consumed_samples=consumed_samples, + micro_batch_size=micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size(), + ) + elif args.dataloader_type == "cyclic": + batch_sampler = MegatronPretrainingRandomSampler( + dataset, + total_samples=len(dataset), + consumed_samples=consumed_samples, + micro_batch_size=micro_batch_size, + data_parallel_rank=mpu.get_data_parallel_rank(), + data_parallel_size=mpu.get_data_parallel_world_size(), + data_sharding=args.data_sharding, + ) + else: + raise Exception("{} dataloader type is not supported.".format(args.dataloader_type)) + + # Torch dataloader. + return torch.utils.data.DataLoader( + dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True + ) + + def build_train_valid_test_data_iterators(self): + def cyclic_iter(iter): + while True: + for x in iter: + yield x + + args = get_args() + + (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None) + + print_rank_0("> building train, validation, and test datasets ...") + + # Backward compatibility, assume fixed batch size. 
+        if args.iteration > 0 and args.consumed_train_samples == 0:
+            assert args.train_samples is None, "only backward compatibility support for iteration-based training"
+            args.consumed_train_samples = args.iteration * args.global_batch_size
+        if args.iteration > 0 and args.consumed_valid_samples == 0:
+            if args.train_samples is None:
+                args.consumed_valid_samples = (
+                    (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size
+                )
+
+        # Data loader only on rank 0 of each model parallel group.
+        if mpu.get_tensor_model_parallel_rank() == 0:
+
+            # Number of train/valid/test samples.
+            if args.train_samples:
+                train_samples = args.train_samples
+            else:
+                train_samples = args.train_iters * args.global_batch_size
+            eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters
+            test_iters = args.eval_iters
+            train_val_test_num_samples = [
+                train_samples,
+                eval_iters * args.global_batch_size,
+                test_iters * args.global_batch_size,
+            ]
+            print_rank_0(" > datasets target sizes (minimum size):")
+            print_rank_0("    train:      {}".format(train_val_test_num_samples[0]))
+            print_rank_0("    validation: {}".format(train_val_test_num_samples[1]))
+            print_rank_0("    test:       {}".format(train_val_test_num_samples[2]))
+
+            # Build the datasets.
+            train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()
+            train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples)
+
+            # Build dataloaders.
+            train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)
+            valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)
+            test_dataloader = self.build_pretraining_data_loader(test_ds, 0)
+
+            # Flags to know if we need to do training/validation/testing.
+            do_train = train_dataloader is not None and args.train_iters > 0
+            do_valid = valid_dataloader is not None and args.eval_iters > 0
+            do_test = test_dataloader is not None and args.eval_iters > 0
+            # Need to broadcast num_tokens and num_type_tokens.
+            flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])
+        else:
+            flags = torch.cuda.LongTensor([0, 0, 0])
+
+        # Broadcast num tokens.
+        torch.distributed.broadcast(
+            flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()
+        )
+        args.do_train = flags[0].item()
+        args.do_valid = flags[1].item()
+        args.do_test = flags[2].item()
+
+        # Build iterators.
+ dl_type = args.dataloader_type + assert dl_type in ["single", "cyclic"] + + if train_dataloader is not None: + train_data_iterator = ( + iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader)) + ) + else: + train_data_iterator = None + + if valid_dataloader is not None: + valid_data_iterator = ( + iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader)) + ) + else: + valid_data_iterator = None + + if test_dataloader is not None: + test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader)) + else: + test_data_iterator = None + + return train_data_iterator, valid_data_iterator, test_data_iterator + + +def prepare_data_loader(accelerator, dataloader): + accelerator.print("Preparing dataloader") + args = get_args() + if not args.megatron_dataset_flag: + from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader + + args = get_args() + micro_batch_size = args.micro_batch_size * args.num_micro_batches + kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS} + if kwargs["batch_size"] is None: + if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler): + kwargs["sampler"].batch_size = micro_batch_size + else: + del kwargs["sampler"] + del kwargs["shuffle"] + del kwargs["batch_size"] + kwargs["batch_sampler"].batch_size = micro_batch_size + else: + del kwargs["batch_sampler"] + kwargs["batch_size"] = micro_batch_size + + dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs) + return prepare_data_loader( + dataloader, + accelerator.device, + num_processes=mpu.get_data_parallel_world_size(), + process_index=mpu.get_data_parallel_rank(), + split_batches=accelerator.split_batches, + put_on_device=True, + rng_types=accelerator.rng_types.copy(), + dispatch_batches=accelerator.dispatch_batches, + ) + else: + if args.consumed_samples is not None: + ( + args.consumed_train_samples, + args.consumed_valid_samples, + args.consumed_test_samples, + ) = args.consumed_samples + else: + args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0 + ( + train_data_iterator, + valid_data_iterator, + test_data_iterator, + ) = dataloader.build_train_valid_test_data_iterators() + return train_data_iterator, valid_data_iterator, test_data_iterator + + +# optimizer utilities +class MegatronLMOptimizerWrapper(AcceleratedOptimizer): + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + + def zero_grad(self, set_to_none=None): + pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed + + def step(self): + pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + return self.optimizer.skipped_iter + + +def prepare_optimizer(accelerator, model): + accelerator.print("Preparing optimizer") + args = get_args() + optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult) + return optimizer + + +# scheduler utilities +class MegatronLMDummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. 
+ + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int): + Total number of steps. + warmup_num_steps (int): + Number of steps for warmup. + **kwargs: + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.kwargs = kwargs + + +class MegatronLMSchedulerWrapper(AcceleratedScheduler): + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self, *args, **kwargs): + return # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed + + +def prepare_scheduler(accelerator, optimizer, scheduler): + accelerator.print("Preparing scheduler") + scheduler = get_optimizer_param_scheduler(optimizer) + return scheduler + + +class AbstractTrainStep(ABC): + """Abstract class for batching, forward pass and loss handler.""" + + def __init__(self, name): + super().__init__() + self.name = name + + def get_batch_func(self): + pass + + def get_forward_step_func(self): + pass + + def get_loss_func(self): + pass + + +class BertTrainStep(AbstractTrainStep): + """ + Bert train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. + """ + + def __init__(self, args): + super().__init__("BertTrainStep") + self.get_batch = self.get_batch_func(args.megatron_dataset_flag) + self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels) + self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head) + if not args.model_return_dict: + self.model_output_class = None + else: + self.model_output_class = SequenceClassifierOutput + + def get_batch_func(self, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Build the batch.""" + + # Items and their type. + keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens = data_b["text"].long() + types = data_b["types"].long() + sentence_order = data_b["is_random"].long() + loss_mask = data_b["loss_mask"].float() + lm_labels = data_b["labels"].long() + padding_mask = data_b["padding_mask"].long() + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + def get_batch_transformer(data_iterator): + """Build the batch.""" + data = next(data_iterator) + data = send_to_device(data, torch.cuda.current_device()) + + # Unpack. 
+ tokens = data["input_ids"].long() + padding_mask = data["attention_mask"].long() + if "token_type_ids" in data: + types = data["token_type_ids"].long() + else: + types = None + if "labels" in data: + lm_labels = data["labels"].long() + loss_mask = (data["labels"] != -100).to(torch.float) + else: + lm_labels = None + loss_mask = None + if "next_sentence_label" in data: + sentence_order = data["next_sentence_label"].long() + else: + sentence_order = None + + return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask + + if megatron_dataset_flag: + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self, pretraining_flag, num_labels): + def loss_func_pretrain(loss_mask, sentence_order, output_tensor): + lm_loss_, sop_logits = output_tensor + + lm_loss_ = lm_loss_.float() + loss_mask = loss_mask.float() + lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + if sop_logits is not None: + sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1) + sop_loss = sop_loss.float() + loss = lm_loss + sop_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss]) + return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]} + + else: + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + return loss, {"lm loss": averaged_losses[0]} + + def loss_func_finetune(labels, logits): + if num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + elif self.num_labels > 1 and (labels.dtype in (torch.long, torch.int)): + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, num_labels), labels.view(-1)) + else: + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + averaged_losses = average_losses_across_data_parallel_group([loss]) + return loss, {"loss": averaged_losses[0]} + + if pretraining_flag: + return loss_func_pretrain + else: + return loss_func_finetune + + def get_forward_step_func(self, pretraining_flag, bert_binary_head): + def forward_step(data_iterator, model): + """Forward step.""" + tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator) + if not bert_binary_head: + types = None + # Forward pass through the model. + if pretraining_flag: + output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels) + return output_tensor, partial(self.loss_func, loss_mask, sentence_order) + else: + logits = model(tokens, padding_mask, tokentype_ids=types) + return logits, partial(self.loss_func, labels) + + return forward_step + + +class GPTTrainStep(AbstractTrainStep): + """ + GPT train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. 
+ """ + + def __init__(self, args): + super().__init__("GPTTrainStep") + self.get_batch = self.get_batch_func(args.megatron_dataset_flag) + self.loss_func = self.get_loss_func() + self.forward_step = self.get_forward_step_func() + self.eod_token = args.padded_vocab_size - 1 + if args.vocab_file is not None: + tokenizer = get_tokenizer() + self.eod_token = tokenizer.eod + self.reset_position_ids = args.reset_position_ids + self.reset_attention_mask = args.reset_attention_mask + self.eod_mask_loss = args.eod_mask_loss + if not args.model_return_dict: + self.model_output_class = None + else: + self.model_output_class = CausalLMOutputWithCrossAttentions + + def get_batch_func(self, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Generate a batch""" + # Items and their type. + keys = ["text"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. + tokens_ = data_b["text"].long() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss + ) + + return tokens, labels, loss_mask, attention_mask, position_ids + + def get_batch_transformer(data_iterator): + data = next(data_iterator) + data = {"input_ids": data["input_ids"]} + data = send_to_device(data, torch.cuda.current_device()) + + tokens_ = data["input_ids"].long() + padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token + tokens_ = torch.concat([tokens_, padding], dim=1) + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + # Get the masks and postition ids. + attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids( + tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True + ) + return tokens, labels, loss_mask, attention_mask, position_ids + + if megatron_dataset_flag: + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self): + args = get_args() + + def loss_func(loss_mask, output_tensor): + if args.return_logits: + losses, logits = output_tensor + else: + losses = output_tensor + losses = losses.float() + loss_mask = loss_mask.view(-1).float() + loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum() + + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss]) + + output_dict = {"lm loss": averaged_loss[0]} + if args.return_logits: + output_dict.update({"logits": logits}) + return loss, output_dict + + return loss_func + + def get_forward_step_func(self): + def forward_step(data_iterator, model): + """Forward step.""" + # Get the batch. + tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) + output_tensor = model(tokens, position_ids, attention_mask, labels=labels) + + return output_tensor, partial(self.loss_func, loss_mask) + + return forward_step + + +class T5TrainStep(AbstractTrainStep): + """ + T5 train step class. + + Args: + args (`argparse.Namespace`): Megatron-LM arguments. 
+ """ + + def __init__(self, args): + super().__init__("T5TrainStep") + self.get_batch = self.get_batch_func(args.megatron_dataset_flag) + self.loss_func = self.get_loss_func() + self.forward_step = self.get_forward_step_func() + if not args.model_return_dict: + self.model_output_class = None + else: + self.model_output_class = Seq2SeqLMOutput + + @staticmethod + def attn_mask_postprocess(attention_mask): + # We create a 3D attention mask from a 2D tensor mask. + # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = attention_mask.unsqueeze(2) + # [b, s, s] + attention_mask_bss = attention_mask_b1s * attention_mask_bs1 + # Convert attention mask to binary: + extended_attention_mask = attention_mask_bss < 0.5 + return extended_attention_mask + + @staticmethod + def get_decoder_mask(seq_length, device): + attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device)) + attention_mask = attention_mask < 0.5 + return attention_mask + + @staticmethod + def get_enc_dec_mask(attention_mask, dec_seq_length, device): + batch_size, _ = attention_mask.shape + # We create a 3D attention mask from a 2D tensor mask. + # [b, 1, s] + attention_mask_b1s = attention_mask.unsqueeze(1) + # [b, s, 1] + attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device) + attention_mask_bss = attention_mask_bs1 * attention_mask_b1s + extended_attention_mask = attention_mask_bss < 0.5 + return extended_attention_mask + + def get_batch_func(self, megatron_dataset_flag): + def get_batch_megatron(data_iterator): + """Build the batch.""" + + keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is not None: + data = next(data_iterator) + else: + data = None + data_b = mpu.broadcast_data(keys, data, datatype) + + # Unpack. 
+ tokens_enc = data_b["text_enc"].long() + tokens_dec = data_b["text_dec"].long() + labels = data_b["labels"].long() + loss_mask = data_b["loss_mask"].float() + + enc_mask = data_b["enc_mask"] < 0.5 + dec_mask = data_b["dec_mask"] < 0.5 + enc_dec_mask = data_b["enc_dec_mask"] < 0.5 + + return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask + + def get_batch_transformer(data_iterator): + """Build the batch.""" + data = next(data_iterator) + data = send_to_device(data, torch.cuda.current_device()) + + tokens_enc = data["input_ids"].long() + labels = data["labels"].long() + loss_mask = (labels != -100).to(torch.float) + if "decoder_input_ids" in data: + tokens_dec = data["decoder_input_ids"].long() + else: + tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long) + tokens_dec[..., 1:] = labels[..., :-1].clone() + tokens_dec[..., 0] = 0 + tokens_dec.masked_fill_(tokens_dec == -100, 0) + enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long()) + dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device) + enc_dec_mask = T5TrainStep.get_enc_dec_mask( + data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device + ) + + return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask + + if megatron_dataset_flag: + return get_batch_megatron + else: + return get_batch_transformer + + def get_loss_func(self): + def loss_func(loss_mask, output_tensor): + lm_loss_ = output_tensor.float() + lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + + loss = lm_loss + averaged_losses = average_losses_across_data_parallel_group([lm_loss]) + + return loss, {"lm loss": averaged_losses[0]} + + return loss_func + + def get_forward_step_func(self): + def forward_step(data_iterator, model): + """Forward step.""" + # Get the batch. + tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch( + data_iterator + ) + # Forward model lm_labels + output_tensor = model( + tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels + ) + + return output_tensor, partial(self.loss_func, loss_mask) + + return forward_step + + +# intialize megatron setup +def initialize(accelerator, extra_args_provider=None, args_defaults={}): + accelerator.print("Initializing Megatron-LM") + assert torch.cuda.is_available(), "Megatron requires CUDA." + + # Parse arguments + args = parse_args(extra_args_provider, ignore_unknown_args=True) + + # Set defaults + for key, value in args_defaults.items(): + if getattr(args, key, None) is not None: + if args.rank == 0: + print( + "WARNING: overriding default arguments for {key}:{v} \ + with {key}:{v2}".format( + key=key, v=getattr(args, key), v2=value + ), + flush=True, + ) + setattr(args, key, value) + + if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False): + assert args.load is not None, "--use-checkpoints-args requires --load argument" + load_args_from_checkpoint(args) + + validate_args(args) + + # set global args, build tokenizer, and set adlr-autoresume, + # tensorboard-writer, and timers. + set_global_variables(args) + + # torch.distributed initialization + def finish_mpu_init(): + args = get_args() + # Pytorch distributed. 
+ device_count = torch.cuda.device_count() + args.rank = torch.distributed.get_rank() + args.world_size = torch.distributed.get_world_size() + if device_count > 0: + device = args.rank % device_count + if args.local_rank is not None: + assert args.local_rank == device, "expected local-rank to be the same as rank % device-count." + else: + args.local_rank = device + + # Set the tensor model-parallel, pipeline model-parallel, and + # data-parallel communicators. + if mpu.model_parallel_is_initialized(): + print("model parallel is already initialized") + else: + mpu.initialize_model_parallel( + args.tensor_model_parallel_size, + args.pipeline_model_parallel_size, + args.virtual_pipeline_model_parallel_size, + args.pipeline_model_parallel_split_rank, + ) + + # Random seeds for reproducibility. + if args.rank == 0: + print("> setting random seeds to {} ...".format(args.seed)) + _set_random_seed(args.seed, args.data_parallel_random_init) + + args = get_args() + + # Megatron's MPU is the master. Complete initialization right away. + finish_mpu_init() + + # Autoresume. + _init_autoresume() + + # Compile dependencies. + _compile_dependencies() + + # Set pytorch JIT layer fusion options and warmup JIT functions. + set_jit_fusion_options() + args = get_args() + args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args) + if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2: + args.bert_binary_head = True + else: + args.bert_binary_head = False + args.iteration = 0 + + +class MegatronEngine(torch.nn.Module): + """ + Megatron-LM model wrapper + + Args: + accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use. + model: Megatron-LM model + optimizer: Megatron-LM optimizer + lr_scheduler: Megatron-LM lr scheduler + """ + + def __init__(self, accelerator, model, optimizer, scheduler): + super(MegatronEngine, self).__init__() + self.module = model + self.base_model = model[0] + self.optimizer = optimizer + self.scheduler = scheduler + args = get_args() + if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None: + self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class( + args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs + ) + elif args.model_type_name == "bert": + self.train_step_handler = BertTrainStep(args) + elif args.model_type_name == "gpt": + self.train_step_handler = GPTTrainStep(args) + elif args.model_type_name == "t5": + self.train_step_handler = T5TrainStep(args) + else: + raise ValueError(f"Unsupported model type: {args.model_type_name}") + self.optimizer.skipped_iter = False + + # Tracking loss. + self.total_loss_dict = {} + self.eval_total_loss_dict = {} + self.iteration = 0 + self.report_memory_flag = True + if args.tensorboard_dir is not None: + write_args_to_tensorboard() + + def train(self): + for model_module in self.module: + model_module.train() + self.log_eval_results() + + def eval(self): + for model_module in self.module: + model_module.eval() + + def train_step(self, **batch_data): + """ + Training step for Megatron-LM + + Args: + batch_data (:obj:`dict`): The batch data to train on. 
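+
+        Example (an illustrative sketch; `megatron_lm_model` stands for the engine returned by
+        `accelerator.prepare` and `batch` for a dict of already-prepared input tensors):
+
+        ```python
+        >>> loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = megatron_lm_model.train_step(**batch)
+        ```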
+ """ + + args = get_args() + timers = get_timers() + + if len(batch_data) > 0: + data_chunks = [] + if args.num_micro_batches > 1: + for i in range(0, args.num_micro_batches): + data_chunks.append( + { + k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] + for k, v in batch_data.items() + } + ) + else: + data_chunks = [batch_data] + + if len(self.module) > 1: + batch_data_iterator = ( + [iter(data_chunks) for _ in range(len(self.module))] + if len(batch_data) > 0 + else [None] * len(self.module) + ) + else: + batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None + + # Set grad to zero. + if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp: + for partition in self.module: + partition.zero_grad_buffer() + self.optimizer.zero_grad() + + # Forward pass. + forward_backward_func = get_forward_backward_func() + losses_reduced = forward_backward_func( + self.train_step_handler.forward_step, + batch_data_iterator, + self.module, + self.optimizer, + None, + forward_only=False, + ) + + # Empty unused memory. + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + # Reduce gradients. + timers("backward-reduce-model-grads").start() + self.optimizer.reduce_model_grads(args, timers) + timers("backward-reduce-model-grads").stop() + + # Update parameters. + timers("optimizer").start() + update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers) + timers("optimizer").stop() + + # Gather params. + if update_successful: + timers("backward-gather-model-params").start() + self.optimizer.gather_model_params(args, timers) + timers("backward-gather-model-params").stop() + + # Update learning rate. + if update_successful: + if self.scheduler is not None: + increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size + self.scheduler.step(increment=increment) + skipped_iter = 0 + else: + skipped_iter = 1 + + self.optimizer.skipped_iter = not update_successful + + # Empty unused memory. + if args.empty_unused_memory_level >= 2: + torch.cuda.empty_cache() + + args.consumed_train_samples += ( + mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() + ) + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. + loss_reduced = {} + for key in losses_reduced[0]: + losses_reduced_for_key = [x[key] for x in losses_reduced] + if len(losses_reduced_for_key[0].shape) == 0: + loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) + else: + loss_reduced[key] = torch.concat(losses_reduced_for_key) + return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad + return {}, skipped_iter, grad_norm, num_zeros_in_grad + + def eval_step(self, **batch_data): + """ + Evaluation step for Megatron-LM + + Args: + batch_data (:obj:`dict`): The batch data to evaluate on. 
+ """ + + args = get_args() + data_chunks = [] + if args.num_micro_batches > 1: + for i in range(0, args.num_micro_batches): + data_chunks.append( + {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()} + ) + else: + data_chunks = [batch_data] + + if len(self.module) > 1: + batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))] + else: + batch_data_iterator = iter(data_chunks) + forward_backward_func = get_forward_backward_func() + loss_dicts = forward_backward_func( + self.train_step_handler.forward_step, + batch_data_iterator, + self.module, + optimizer=None, + timers=None, + forward_only=True, + ) + # Empty unused memory + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + args.consumed_valid_samples += ( + mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() + ) + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. + loss_reduced = {} + for key in loss_dicts[0]: + losses_reduced_for_key = [x[key] for x in loss_dicts] + if len(losses_reduced_for_key[0].shape) == 0: + loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key) + else: + loss_reduced[key] = torch.concat(losses_reduced_for_key) + return loss_reduced + else: + return {} + + def forward(self, **batch_data): + # During training, we use train_step() + # model(**batch_data) performs following operations by delegating it to `self.train_step`: + # 1. Prepare **batch_data for Tendor, Pipeline and Model Parallelism + # 2. Set grad to zero. + # 3. forward pass and backward pass using Pipeline Parallelism + # 4. Empty unused memory. + # 5. Reduce gradients. + # 6. Update parameters. + # 7. Gather params when using Distributed Optimizer (Data Parallelism). + # 8. Update learning rate if scheduler is specified. + # 9. Empty unused memory. + # 10. Average loss across microbatches and across DP ranks. + # + # During evaluation, we use eval_step() + args = get_args() + if self.module[0].training: + loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data) + self.iteration += 1 + if args.tensorboard_dir is not None: + # Logging. 
+ loss_scale = self.optimizer.get_loss_scale().item() + params_norm = None + if args.log_params_norm: + params_norm = calc_params_l2_norm(self.model) + self.report_memory_flag = training_log( + loss_dict, + self.total_loss_dict, + self.optimizer.param_groups[0]["lr"], + self.iteration, + loss_scale, + self.report_memory_flag, + skipped_iter, + grad_norm, + params_norm, + num_zeros_in_grad, + ) + else: + loss_dict = self.eval_step(**batch_data) + if args.tensorboard_dir is not None: + for key in loss_dict: + self.eval_total_loss_dict[key] = ( + self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] + ) + self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get( + key + "_num_iters", torch.cuda.FloatTensor([0.0]) + ) + torch.cuda.FloatTensor([1.0]) + + loss = torch.tensor(0.0, device=args.local_rank) + for key in loss_dict: + if len(loss_dict[key].shape) == 0: + loss += loss_dict[key] + + logits = None + if "logits" in loss_dict: + logits = loss_dict["logits"] + # loss = reduce(loss) + if self.train_step_handler.model_output_class is not None: + return self.train_step_handler.model_output_class(loss=loss, logits=logits) + return loss + + def log_eval_results(self): + args = get_args() + if args.tensorboard_dir is None or self.iteration == 0: + return + args = get_args() + writer = get_tensorboard_writer() + string = f"validation loss at iteration {self.iteration} | " + for key in self.eval_total_loss_dict: + if key.endswith("_num_iters"): + continue + value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"] + string += f"{key} value: {value} | " + ppl = math.exp(min(20, value.item())) + if args.pretraining_flag: + string += f"{key} PPL: {ppl} | " + if writer: + writer.add_scalar(f"{key} validation", value.item(), self.iteration) + if args.pretraining_flag: + writer.add_scalar(f"{key} validation ppl", ppl, self.iteration) + + length = len(string) + 1 + print_rank_last("-" * length) + print_rank_last(string) + print_rank_last("-" * length) + self.eval_total_loss_dict = {} + + def save_checkpoint(self, output_dir): + self.log_eval_results() + args = get_args() + args.save = output_dir + torch.distributed.barrier() + save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler) + torch.distributed.barrier() + + def load_checkpoint(self, input_dir): + args = get_args() + args.load = input_dir + args.consumed_train_samples = 0 + args.consumed_valid_samples = 0 + torch.distributed.barrier() + iteration = load_checkpoint(self.module, self.optimizer, self.scheduler) + torch.distributed.barrier() + self.iteration = iteration + if args.fp16 and self.iteration == 0: + self.optimizer.reload_model_params() + + def megatron_generate( + self, + inputs, + attention_mask=None, + max_length=None, + max_new_tokens=None, + num_beams=None, + temperature=None, + top_k=None, + top_p=None, + length_penalty=None, + **kwargs, + ): + """ + Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along + with sampling. Refer the Megatron-LM repo for more details + + Args: + inputs (torch.Tensor): input ids + attention_mask (torch.Tensor, optional): attention mask. Defaults to None. + max_length (int, optional): max length of the generated sequence. Defaults to None. + Either this or max_new_tokens should be provided. + max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None. + Either this or max_length should be provided. 
+            num_beams (int, optional): number of beams to use for beam search. Defaults to None.
+            temperature (float, optional): temperature for sampling. Defaults to 1.0.
+            top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0.
+            top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0.
+            length_penalty (float, optional): length penalty for beam search. Defaults to None.
+            kwargs: additional key-value arguments
+        """
+
+        # checking if required arguments are passed
+        args = get_args()
+        if args.model_type_name != "gpt":
+            raise NotImplementedError("Generate method is not implemented for this model")
+
+        if args.data_parallel_size > 1:
+            raise ValueError("Generate method requires data parallelism to be 1")
+
+        if args.sequence_parallel:
+            raise ValueError("Generate method requires sequence parallelism to be False")
+
+        if args.recompute_granularity is not None:
+            raise ValueError("Checkpoint activations cannot be set for inference")
+
+        if args.vocab_file is None:
+            raise ValueError("Vocab file is required for inference")
+
+        # Prepare inputs
+        if max_length is None and max_new_tokens is None:
+            raise ValueError("`max_length` or `max_new_tokens` are required for inference")
+
+        if temperature is None:
+            temperature = 1.0
+        elif not (0.0 < temperature <= 100.0):
+            raise ValueError("temperature must be a positive number less than or equal to 100.0")
+
+        if top_k is None:
+            top_k = 0
+        elif not (0 <= top_k <= 1000):
+            raise ValueError("top_k must be a non-negative number less than or equal to 1000")
+
+        if top_p is None:
+            top_p = 0.0
+        elif top_p > 0.0 and top_k > 0.0:
+            raise ValueError("top_p and top_k sampling cannot be set together")
+        else:
+            if not (0.0 <= top_p <= 1.0):
+                raise ValueError("top_p must be less than or equal to 1.0")
+
+        top_p_decay = kwargs.get("top_p_decay", 0.0)
+        if not (0.0 <= top_p_decay <= 1.0):
+            raise ValueError("top_p_decay must be less than or equal to 1.0")
+
+        top_p_bound = kwargs.get("top_p_bound", 0.0)
+        if not (0.0 <= top_p_bound <= 1.0):
+            raise ValueError("top_p_bound must be less than or equal to 1.0")
+
+        add_BOS = kwargs.get("add_BOS", False)
+        if not (isinstance(add_BOS, bool)):
+            raise ValueError("add_BOS must be a boolean")
+
+        beam_width = num_beams
+        if beam_width is not None:
+            if not isinstance(beam_width, int):
+                raise ValueError("beam_width must be an integer")
+            if beam_width < 1:
+                raise ValueError("beam_width must be greater than 0")
+            if inputs.shape[0] > 1:
+                raise ValueError("When doing beam_search, batch size must be 1")
+
+        tokenizer = get_tokenizer()
+
+        stop_token = kwargs.get("stop_token", tokenizer.eod)
+        if stop_token is not None:
+            if not isinstance(stop_token, int):
+                raise ValueError("stop_token must be an integer")
+
+        if length_penalty is None:
+            length_penalty = 1.0
+
+        sizes_list = None
+        prompts_tokens_tensor = None
+        prompts_length_tensor = None
+        if torch.distributed.get_rank() == 0:
+            # Get the prompts length.
+            if attention_mask is None:
+                prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])
+            else:
+                prompts_length_tensor = attention_mask.sum(axis=-1).cuda()
+
+            if max_new_tokens is None:
+                max_new_tokens = max_length - inputs.shape[1]
+            if max_new_tokens <= 0:
+                raise ValueError("max_new_tokens must be greater than 0")
+
+            if add_BOS:
+                max_length = max_new_tokens + inputs.shape[1] + 1
+                # making sure that `max_length` is a multiple of 4 to leverage fused kernels
+                max_length = 4 * math.ceil(max_length / 4)
+                max_new_tokens = max_length - (inputs.shape[1] + 1)
+                padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
+                prompts_tokens_tensor = torch.concat(
+                    [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1
+                )
+            else:
+                # making sure that `max_length` is a multiple of 4 to leverage fused kernels
+                max_length = max_new_tokens + inputs.shape[1]
+                max_length = 4 * math.ceil(max_length / 4)
+                max_new_tokens = max_length - inputs.shape[1]
+                padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
+                prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)
+
+            # We need the sizes of these tensors for the broadcast
+            sizes_list = [
+                prompts_tokens_tensor.size(0),  # Batch size
+                prompts_tokens_tensor.size(1),
+            ]  # Sequence length
+
+        # First, broadcast the sizes.
+        sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)
+
+        # Now that we have the sizes, we can broadcast the tokens
+        # and length tensors.
+        sizes = sizes_tensor.tolist()
+        context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)
+        context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)
+
+        # Run the inference
+        random_seed = kwargs.get("random_seed", 0)
+        torch.random.manual_seed(random_seed)
+        unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module))
+        if beam_width is not None:
+            tokens, _ = beam_search_and_return_on_first_stage(
+                unwrapped_model,
+                context_tokens_tensor,
+                context_length_tensor,
+                beam_width,
+                stop_token=stop_token,
+                num_return_gen=1,
+                length_penalty=length_penalty,
+            )
+        else:
+            tokens, _, _ = generate_tokens_probs_and_return_on_first_stage(
+                unwrapped_model,
+                context_tokens_tensor,
+                context_length_tensor,
+                return_output_log_probs=False,
+                top_k=top_k,
+                top_p=top_p,
+                top_p_decay=top_p_decay,
+                top_p_bound=top_p_bound,
+                temperature=temperature,
+                use_eod_token_for_early_termination=True,
+            )
+        return tokens
+
+
+# other utilities
+def avg_losses_across_data_parallel_group(losses):
+    """
+    Average losses across data parallel group.
+
+    Args:
+        losses (List[Tensor]): List of losses to average across data parallel group.
+    """
+
+    return average_losses_across_data_parallel_group(losses)
+
+
+def gather_across_data_parallel_groups(tensor):
+    """
+    Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to gather across data parallel ranks.
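+
+    Example (a hedged sketch; assumes Megatron-LM's parallel state is initialized and `loss` exists on every
+    data-parallel rank):
+
+    ```python
+    >>> gathered_loss = gather_across_data_parallel_groups(loss)  # dim 0 concatenated across DP ranks
+    ```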
+ + """ + + def _gpu_gather_one(tensor): + if tensor.ndim == 0: + tensor = tensor.clone()[None] + output_tensors = [ + tensor.clone() for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) + ] + torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) + return torch.cat(output_tensors, dim=0) + + return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True) diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/memory.py b/testbed/huggingface__accelerate/src/accelerate/utils/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..54bfdf8afc6f809ebc032cfd0614538e3434ffc2 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/memory.py @@ -0,0 +1,119 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A collection of utilities for ensuring that training can always occur. Heavily influenced by the +[toma](https://github.com/BlackHC/toma) library. +""" + +import functools +import gc +import inspect + +import torch + + +def release_memory(*objects): + """ + Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. + Returned objects should be reassigned to the same variables. + + Args: + objects (`Iterable`): + An iterable of objects + Returns: + A list of `None` objects to replace `objects` + + Example: + + ```python + >>> import torch + >>> from accelerate.utils import release_memory + + >>> a = torch.ones(1000, 1000).cuda() + >>> b = torch.ones(1000, 1000).cuda() + >>> a, b = release_memory(a, b) + ``` + """ + if not isinstance(objects, list): + objects = list(objects) + for i in range(len(objects)): + objects[i] = None + gc.collect() + torch.cuda.empty_cache() + return objects + + +def should_reduce_batch_size(exception: Exception) -> bool: + """ + Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory + + Args: + exception (`Exception`): + An exception + """ + _statements = [ + "CUDA out of memory.", # CUDA OOM + "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU + "DefaultCPUAllocator: can't allocate memory", # CPU OOM + ] + if isinstance(exception, RuntimeError) and len(exception.args) == 1: + return any(err in exception.args[0] for err in _statements) + return False + + +def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128): + """ + A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or + CUDNN, the batch size is cut in half and passed to `function` + + `function` must take in a `batch_size` parameter as its first argument. 
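+
+    Example (an illustrative sketch of the decorator pattern; the body is a placeholder):
+
+    ```python
+    >>> from accelerate.utils import find_executable_batch_size
+
+    >>> @find_executable_batch_size(starting_batch_size=128)
+    ... def train(batch_size):
+    ...     ...  # runs one training attempt; a CUDA OOM here triggers a retry at batch_size // 2
+
+    >>> train()
+    ```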
+ + Args: + function (`callable`, *optional*): + A function to wrap + starting_batch_size (`int`, *optional*): + The batch size to try and fit into memory + """ + if function is None: + return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size) + + batch_size = starting_batch_size + + def decorator(*args, **kwargs): + nonlocal batch_size + gc.collect() + torch.cuda.empty_cache() + params = list(inspect.signature(function).parameters.keys()) + # Guard against user error + if len(params) < (len(args) + 1): + arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]) + raise TypeError( + f"Batch size was passed into `{function.__name__}` as the first argument when called." + f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" + ) + while True: + if batch_size == 0: + raise RuntimeError("No executable batch size found, reached zero.") + try: + return function(batch_size, *args, **kwargs) + except Exception as e: + if should_reduce_batch_size(e): + gc.collect() + torch.cuda.empty_cache() + batch_size //= 2 + else: + raise + + return decorator diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/modeling.py b/testbed/huggingface__accelerate/src/accelerate/utils/modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..ee9f1e507a62c43ed9bb41e57218e7a9d5399fc7 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/modeling.py @@ -0,0 +1,750 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import json +import os +import re +import shutil +import tempfile +from collections import defaultdict +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from .offload import load_offloaded_weight, offload_weight, save_offload_index + + +WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" + + +def convert_file_size_to_int(size: Union[int, str]): + """ + Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). + + Args: + size (`int` or `str`): The size to convert. Will be directly returned if an `int`. + + Example: + + ```py + >>> convert_file_size_to_int("1MiB") + 1048576 + ``` + """ + if isinstance(size, int): + return size + if size.upper().endswith("GIB"): + return int(size[:-3]) * (2**30) + if size.upper().endswith("MIB"): + return int(size[:-3]) * (2**20) + if size.upper().endswith("KIB"): + return int(size[:-3]) * (2**10) + if size.upper().endswith("GB"): + int_size = int(size[:-2]) * (10**9) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("MB"): + int_size = int(size[:-2]) * (10**6) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("KB"): + int_size = int(size[:-2]) * (10**3) + return int_size // 8 if size.endswith("b") else int_size + raise ValueError("`size` is not in a valid format. 
diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/modeling.py b/testbed/huggingface__accelerate/src/accelerate/utils/modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee9f1e507a62c43ed9bb41e57218e7a9d5399fc7
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/utils/modeling.py
@@ -0,0 +1,750 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import json
+import os
+import re
+import shutil
+import tempfile
+from collections import defaultdict
+from typing import Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from .offload import load_offloaded_weight, offload_weight, save_offload_index
+
+
+WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
+
+
+def convert_file_size_to_int(size: Union[int, str]):
+    """
+    Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
+
+    Args:
+        size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
+
+    Example:
+
+    ```py
+    >>> convert_file_size_to_int("1MiB")
+    1048576
+    ```
+    """
+    if isinstance(size, int):
+        return size
+    if size.upper().endswith("GIB"):
+        return int(size[:-3]) * (2**30)
+    if size.upper().endswith("MIB"):
+        return int(size[:-3]) * (2**20)
+    if size.upper().endswith("KIB"):
+        return int(size[:-3]) * (2**10)
+    if size.upper().endswith("GB"):
+        int_size = int(size[:-2]) * (10**9)
+        return int_size // 8 if size.endswith("b") else int_size
+    if size.upper().endswith("MB"):
+        int_size = int(size[:-2]) * (10**6)
+        return int_size // 8 if size.endswith("b") else int_size
+    if size.upper().endswith("KB"):
+        int_size = int(size[:-2]) * (10**3)
+        return int_size // 8 if size.endswith("b") else int_size
+    raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
+
+
+def dtype_byte_size(dtype: torch.dtype):
+    """
+    Returns the size (in bytes) occupied by one parameter of type `dtype`.
+
+    Example:
+
+    ```py
+    >>> dtype_byte_size(torch.float32)
+    4
+    ```
+    """
+    if dtype == torch.bool:
+        return 1 / 8
+    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
+    if bit_search is None:
+        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
+    bit_size = int(bit_search.groups()[0])
+    return bit_size // 8
+
+
+def set_module_tensor_to_device(
+    module: nn.Module,
+    tensor_name: str,
+    device: Union[int, str, torch.device],
+    value: Optional[torch.Tensor] = None,
+    dtype: Optional[Union[str, torch.dtype]] = None,
+):
+    """
+    A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
+    `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).
+
+    Args:
+        module (`torch.nn.Module`): The module in which the tensor we want to move lives.
+        tensor_name (`str`): The full name of the parameter/buffer.
+        device (`int`, `str` or `torch.device`): The device on which to set the tensor.
+        value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any
+            other device).
+        dtype (`torch.dtype`, *optional*):
+            If passed along, the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast
+            to the dtype of the existing parameter in the model.
+    """
+    # Recurse if needed
+    if "." in tensor_name:
+        splits = tensor_name.split(".")
+        for split in splits[:-1]:
+            new_module = getattr(module, split)
+            if new_module is None:
+                raise ValueError(f"{module} has no attribute {split}.")
+            module = new_module
+        tensor_name = splits[-1]
+
+    if tensor_name not in module._parameters and tensor_name not in module._buffers:
+        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+    is_buffer = tensor_name in module._buffers
+    old_value = getattr(module, tensor_name)
+
+    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
+        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {device}.")
+
+    if value is not None:
+        if dtype is None:
+            # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
+            value = value.to(old_value.dtype)
+        elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
+            value = value.to(dtype)
+
+    with torch.no_grad():
+        if value is None:
+            new_value = old_value.to(device)
+        elif isinstance(value, torch.Tensor):
+            new_value = value.to(device)
+        else:
+            new_value = torch.tensor(value, device=device)
+
+        if is_buffer:
+            module._buffers[tensor_name] = new_value
+        elif value is not None or torch.device(device) != module._parameters[tensor_name].device:
+            param_cls = type(module._parameters[tensor_name])
+            kwargs = module._parameters[tensor_name].__dict__
+            new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)
+            module._parameters[tensor_name] = new_value
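A small sketch of `set_module_tensor_to_device` in action; the module and tensor values here are made up, but it shows the dotted-name addressing and the fact that the parameter object stays attached to the module:

```python
import torch
import torch.nn as nn

from accelerate.utils.modeling import set_module_tensor_to_device

model = nn.Sequential(nn.Linear(4, 4))
# Address the parameter by its dotted name; unlike `param.to(device)`, the
# module keeps pointing at the (new) parameter afterwards.
set_module_tensor_to_device(model, "0.weight", "cpu", value=torch.zeros(4, 4))
print(model[0].weight.device, model[0].weight.sum().item())  # cpu 0.0
```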
+
+
+def named_module_tensors(module: nn.Module, include_buffers: bool = True, recurse: bool = False):
+    """
+    A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
+    it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
+
+    Args:
+        module (`torch.nn.Module`): The module we want the tensors of.
+        include_buffers (`bool`, *optional*, defaults to `True`): Whether or not to include the buffers in the result.
+        recurse (`bool`, *optional*, defaults to `False`):
+            Whether or not to go look in every submodule or just return the direct parameters and buffers.
+    """
+    for named_parameter in module.named_parameters(recurse=recurse):
+        yield named_parameter
+
+    if include_buffers:
+        for named_buffer in module.named_buffers(recurse=recurse):
+            yield named_buffer
+
+
+def find_tied_parameters(model: nn.Module, **kwargs):
+    """
+    Find the tied parameters in a given model.
+
+    Args:
+        model (`torch.nn.Module`): The model to inspect.
+
+    <Tip warning={true}>
+
+    The signature accepts keyword arguments, but they are only used for the recursive part of this function and you
+    should ignore them.
+
+    </Tip>
+
+    Example:
+
+    ```py
+    >>> from collections import OrderedDict
+    >>> import torch.nn as nn
+
+    >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
+    >>> model.linear2.weight = model.linear1.weight
+    >>> find_tied_parameters(model)
+    {'linear1.weight': 'linear2.weight'}
+    ```
+
+    Returns:
+        Dict[str, str]: A dictionary mapping tied parameter names to the name of the parameter they are tied to.
+    """
+    # Initialize result and named_parameters before recursing.
+    named_parameters = kwargs.get("named_parameters", None)
+    prefix = kwargs.get("prefix", "")
+    result = kwargs.get("result", {})
+
+    if named_parameters is None:
+        named_parameters = {n: p for n, p in model.named_parameters()}
+    else:
+        # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`
+        # of the submodule it belongs to. So while recursing we track the names that are not in the initial
+        # `named_parameters`.
+        for name, parameter in model.named_parameters():
+            full_name = name if prefix == "" else f"{prefix}.{name}"
+            if full_name not in named_parameters:
+                # When we find one, it has to be one of the existing parameters.
+                for new_name, new_param in named_parameters.items():
+                    if new_param is parameter:
+                        result[new_name] = full_name
+
+    # Once we have treated direct parameters, we move to the child modules.
+    for name, child in model.named_children():
+        child_name = name if prefix == "" else f"{prefix}.{name}"
+        find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)
+
+    return result
+
+
+def retie_parameters(model, tied_params):
+    """
+    Reties tied parameters in a given model if the link was broken (for instance when adding hooks).
+
+    Args:
+        model (`torch.nn.Module`): The model in which to retie parameters.
+        tied_params (`Dict[str, str]`):
+            A mapping of parameter name to tied parameter name as obtained by `find_tied_parameters`.
+    """
+    for param_name, tied_param_name in tied_params.items():
+        param = model
+        for split in param_name.split("."):
+            param = getattr(param, split)
+        tied_module = model
+        for split in tied_param_name.split(".")[:-1]:
+            tied_module = getattr(tied_module, split)
+        setattr(tied_module, tied_param_name.split(".")[-1], param)
+
+
+def compute_module_sizes(model: nn.Module, dtype: Optional[Union[str, torch.dtype]] = None):
+    """
+    Compute the size of each submodule of a given model.
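+
+    Example (an illustrative sketch; sizes assume the default float32, i.e. 4 bytes per element):
+
+    ```py
+    >>> import torch.nn as nn
+    >>> sizes = compute_module_sizes(nn.Linear(4, 4))
+    >>> sizes[""], sizes["weight"], sizes["bias"]
+    (80, 64, 16)
+    ```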
+ """ + if isinstance(dtype, str): + # We accept "torch.float16" or just "float16" + dtype = dtype.replace("torch.", "") + dtype = getattr(torch, dtype) + if dtype is not None: + dtype_size = dtype_byte_size(dtype) + module_sizes = defaultdict(int) + for name, tensor in named_module_tensors(model, recurse=True): + if dtype is None: + size = tensor.numel() * dtype_byte_size(tensor.dtype) + else: + size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) + name_parts = name.split(".") + for idx in range(len(name_parts) + 1): + module_sizes[".".join(name_parts[:idx])] += size + + return module_sizes + + +def get_max_layer_size( + modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str] +): + """ + Utility function that will scan a list of named modules and return the maximum size used by one full layer. The + definition of a layer being: + - a module with no direct children (just parameters and buffers) + - a module whose class name is in the list `no_split_module_classes` + + Args: + modules (`List[Tuple[str, torch.nn.Module]]`): + The list of named modules where we want to determine the maximum layer size. + module_sizes (`Dict[str, int]`): + A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). + no_split_module_classes (`List[str]`): + A list of class names for layers we don't want to be split. + + Returns: + `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size. + """ + max_size = 0 + layer_names = [] + modules_to_treat = modules.copy() + while len(modules_to_treat) > 0: + module_name, module = modules_to_treat.pop(0) + modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else [] + if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: + # No splitting this one so we compare to the max_size + size = module_sizes[module_name] + if size > max_size: + max_size = size + layer_names = [module_name] + elif size == max_size: + layer_names.append(module_name) + else: + modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat + return max_size, layer_names + + +def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None): + """ + Get the maximum memory available if nothing is passed, converts string to int otherwise. + """ + import psutil + + if max_memory is None: + if not torch.cuda.is_available(): + max_memory = {} + else: + # Make sure CUDA is initialized on each GPU to have the right memory info. + for i in range(torch.cuda.device_count()): + _ = torch.tensor([0], device=i) + max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())} + max_memory["cpu"] = psutil.virtual_memory().available + return max_memory + + for key in max_memory: + if isinstance(max_memory[key], str): + max_memory[key] = convert_file_size_to_int(max_memory[key]) + return max_memory + + +def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""): + """ + Cleans a device_map by grouping all submodules that go on the same device together. + """ + # Get the value of the current module and if there is only one split across several keys, regroup it. + prefix = "" if module_name == "" else f"{module_name}." 
+    values = [v for k, v in device_map.items() if k.startswith(prefix)]
+    if len(set(values)) == 1 and len(values) > 1:
+        for k in [k for k in device_map if k.startswith(prefix)]:
+            del device_map[k]
+        device_map[module_name] = values[0]
+
+    # Recurse over the children
+    children_modules = [k for k in device_map.keys() if k.startswith(module_name) and len(k) > len(module_name)]
+    idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
+    children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
+    for child in children_modules:
+        clean_device_map(device_map, module_name=child)
+
+    return device_map
+
+
+def load_offloaded_weights(model, index, offload_folder):
+    if index is None or len(index) == 0:
+        # Nothing to do
+        return
+
+    for param_name, metadata in index.items():
+        tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
+        weight = load_offloaded_weight(tensor_file, metadata)
+        set_module_tensor_to_device(model, param_name, "cpu", value=weight)
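Stepping back to `clean_device_map` above, a quick sketch (the module names are made up) of the regrouping it performs:

```python
from accelerate.utils.modeling import clean_device_map

# Both children of `block` sit on GPU 0, so they are regrouped under "block".
device_map = {"block.linear1": 0, "block.linear2": 0, "lm_head": "cpu"}
print(clean_device_map(device_map))
# {'lm_head': 'cpu', 'block': 0}
```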
+
+
+def get_balanced_memory(
+    model: nn.Module,
+    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+    no_split_module_classes: Optional[List[str]] = None,
+    dtype: Optional[Union[str, torch.dtype]] = None,
+    low_zero: bool = False,
+):
+    """
+    Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.
+
+    <Tip>
+
+    All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
+    meta device (as it would if initialized within the `init_empty_weights` context manager).
+
+    </Tip>
+
+    Args:
+        model (`torch.nn.Module`): The model to analyze.
+        max_memory (`Dict`, *optional*):
+            A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available
+            if unset.
+        no_split_module_classes (`List[str]`, *optional*):
+            A list of layer class names that should never be split across devices (for instance any layer that has a
+            residual connection).
+        dtype (`str` or `torch.dtype`, *optional*):
+            If provided, the weights will be converted to that type when loaded.
+        low_zero (`bool`, *optional*):
+            Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like
+            the Transformers generate function).
+    """
+    # Get default / clean up max_memory
+    max_memory = get_max_memory(max_memory)
+
+    if not torch.cuda.is_available():
+        return max_memory
+
+    num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
+    module_sizes = compute_module_sizes(model, dtype=dtype)
+    per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)
+
+    # We can't just set the memory to model_size // num_devices as it will end up being too small: each GPU will get
+    # slightly fewer layers and some layers will end up offloaded at the end. So this function computes a buffer size
+    # to add, which is the biggest of:
+    # - the size of no split block (if applicable)
+    # - the mean of the layer sizes
+    if no_split_module_classes is None:
+        no_split_module_classes = []
+    elif not isinstance(no_split_module_classes, (list, tuple)):
+        no_split_module_classes = [no_split_module_classes]
+
+    # Identify the size of the no_split_block modules
+    if len(no_split_module_classes) > 0:
+        no_split_children = {}
+        for name, size in module_sizes.items():
+            if name == "":
+                continue
+            submodule = model
+            for submodule_name in name.split("."):
+                submodule = getattr(submodule, submodule_name)
+            class_name = submodule.__class__.__name__
+            if class_name in no_split_module_classes and class_name not in no_split_children:
+                no_split_children[class_name] = size
+
+            if set(no_split_children.keys()) == set(no_split_module_classes):
+                break
+        buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
+    else:
+        buffer = 0
+
+    # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
+    leaves = [n for n in module_sizes if len([p for p in module_sizes if p.startswith(n) and len(p) > len(n)]) == 0]
+    module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
+    # Once removed, leaves are the final modules.
+    leaves = [n for n in module_sizes if len([p for p in module_sizes if p.startswith(n) and len(p) > len(n)]) == 0]
+    mean_leaves = int(sum([module_sizes[n] for n in leaves]) / len(leaves))
+    buffer = int(1.25 * max(buffer, mean_leaves))
+    per_gpu += buffer
+
+    max_memory = get_max_memory(max_memory)
+    last_gpu = max(i for i in max_memory if isinstance(i, int) and max_memory[i] > 0)
+    # The last device is left with max_memory just in case the buffer is not enough.
+    for i in range(last_gpu):
+        max_memory[i] = min(0 if low_zero and i == 0 else per_gpu, max_memory[i])
+
+    if low_zero:
+        min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
+        max_memory[0] = min(min_zero, max_memory[0])
+
+    return max_memory
+
+
+def infer_auto_device_map(
+    model: nn.Module,
+    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+    no_split_module_classes: Optional[List[str]] = None,
+    dtype: Optional[Union[str, torch.dtype]] = None,
+):
+    """
+    Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
+    such that:
+    - we don't exceed the memory available of any of the GPUs.
+    - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
+      has the largest size.
+    - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
+    - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
+      that has the largest size.
+
+    <Tip>
+
+    All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
+    meta device (as it would if initialized within the `init_empty_weights` context manager).
+
+    </Tip>
+
+    Args:
+        model (`torch.nn.Module`): The model to analyze.
+        max_memory (`Dict`, *optional*):
+            A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available
+            if unset.
+        no_split_module_classes (`List[str]`, *optional*):
+            A list of layer class names that should never be split across devices (for instance any layer that has a
+            residual connection).
+        dtype (`str` or `torch.dtype`, *optional*):
+            If provided, the weights will be converted to that type when loaded.
+    """
+    # Get default / clean up max_memory
+    max_memory = get_max_memory(max_memory)
+    if no_split_module_classes is None:
+        no_split_module_classes = []
+    elif not isinstance(no_split_module_classes, (list, tuple)):
+        no_split_module_classes = [no_split_module_classes]
+
+    devices = list(max_memory.keys())
+    gpus = [device for device in devices if device != "cpu"]
+    if "disk" not in devices:
+        devices.append("disk")
+
+    # Devices that need to keep space for a potential offloaded layer.
+    main_devices = [gpus[0], "cpu"] if len(gpus) > 0 else ["cpu"]
+
+    module_sizes = compute_module_sizes(model, dtype=dtype)
+    tied_parameters = find_tied_parameters(model)
+
+    device_map = {}
+    current_device = 0
+    current_memory_used = 0
+
+    # Direct submodules and parameters
+    modules_to_treat = (
+        list(model.named_parameters(recurse=False))
+        + list(model.named_children())
+        + list(model.named_buffers(recurse=False))
+    )
+    # Initialize the largest layer size, to know how much space to keep in memory
+    max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
+
+    # Ready? This is going to be a bit messy.
+    while len(modules_to_treat) > 0:
+        name, module = modules_to_treat.pop(0)
+        # Max size in the remaining layers may have changed since we took one, so we may update it.
+        max_layer_names = [n for n in max_layer_names if not n.startswith(name)]
+        if len(max_layer_names) == 0:
+            max_layer_size, max_layer_names = get_max_layer_size(
+                [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
+                module_sizes,
+                no_split_module_classes,
+            )
+        # Assess size needed
+        module_size = module_sizes[name]
+        # We keep relevant tied parameters only: one of the tied parameters is inside the current module and the
+        # other is not.
+        tied_params = [v for k, v in tied_parameters.items() if name in k and name not in v]
+        # We ignore parameters that are tied to more than one other parameter.
+        tied_param = tied_params[0] if len(tied_params) == 1 else None
+
+        device = devices[current_device]
+        current_max_size = max_memory[device] if device != "disk" else None
+        # Reduce max size available by the largest layer.
+        if devices[current_device] in main_devices:
+            current_max_size = current_max_size - max_layer_size
+        # Case 1 -> We're too big!
+        if current_max_size is not None and current_memory_used + module_size > current_max_size:
+            # Split or not split?
+            modules_children = list(module.named_children())
+            if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
+                # -> no split, we go to the next device
+                current_device += 1
+                modules_to_treat = [(name, module)] + modules_to_treat
+                current_memory_used = 0
+            else:
+                # -> split, we replace the module studied by its children + parameters
+                modules_children = list(module.named_parameters(recurse=False)) + modules_children
+                modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat
+                # Update the max layer size.
+                max_layer_size, max_layer_names = get_max_layer_size(
+                    [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
+                    module_sizes,
+                    no_split_module_classes,
+                )
+
+        # Case 2, it fits! We're not entirely out of the woods though, because we may have some tied parameters.
+ elif tied_param is not None: + # Determine the sized occupied by this module + the module containing the tied parameter + tied_module_size = module_size + tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0] + tied_module_name, tied_module = modules_to_treat[tied_module_index] + tied_module_size += module_sizes[tied_module_name] - module_sizes[tied_param] + if current_max_size is not None and current_memory_used + tied_module_size > current_max_size: + # Split or not split? + tied_module_children = list(tied_module.named_children()) + if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes: + # If the tied module is not split, we go to the next device + current_device += 1 + modules_to_treat = [(name, module)] + modules_to_treat + current_memory_used = 0 + else: + # Otherwise, we replace the tied module by its children. + tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children + tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children] + modules_to_treat = ( + [(name, module)] + + modules_to_treat[:tied_module_index] + + tied_module_children + + modules_to_treat[tied_module_index + 1 :] + ) + # Update the max layer size. + max_layer_size, max_layer_names = get_max_layer_size( + [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], + module_sizes, + no_split_module_classes, + ) + else: + # We really really fit! + current_memory_used += tied_module_size + device_map[name] = devices[current_device] + modules_to_treat.pop(tied_module_index) + device_map[tied_module_name] = devices[current_device] + else: + current_memory_used += module_size + device_map[name] = devices[current_device] + + return clean_device_map(device_map) + + +def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]): + """ + Checks a device map covers everything in a given model. + + Args: + model (`torch.nn.Module`): The model to check the device map against. + device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check. + """ + all_model_tensors = [name for name, _ in model.state_dict().items()] + for module_name in device_map.keys(): + all_model_tensors = [name for name in all_model_tensors if not name.startswith(module_name)] + if len(all_model_tensors) > 0: + non_covered_params = ", ".join(all_model_tensors) + raise ValueError( + f"The device_map provided does not give any device for the following parameters: {non_covered_params}" + ) + + +def load_checkpoint_in_model( + model: nn.Module, + checkpoint: Union[str, os.PathLike], + device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = None, + dtype: Optional[Union[str, torch.dtype]] = None, + offload_state_dict: bool = False, + offload_buffers: bool = False, +): + """ + Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are + loaded. + + + + Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To + group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. + + + + Args: + model (`torch.nn.Module`): The model in which we want to load a checkpoint. + checkpoint (`str` or `os.PathLike`): + The folder checkpoint to load. 
It can be:
+            - a path to a file containing a whole model state dict
+            - a path to a `.json` file containing the index to a sharded checkpoint
+            - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+        device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+            A map that specifies where each submodule should go. It doesn't need to be refined to each
+            parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same
+            device.
+        offload_folder (`str` or `os.PathLike`, *optional*):
+            If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+        dtype (`str` or `torch.dtype`, *optional*):
+            If provided, the weights will be converted to that type when loaded.
+        offload_state_dict (`bool`, *optional*, defaults to `False`):
+            If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM
+            if the weight of the CPU state dict + the biggest shard does not fit.
+        offload_buffers (`bool`, *optional*, defaults to `False`):
+            Whether or not to include the buffers in the weights offloaded to disk.
+    """
+    if offload_folder is None and device_map is not None and "disk" in device_map.values():
+        raise ValueError(
+            "At least one of the model submodules will be offloaded to disk, please pass along an `offload_folder`."
+        )
+    elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
+        os.makedirs(offload_folder, exist_ok=True)
+
+    if isinstance(dtype, str):
+        # We accept "torch.float16" or just "float16"
+        dtype = dtype.replace("torch.", "")
+        dtype = getattr(torch, dtype)
+
+    checkpoint_files = None
+    index_filename = None
+    if os.path.isfile(checkpoint):
+        if str(checkpoint).endswith(".json"):
+            index_filename = checkpoint
+        else:
+            checkpoint_files = [checkpoint]
+    elif os.path.isdir(checkpoint):
+        potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")]
+        if len(potential_index) == 0:
+            raise ValueError(f"{checkpoint} is not a folder containing a `.index.json` file.")
+        elif len(potential_index) == 1:
+            index_filename = os.path.join(checkpoint, potential_index[0])
+        else:
+            raise ValueError(f"{checkpoint} contains more than one `.index.json` file, delete the irrelevant ones.")
+    else:
+        raise ValueError(
+            "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded "
+            f"checkpoint, or a folder containing a sharded checkpoint, but got {checkpoint}."
+        )
+
+    if index_filename is not None:
+        checkpoint_folder = os.path.split(index_filename)[0]
+        with open(index_filename, "r") as f:
+            index = json.loads(f.read())
+
+        if "weight_map" in index:
+            index = index["weight_map"]
+        checkpoint_files = sorted(list(set(index.values())))
+        checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]
+
+    # Logic for missing/unexpected keys goes here.
+ + offload_index = {} + if offload_state_dict: + state_dict_folder = tempfile.mkdtemp() + state_dict_index = {} + + buffer_names = [name for name, _ in model.named_buffers()] + + for checkpoint_file in checkpoint_files: + checkpoint = torch.load(checkpoint_file) + if device_map is None: + model.load_state_dict(checkpoint, strict=False) + else: + for param_name, param in checkpoint.items(): + module_name = param_name + + while len(module_name) > 0 and module_name not in device_map: + module_name = ".".join(module_name.split(".")[:-1]) + if module_name == "" and "" not in device_map: + # TODO: group all errors and raise at the end. + raise ValueError(f"{param_name} doesn't have any device set.") + param_device = device_map[module_name] + + if param_device == "disk": + if offload_buffers or param_name not in buffer_names: + set_module_tensor_to_device(model, param_name, "meta") + offload_weight(param, param_name, offload_folder, index=offload_index) + elif param_device == "cpu" and offload_state_dict: + set_module_tensor_to_device(model, param_name, "meta") + offload_weight(param, param_name, state_dict_folder, index=state_dict_index) + else: + set_module_tensor_to_device(model, param_name, param_device, value=param, dtype=dtype) + + # Force Python to clean up. + del checkpoint + gc.collect() + + save_offload_index(offload_index, offload_folder) + + # Load back offloaded state dict on CPU + if offload_state_dict: + load_offloaded_weights(model, state_dict_index, state_dict_folder) + shutil.rmtree(state_dict_folder) diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/offload.py b/testbed/huggingface__accelerate/src/accelerate/utils/offload.py new file mode 100644 index 0000000000000000000000000000000000000000..84c115fd392d119495baa19e87b1b4748a43e08c --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/offload.py @@ -0,0 +1,217 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from collections.abc import Mapping +from typing import Dict, List, Optional, Union + +import numpy as np +import torch + +from ..logging import get_logger +from .imports import is_safetensors_available + + +logger = get_logger(__name__) + + +def offload_weight(weight, weight_name, offload_folder, index=None): + dtype = None + # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16. + if str(weight.dtype) == "torch.bfloat16": + # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s. 
+ weight = weight.view(torch.int16) + dtype = "bfloat16" + array = weight.cpu().numpy() + tensor_file = os.path.join(offload_folder, f"{weight_name}.dat") + if index is not None: + if dtype is None: + dtype = str(array.dtype) + index[weight_name] = {"dtype": dtype, "shape": list(array.shape)} + if array.ndim == 0: + array = array[None] + file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape) + file_array[:] = array[:] + file_array.flush() + return index + + +def load_offloaded_weight(weight_file, weight_info): + shape = tuple(weight_info["shape"]) + if shape == (): + # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor + shape = (1,) + + dtype = weight_info["dtype"] + if dtype == "bfloat16": + # NumPy does not support bfloat16 so this was saved as a int16 + dtype = "int16" + + weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r") + + if len(weight_info["shape"]) == 0: + weight = weight[0] + weight = torch.tensor(weight) + if weight_info["dtype"] == "bfloat16": + weight = weight.view(torch.bfloat16) + + return weight + + +def save_offload_index(index, offload_folder): + if index is None or len(index) == 0: + # Nothing to save + return + + offload_index_file = os.path.join(offload_folder, "index.json") + if os.path.isfile(offload_index_file): + with open(offload_index_file, "r", encoding="utf-8") as f: + current_index = json.load(f) + else: + current_index = {} + current_index.update(index) + + with open(offload_index_file, "w", encoding="utf-8") as f: + json.dump(current_index, f, indent=2) + + +def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]): + """ + Offload a state dict in a given folder. + + Args: + save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict. + state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload. + """ + os.makedirs(save_dir, exist_ok=True) + index = {} + for name, parameter in state_dict.items(): + index = offload_weight(parameter, name, save_dir, index=index) + + # Update index + save_offload_index(index, save_dir) + + +class PrefixedDataset(Mapping): + """ + Will access keys in a given dataset by adding a prefix. + + Args: + dataset (`Mapping`): Any map with string keys. + prefix (`str`): A prefix to add when trying to access any element in the underlying dataset. + """ + + def __init__(self, dataset: Mapping, prefix: str): + self.dataset = dataset + self.prefix = prefix + + def __getitem__(self, key): + return self.dataset[f"{self.prefix}{key}"] + + def __iter__(self): + return iter([key for key in self.dataset if key.startswith(self.prefix)]) + + def __len__(self): + return len(self.dataset) + + +class OffloadedWeightsLoader(Mapping): + """ + A collection that loads weights stored in a given state dict or memory-mapped on disk. + + Args: + state_dict (`Dict[str, torch.Tensor]`, *optional*): + A dictionary parameter name to tensor. + save_folder (`str` or `os.PathLike`, *optional*): + The directory in which the weights are stored (by `offload_state_dict` for instance). + index (`Dict`, *optional*): + A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default + to the index saved in `save_folder`. 
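+
+    Example (an illustrative sketch of the round trip with `offload_state_dict` defined above):
+
+    ```py
+    >>> import tempfile
+    >>> import torch
+    >>> save_dir = tempfile.mkdtemp()
+    >>> offload_state_dict(save_dir, {"linear.bias": torch.randn(4)})
+    >>> weights = OffloadedWeightsLoader(save_folder=save_dir)
+    >>> weights["linear.bias"].shape
+    torch.Size([4])
+    ```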
+ """ + + def __init__( + self, + state_dict: Dict[str, torch.Tensor] = None, + save_folder: Optional[Union[str, os.PathLike]] = None, + index: Mapping = None, + device=None, + ): + if state_dict is None and save_folder is None: + raise ValueError("Need either a `state_dict` or a `save_folder` containing offloaded weights.") + + self.state_dict = {} if state_dict is None else state_dict + self.save_folder = save_folder + if index is None and save_folder is not None: + with open(os.path.join(save_folder, "index.json")) as f: + index = json.load(f) + self.index = {} if index is None else index + self.all_keys = list(self.state_dict.keys()) + self.all_keys.extend([key for key in self.index if key not in self.all_keys]) + self.device = device + + def __getitem__(self, key: str): + # State dict gets priority + if key in self.state_dict: + return self.state_dict[key] + weight_info = self.index[key] + if weight_info.get("safetensors_file") is not None: + if not is_safetensors_available(): + raise ImportError("These offloaded weights require the use of safetensors: `pip install safetensors`.") + + if "SAFETENSORS_FAST_GPU" not in os.environ: + logger.info("Enabling fast loading with safetensors by setting `SAFETENSORS_FAST_GPU` to 1.") + os.environ["SAFETENSORS_FAST_GPU"] = "1" + + from safetensors import safe_open + + device = "cpu" if self.device is None else self.device + with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f: + tensor = f.get_tensor(weight_info.get("weight_name", key)) + + if "dtype" in weight_info: + return tensor.to(getattr(torch, weight_info["dtype"])) + else: + return tensor + + weight_file = os.path.join(self.save_folder, f"{key}.dat") + return load_offloaded_weight(weight_file, weight_info) + + def __iter__(self): + return iter(self.all_keys) + + def __len__(self): + return len(self.all_keys) + + +def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]): + """ + Extract the sub state-dict corresponding to a list of given submodules. + + Args: + state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. + submodule_names (`List[str]`): The list of submodule names we want to extract. + """ + result = {} + for module_name in submodule_names: + # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the + # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance) + result.update( + { + key: param + for key, param in state_dict.items() + if key == module_name or key.startswith(module_name + ".") + } + ) + return result diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/operations.py b/testbed/huggingface__accelerate/src/accelerate/utils/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..3ebea2eefe2d5fdd677a3cfa919ad8ea2311a7d8 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/operations.py @@ -0,0 +1,519 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A set of basic tensor ops compatible with TPU, GPU, and multi-GPU setups
+"""
+
+
+import pickle
+from functools import update_wrapper
+from typing import Any, Mapping
+
+import torch
+from torch.distributed import ReduceOp
+
+from ..state import AcceleratorState
+from .constants import CUDA_DISTRIBUTED_TYPES
+from .dataclasses import DistributedType, TensorInformation
+from .imports import is_tpu_available
+from .versions import is_torch_version
+
+
+if is_tpu_available(check_device=False):
+    import torch_xla.core.xla_model as xm
+
+
+def is_torch_tensor(tensor):
+    return isinstance(tensor, torch.Tensor)
+
+
+def is_tensor_information(tensor_info):
+    return isinstance(tensor_info, TensorInformation)
+
+
+def honor_type(obj, generator):
+    """
+    Cast a generator to the same type as obj (list, tuple or namedtuple)
+    """
+    try:
+        return type(obj)(generator)
+    except TypeError:
+        # Some objects may not be able to instantiate from a generator directly
+        return type(obj)(*list(generator))
+
+
+def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
+    """
+    Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
+
+    Args:
+        func (`callable`):
+            The function to recursively apply.
+        data (nested list/tuple/dictionary):
+            The data on which to apply `func`.
+        *args:
+            Positional arguments that will be passed to `func` when applied on the unpacked data.
+        test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
+            A function taking an object as input and returning whether `func` should be applied to it.
+        error_on_other_type (`bool`, *optional*, defaults to `False`):
+            Whether to raise an error or not if, after unpacking `data`, we encounter an object that does not pass
+            `test_type`. If `False`, the function will leave such objects unchanged.
+        **kwargs:
+            Keyword arguments that will be passed to `func` when applied on the unpacked data.
+
+    Returns:
+        The same data structure as `data` with `func` applied to every object passing `test_type`.
+    """
+    if isinstance(data, (tuple, list)):
+        return honor_type(
+            data,
+            (
+                recursively_apply(
+                    func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
+                )
+                for o in data
+            ),
+        )
+    elif isinstance(data, Mapping):
+        return type(data)(
+            {
+                k: recursively_apply(
+                    func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
+                )
+                for k, v in data.items()
+            }
+        )
+    elif test_type(data):
+        return func(data, *args, **kwargs)
+    elif error_on_other_type:
+        raise TypeError(
+            f"Can't apply {func.__name__} on object of type {type(data)}, only of nested list/tuple/dicts of objects "
+            f"that satisfy {test_type.__name__}."
+        )
+    return data
+
+
+def send_to_device(tensor, device, non_blocking=False):
+    """
+    Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to send to a given device.
+        device (`torch.device`):
+            The device to send the data to.
+
+    Returns:
+        The same data structure as `tensor` with all tensors sent to the proper device.
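+
+    Example (a minimal sketch; the nested structure is preserved):
+
+    ```py
+    >>> import torch
+    >>> batch = {"x": torch.ones(2), "pair": (torch.zeros(1), torch.ones(1))}
+    >>> batch = send_to_device(batch, "cpu")
+    >>> batch["pair"][0].device
+    device(type='cpu')
+    ```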
+ """ + + def _send_to_device(t, device, non_blocking): + try: + return t.to(device, non_blocking=non_blocking) + except TypeError: # .to() doesn't accept non_blocking as kwarg + return t.to(device) + + def _has_to_method(t): + return hasattr(t, "to") + + return recursively_apply(_send_to_device, tensor, device, non_blocking, test_type=_has_to_method) + + +def get_data_structure(data): + """ + Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors. + + Args: + data (nested list/tuple/dictionary of `torch.Tensor`): + The data to send to analyze. + + Returns: + The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors. + """ + + def _get_data_structure(tensor): + return TensorInformation(shape=tensor.shape, dtype=tensor.dtype) + + return recursively_apply(_get_data_structure, data) + + +def initialize_tensors(data_structure): + """ + Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`]. + + Returns: + The same data structure as `data` with tensors instead of [`~utils.TensorInformation`]. + """ + + def _initialize_tensor(tensor_info): + return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype) + + return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information) + + +def find_batch_size(data): + """ + Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors. + + Args: + data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size. + + Returns: + `int`: The batch size. + """ + if isinstance(data, (tuple, list)): + return find_batch_size(data[0]) + elif isinstance(data, Mapping): + for k in data.keys(): + return find_batch_size(data[k]) + elif not isinstance(data, torch.Tensor): + raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.") + return data.shape[0] + + +def _tpu_gather(tensor, name="gather tensor"): + if isinstance(tensor, (list, tuple)): + return honor_type(tensor, (_tpu_gather(t, name=f"{name}_{i}") for i, t in enumerate(tensor))) + elif isinstance(tensor, Mapping): + return type(tensor)({k: _tpu_gather(v, name=f"{name}_{k}") for k, v in tensor.items()}) + elif not isinstance(tensor, torch.Tensor): + raise TypeError(f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.") + if tensor.ndim == 0: + tensor = tensor.clone()[None] + return xm.mesh_reduce(name, tensor, torch.cat) + + +def _gpu_gather(tensor): + def _gpu_gather_one(tensor): + if tensor.ndim == 0: + tensor = tensor.clone()[None] + output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())] + torch.distributed.all_gather(output_tensors, tensor) + return torch.cat(output_tensors, dim=0) + + return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True) + + +_cpu_gather = _gpu_gather + + +def gather(tensor): + """ + Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather. + + Returns: + The same data structure as `tensor` with all tensors sent to the proper device. 
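+
+    Example (a sketch for a script launched with `accelerate launch`; output shown for a hypothetical 2-process CPU
+    run):
+
+    ```py
+    >>> state = AcceleratorState()
+    >>> # Each process contributes one element; every process receives the concatenation.
+    >>> gather(torch.tensor([state.process_index], device=state.device))
+    tensor([0, 1])
+    ```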
+    """
+    if AcceleratorState().distributed_type == DistributedType.TPU:
+        return _tpu_gather(tensor, name="accelerate.utils.gather")
+    elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:
+        return _gpu_gather(tensor)
+    elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:
+        return _cpu_gather(tensor)
+    else:
+        return tensor
+
+
+def _gpu_gather_object(object: Any):
+    def _gpu_gather_object_one(object: Any):
+        output_objects = [None for _ in range(AcceleratorState().num_processes)]
+        torch.distributed.all_gather_object(output_objects, object)
+        return output_objects
+
+    return recursively_apply(_gpu_gather_object_one, object)
+
+
+_cpu_gather_object = _gpu_gather_object
+
+
+def gather_object(object: Any):
+    """
+    Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
+
+    Args:
+        object (nested list/tuple/dictionary of picklable object):
+            The data to gather.
+
+    Returns:
+        The same data structure as `object` with all the objects sent to every device.
+    """
+    if AcceleratorState().distributed_type == DistributedType.TPU:
+        raise NotImplementedError("gather objects in TPU is not supported")
+    elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:
+        return _gpu_gather_object(object)
+    elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:
+        return _cpu_gather_object(object)
+    else:
+        return object
+
+
+def _gpu_broadcast(data, src=0):
+    def _gpu_broadcast_one(tensor, src=0):
+        torch.distributed.broadcast(tensor, src=src)
+        return tensor
+
+    return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
+
+
+def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
+    if isinstance(tensor, (list, tuple)):
+        return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
+    elif isinstance(tensor, Mapping):
+        return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
+    return xm.mesh_reduce(name, tensor, lambda x: x[src])
+
+
+def broadcast(tensor, from_process: int = 0):
+    """
+    Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to broadcast.
+        from_process (`int`, *optional*, defaults to 0):
+            The process from which to send the data
+
+    Returns:
+        The same data structure as `tensor` with all tensors broadcasted to the proper device.
+    """
+    if AcceleratorState().distributed_type == DistributedType.TPU:
+        return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
+    elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES:
+        return _gpu_broadcast(tensor, src=from_process)
+    elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:
+        return _gpu_broadcast(tensor, src=from_process)
+    else:
+        return tensor
+
+
+def broadcast_object_list(object_list, from_process: int = 0):
+    """
+    Broadcast a list of picklable objects from one process to the others.
+
+    Args:
+        object_list (list of picklable objects):
+            The list of objects to broadcast. This list will be modified inplace.
+        from_process (`int`, *optional*, defaults to 0):
+            The process from which to send the data.
+
+    Returns:
+        The same list containing the objects from process 0.
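+
+    Example (a sketch for a multi-process run; placeholders on non-source processes are overwritten in place):
+
+    ```py
+    >>> state = AcceleratorState()
+    >>> objs = [{"lr": 1e-4}, "run-42"] if state.process_index == 0 else [None, None]
+    >>> broadcast_object_list(objs)
+    [{'lr': 0.0001}, 'run-42']
+    ```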
+ """ + if AcceleratorState().distributed_type == DistributedType.TPU: + for i, obj in enumerate(object_list): + object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process]) + elif AcceleratorState().distributed_type in CUDA_DISTRIBUTED_TYPES: + torch.distributed.broadcast_object_list(object_list, src=from_process) + elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU: + torch.distributed.broadcast_object_list(object_list, src=from_process) + return object_list + + +def slice_tensors(data, tensor_slice): + """ + Recursively takes a slice in a nested list/tuple/dictionary of tensors. + + Args: + data (nested list/tuple/dictionary of `torch.Tensor`): + The data to slice. + tensor_slice (`slice`): + The slice to take. + + Returns: + The same data structure as `data` with all the tensors slices. + """ + + def _slice_tensor(tensor, tensor_slice): + return tensor[tensor_slice] + + return recursively_apply(_slice_tensor, data, tensor_slice) + + +def concatenate(data, dim=0): + """ + Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape. + + Args: + data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`): + The data to concatenate. + dim (`int`, *optional*, defaults to 0): + The dimension on which to concatenate. + + Returns: + The same data structure as `data` with all the tensors concatenated. + """ + if isinstance(data[0], (tuple, list)): + return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0])))) + elif isinstance(data[0], Mapping): + return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()}) + elif not isinstance(data[0], torch.Tensor): + raise TypeError(f"Can only concatenate tensors but got {type(data[0])}") + return torch.cat(data, dim=dim) + + +def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False): + """ + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they + can safely be gathered. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather. + dim (`int`, *optional*, defaults to 0): + The dimension on which to pad. + pad_index (`int`, *optional*, defaults to 0): + The value with which to pad. + pad_first (`bool`, *optional*, defaults to `False`): + Whether to pad at the beginning or the end. 
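+
+    Example (a sketch for a hypothetical 2-process run; `local_logits` stands in for each process's own tensor):
+
+    ```py
+    >>> # Process 0 holds a (2, 3) tensor, process 1 a (2, 4) tensor.
+    >>> padded = pad_across_processes(local_logits, dim=1, pad_index=0)
+    >>> padded.shape  # both processes now hold (2, 4) and can safely call `gather`
+    torch.Size([2, 4])
+    ```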
+    """
+
+    def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
+        if dim >= len(tensor.shape):
+            return tensor
+
+        # Gather all sizes
+        size = torch.tensor(tensor.shape, device=tensor.device)[None]
+        sizes = gather(size).cpu()
+        # Then pad to the maximum size
+        max_size = max(s[dim] for s in sizes)
+        if max_size == tensor.shape[dim]:
+            return tensor
+
+        old_size = tensor.shape
+        new_size = list(old_size)
+        new_size[dim] = max_size
+        new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
+        if pad_first:
+            indices = tuple(
+                slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
+            )
+        else:
+            indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
+        new_tensor[indices] = tensor
+        return new_tensor
+
+    return recursively_apply(
+        _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
+    )
+
+
+def reduce(tensor, reduction="mean"):
+    """
+    Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using
+    the given operation.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to reduce.
+        reduction (`str`, *optional*, defaults to `"mean"`):
+            A reduction method. Can be one of "mean", "sum", or "none".
+
+    Returns:
+        The same data structure as `data` with all the tensors reduced.
+    """
+
+    def _reduce_across_processes(tensor, reduction="mean"):
+        state = AcceleratorState()
+        cloned_tensor = tensor.clone()
+        if state.distributed_type == DistributedType.TPU:
+            xm.all_reduce("sum", cloned_tensor)
+            return cloned_tensor
+        elif state.distributed_type in CUDA_DISTRIBUTED_TYPES:
+            torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
+            return cloned_tensor
+        else:
+            if reduction == "sum":
+                return cloned_tensor.sum()
+            else:
+                return cloned_tensor.mean()
+
+    return recursively_apply(_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction)
+
+
+def convert_to_fp32(tensor):
+    """
+    Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
+
+    Args:
+        tensor (nested list/tuple/dictionary of `torch.Tensor`):
+            The data to convert from FP16/BF16 to FP32.
+
+    Returns:
+        The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
+    """
+
+    def _convert_to_fp32(tensor):
+        return tensor.float()
+
+    def _is_fp16_bf16_tensor(tensor):
+        return hasattr(tensor, "dtype") and (
+            tensor.dtype == torch.float16 or (is_torch_version(">=", "1.10") and tensor.dtype == torch.bfloat16)
+        )
+
+    return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
+
+
+class ConvertOutputsToFp32:
+    """
+    Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
+    precision will be converted back to FP32.
+
+    Args:
+        model_forward (`Callable`):
+            The function whose outputs we want to treat.
+
+    Returns:
+        The same function as `model_forward` but with converted outputs.
+    """
+
+    def __init__(self, model_forward):
+        self.model_forward = model_forward
+        update_wrapper(self, model_forward)
+
+    def __call__(self, *args, **kwargs):
+        return convert_to_fp32(self.model_forward(*args, **kwargs))
+
+    def __getstate__(self):
+        raise pickle.PicklingError(
+            "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
+ ) + + +convert_outputs_to_fp32 = ConvertOutputsToFp32 + + +def find_device(data): + """ + Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device). + + Args: + (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of. + """ + if isinstance(data, Mapping): + for obj in data.values(): + device = find_device(obj) + if device is not None: + return device + elif isinstance(data, (tuple, list)): + for obj in data: + device = find_device(obj) + if device is not None: + return device + elif isinstance(data, torch.Tensor): + return data.device diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/other.py b/testbed/huggingface__accelerate/src/accelerate/utils/other.py new file mode 100644 index 0000000000000000000000000000000000000000..1eb7cca88f8ea2d83eb8b71031709c3fdf415d9f --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/other.py @@ -0,0 +1,126 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from contextlib import contextmanager + +import torch + +from ..commands.config.default import write_basic_config # noqa: F401 +from ..state import AcceleratorState +from .dataclasses import DistributedType +from .imports import is_deepspeed_available, is_tpu_available + + +if is_deepspeed_available(): + from deepspeed import DeepSpeedEngine + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +def extract_model_from_parallel(model, keep_fp32_wrapper: bool = False): + """ + Extract a model from its distributed containers. + + Args: + model (`torch.nn.Module`): + The model to extract. + keep_fp32_wrapper (`bool`, *optional*): + Whether to remove mixed precision hooks from the model. + + Returns: + `torch.nn.Module`: The extracted model. + """ + options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) + if is_deepspeed_available(): + options += (DeepSpeedEngine,) + + while isinstance(model, options): + model = model.module + + if not keep_fp32_wrapper: + forward = getattr(model, "forward") + original_forward = model.__dict__.pop("_original_forward", None) + if original_forward is not None: + while hasattr(forward, "__wrapped__"): + forward = forward.__wrapped__ + if forward == original_forward: + break + model.forward = forward + return model + + +def wait_for_everyone(): + """ + Introduces a blocking point in the script, making sure all processes have reached this point before continuing. + + + + Make sure all processes will reach this instruction otherwise one of your processes will hang forever. 
+ + + """ + if ( + AcceleratorState().distributed_type == DistributedType.MULTI_GPU + or AcceleratorState().distributed_type == DistributedType.MULTI_CPU + or AcceleratorState().distributed_type == DistributedType.DEEPSPEED + or AcceleratorState().distributed_type == DistributedType.FSDP + ): + torch.distributed.barrier() + elif AcceleratorState().distributed_type == DistributedType.TPU: + xm.rendezvous("accelerate.utils.wait_for_everyone") + + +def save(obj, f): + """ + Save the data to disk. Use in place of `torch.save()`. + + Args: + obj: The data to save + f: The file (or file-like object) to use to save the data + """ + if AcceleratorState().distributed_type == DistributedType.TPU: + xm.save(obj, f) + elif AcceleratorState().local_process_index == 0: + torch.save(obj, f) + + +@contextmanager +def patch_environment(**kwargs): + """ + A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. + + Will convert the values in `kwargs` to strings and upper-case all the keys. + """ + for key, value in kwargs.items(): + os.environ[key.upper()] = str(value) + + yield + + for key in kwargs: + del os.environ[key.upper()] + + +def get_pretty_name(obj): + """ + Gets a pretty name from `obj`. + """ + if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"): + obj = getattr(obj, "__class__", obj) + if hasattr(obj, "__qualname__"): + return obj.__qualname__ + if hasattr(obj, "__name__"): + return obj.__name__ + return str(obj) diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/random.py b/testbed/huggingface__accelerate/src/accelerate/utils/random.py new file mode 100644 index 0000000000000000000000000000000000000000..01c4df2af049dd98a0cc5cfea3649b9f7ee76be1 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/random.py @@ -0,0 +1,88 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from typing import List, Optional, Union + +import numpy as np +import torch + +from ..state import AcceleratorState +from .constants import CUDA_DISTRIBUTED_TYPES +from .dataclasses import DistributedType, RNGType +from .imports import is_tpu_available + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +def set_seed(seed: int, device_specific: bool = False): + """ + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. + + Args: + seed (`int`): The seed to set. + device_specific (`bool`, *optional*, defaults to `False`): + Whether to differ the seed on each device slightly with `self.process_index`. 
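+
+    Example (a minimal sketch):
+
+    ```py
+    >>> set_seed(42)  # seeds `random`, NumPy and PyTorch (CUDA/TPU included when present)
+    >>> set_seed(42, device_specific=True)  # each process offsets the seed by its process index
+    ```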
+ """ + if device_specific: + seed += AcceleratorState().process_index + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + if is_tpu_available(): + xm.set_rng_state(seed) + + +def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None): + # Get the proper rng state + if rng_type == RNGType.TORCH: + rng_state = torch.get_rng_state() + elif rng_type == RNGType.CUDA: + rng_state = torch.cuda.get_rng_state() + elif rng_type == RNGType.XLA: + assert is_tpu_available(), "Can't synchronize XLA seeds on an environment without TPUs." + rng_state = torch.tensor(xm.get_rng_state()) + elif rng_type == RNGType.GENERATOR: + assert generator is not None, "Need a generator to synchronize its seed." + rng_state = generator.get_state() + + # Broadcast the rng state from device 0 to other devices + state = AcceleratorState() + if state.distributed_type == DistributedType.TPU: + rng_state = xm.mesh_reduce("random_seed", rng_state, lambda x: x[0]) + elif state.distributed_type in CUDA_DISTRIBUTED_TYPES: + rng_state = rng_state.to(state.device) + torch.distributed.broadcast(rng_state, 0) + rng_state = rng_state.cpu() + elif state.distributed_type == DistributedType.MULTI_CPU: + torch.distributed.broadcast(rng_state, 0) + + # Set the broadcast rng state + if rng_type == RNGType.TORCH: + torch.set_rng_state(rng_state) + elif rng_type == RNGType.CUDA: + torch.cuda.set_rng_state(rng_state) + elif rng_type == RNGType.XLA: + xm.set_rng_state(rng_state.item()) + elif rng_type == RNGType.GENERATOR: + generator.set_state(rng_state) + + +def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None): + for rng_type in rng_types: + synchronize_rng_state(RNGType(rng_type), generator=generator) diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/rich.py b/testbed/huggingface__accelerate/src/accelerate/utils/rich.py new file mode 100644 index 0000000000000000000000000000000000000000..2d48661b7fcef92ef1168b74cc275c6d3ccc67a1 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/rich.py @@ -0,0 +1,24 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .imports import is_rich_available + + +if is_rich_available(): + from rich.traceback import install + + install(show_locals=False) + +else: + raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`") diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/torch_xla.py b/testbed/huggingface__accelerate/src/accelerate/utils/torch_xla.py new file mode 100644 index 0000000000000000000000000000000000000000..59fe7970cd318c934376fea3c4df527b40c21597 --- /dev/null +++ b/testbed/huggingface__accelerate/src/accelerate/utils/torch_xla.py @@ -0,0 +1,44 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import sys
+
+import pkg_resources
+
+
+def install_xla(upgrade: bool = False):
+    """
+    Helper function to install appropriate xla wheels based on the `torch` version.
+
+    Args:
+        upgrade (`bool`, *optional*, defaults to `False`):
+            Whether to upgrade `torch` and install the latest `torch_xla` wheels.
+    """
+    in_colab = False
+    if "IPython" in sys.modules:
+        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
+
+    if in_colab:
+        if upgrade:
+            torch_install_cmd = ["pip", "install", "-U", "torch"]
+            subprocess.run(torch_install_cmd, check=True)
+        # get the current version of torch
+        torch_version = pkg_resources.get_distribution("torch").version
+        torch_version_trunc = torch_version[: torch_version.rindex(".")]
+        xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl"
+        xla_install_cmd = ["pip", "install", xla_wheel]
+        subprocess.run(xla_install_cmd, check=True)
+    else:
+        raise RuntimeError("`install_xla` utility works only on google colab.")
diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/tqdm.py b/testbed/huggingface__accelerate/src/accelerate/utils/tqdm.py
new file mode 100644
index 0000000000000000000000000000000000000000..be489bd904597ec128c45b835c9ecfb45aa06fe2
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/utils/tqdm.py
@@ -0,0 +1,37 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .imports import is_tqdm_available
+
+
+if is_tqdm_available():
+    from tqdm.auto import tqdm as _tqdm
+
+from ..state import AcceleratorState
+
+
+def tqdm(main_process_only: bool = True, *args, **kwargs):
+    """
+    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
+
+    Args:
+        main_process_only (`bool`, *optional*, defaults to `True`):
+            Whether to display the progress bar only on the main process.
+    """
+    if not is_tqdm_available():
+        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
+    disable = False
+    if main_process_only:
+        # Disable the bar on every process except the (local) main one.
+        disable = AcceleratorState().local_process_index != 0
+    return _tqdm(*args, **kwargs, disable=disable)
diff --git a/testbed/huggingface__accelerate/src/accelerate/utils/versions.py b/testbed/huggingface__accelerate/src/accelerate/utils/versions.py
new file mode 100644
index 0000000000000000000000000000000000000000..38674d4b39c160834cad177b52199a0bd9e27a17
--- /dev/null
+++ b/testbed/huggingface__accelerate/src/accelerate/utils/versions.py
@@ -0,0 +1,61 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from typing import Union
+
+from packaging.version import Version, parse
+
+from .constants import STR_OPERATION_TO_FUNC
+
+
+if sys.version_info < (3, 8):
+    import importlib_metadata
+else:
+    import importlib.metadata as importlib_metadata
+
+# Cache the active torch version once at import time.
+torch_version = parse(importlib_metadata.version("torch"))
+
+
+def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
+    """
+    Compares a library version to some requirement using a given operation.
+
+    Args:
+        library_or_version (`str` or `packaging.version.Version`):
+            A library name or a version to check.
+        operation (`str`):
+            A string representation of an operator, such as `">"` or `"<="`.
+        requirement_version (`str`):
+            The version to compare the library version against.
+    """
+    if operation not in STR_OPERATION_TO_FUNC.keys():
+        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
+    operation = STR_OPERATION_TO_FUNC[operation]
+    if isinstance(library_or_version, str):
+        library_or_version = parse(importlib_metadata.version(library_or_version))
+    return operation(library_or_version, parse(requirement_version))
+
+
+def is_torch_version(operation: str, version: str):
+    """
+    Compares the current PyTorch version to a given reference with an operation.
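+
+    Example (illustrative):
+
+        >>> from accelerate.utils.versions import is_torch_version
+        >>> if is_torch_version(">=", "1.12.0"):
+        ...     pass  # guard for features that need torch >= 1.12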
+ + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(torch_version, operation, version) diff --git a/testbed/huggingface__accelerate/tests/deepspeed/ds_config_zero2.json b/testbed/huggingface__accelerate/tests/deepspeed/ds_config_zero2.json new file mode 100644 index 0000000000000000000000000000000000000000..f031969eeb8cc01c910493a73ea3bffdff518c8c --- /dev/null +++ b/testbed/huggingface__accelerate/tests/deepspeed/ds_config_zero2.json @@ -0,0 +1,49 @@ +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto", + "torch_adam": true, + "adam_w_mode": true + } + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + "reduce_scatter": true, + "reduce_bucket_size": "auto", + "contiguous_gradients": true + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/deepspeed/ds_config_zero3.json b/testbed/huggingface__accelerate/tests/deepspeed/ds_config_zero3.json new file mode 100644 index 0000000000000000000000000000000000000000..846cd73614802f6e6f51bce59bae93f771b3b8cd --- /dev/null +++ b/testbed/huggingface__accelerate/tests/deepspeed/ds_config_zero3.json @@ -0,0 +1,56 @@ +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": "auto" + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "weight_decay": "auto", + "torch_adam": true, + "adam_w_mode": true + } + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": "auto" + }, + "gradient_accumulation_steps": 1, + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} \ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/deepspeed/test_deepspeed.py b/testbed/huggingface__accelerate/tests/deepspeed/test_deepspeed.py new file mode 100644 index 0000000000000000000000000000000000000000..03a4e346cee664707a97ddc8759676eb352c97bc --- /dev/null +++ b/testbed/huggingface__accelerate/tests/deepspeed/test_deepspeed.py @@ -0,0 +1,876 @@ +# 
Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import io +import itertools +import json +import os +import tempfile +import unittest +from copy import deepcopy +from pathlib import Path + +import torch +from torch.utils.data import DataLoader + +import accelerate +from accelerate.accelerator import Accelerator +from accelerate.scheduler import AcceleratedScheduler +from accelerate.state import AcceleratorState +from accelerate.test_utils.testing import ( + TempDirTestCase, + execute_subprocess_async, + require_cuda, + require_deepspeed, + require_multi_gpu, + slow, +) +from accelerate.test_utils.training import RegressionDataset +from accelerate.utils.dataclasses import DeepSpeedPlugin +from accelerate.utils.deepspeed import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, +) +from accelerate.utils.other import patch_environment +from parameterized import parameterized +from transformers import AutoModel, AutoModelForCausalLM, get_scheduler +from transformers.testing_utils import mockenv_context +from transformers.trainer_utils import set_seed +from transformers.utils import is_torch_bf16_available + + +set_seed(42) + +T5_SMALL = "t5-small" +T5_TINY = "patrickvonplaten/t5-tiny-random" +GPT2_TINY = "sshleifer/tiny-gpt2" + +ZERO2 = "zero2" +ZERO3 = "zero3" + +FP16 = "fp16" +BF16 = "bf16" + +CUSTOM_OPTIMIZER = "custom_optimizer" +CUSTOM_SCHEDULER = "custom_scheduler" +DS_OPTIMIZER = "deepspeed_optimizer" +DS_SCHEDULER = "deepspeed_scheduler" + +stages = [ZERO2, ZERO3] +optims = [CUSTOM_OPTIMIZER, DS_OPTIMIZER] +schedulers = [CUSTOM_SCHEDULER, DS_SCHEDULER] +if is_torch_bf16_available(): + dtypes = [FP16, BF16] +else: + dtypes = [FP16] + + +def parameterized_custom_name_func(func, param_num, param): + # customize the test name generator function as we want both params to appear in the sub-test + # name, as by default it shows only the first param + param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) + return f"{func.__name__}_{param_based_name}" + + +# Cartesian-product of zero stages with models to test +params = list(itertools.product(stages, dtypes)) +optim_scheduler_params = list(itertools.product(optims, schedulers)) + + +@require_deepspeed +@require_cuda +class DeepSpeedConfigIntegration(unittest.TestCase): + def setUp(self): + super().setUp() + + self._test_file_path = inspect.getfile(self.__class__) + path = Path(self._test_file_path).resolve() + self.test_file_dir_str = str(path.parents[0]) + + self.ds_config_file = dict( + zero2=f"{self.test_file_dir_str}/ds_config_zero2.json", + zero3=f"{self.test_file_dir_str}/ds_config_zero3.json", + ) + + # use self.get_config_dict(stage) to use these to ensure the original is not modified + with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f: + config_zero2 = json.load(f) + with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f: + config_zero3 
= json.load(f) + # The following setting slows things down, so don't enable it by default unless needed by a test. + # It's in the file as a demo for users since we want everything to work out of the box even if slower. + config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False + + self.ds_config_dict = dict(zero2=config_zero2, zero3=config_zero3) + + self.dist_env = dict( + ACCELERATE_USE_DEEPSPEED="true", + MASTER_ADDR="localhost", + MASTER_PORT="10999", + RANK="0", + LOCAL_RANK="0", + WORLD_SIZE="1", + ) + + def tearDown(self): + super().tearDown() + AcceleratorState._reset_state() + + def get_config_dict(self, stage): + # As some tests modify the dict, always make a copy + return deepcopy(self.ds_config_dict[stage]) + + @parameterized.expand(stages, name_func=parameterized_custom_name_func) + def test_deepspeed_plugin(self, stage): + + # Test zero3_init_flag will be set to False when ZeRO stage != 3 + deepspeed_plugin = DeepSpeedPlugin( + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=2, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=True, + zero3_init_flag=True, + ) + self.assertFalse(deepspeed_plugin.zero3_init_flag) + deepspeed_plugin.deepspeed_config = None + + # Test zero3_init_flag will be set to True only when ZeRO stage == 3 + deepspeed_plugin = DeepSpeedPlugin( + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=3, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=True, + zero3_init_flag=True, + ) + self.assertTrue(deepspeed_plugin.zero3_init_flag) + deepspeed_plugin.deepspeed_config = None + + # Test config files are loaded correctly + deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage], zero3_init_flag=True) + if stage == ZERO2: + self.assertFalse(deepspeed_plugin.zero3_init_flag) + elif stage == ZERO3: + self.assertTrue(deepspeed_plugin.zero3_init_flag) + + # Test `gradient_accumulation_steps` is set to 1 if unavailable in config file + with tempfile.TemporaryDirectory() as dirpath: + ds_config = self.get_config_dict(stage) + del ds_config["gradient_accumulation_steps"] + with open(os.path.join(dirpath, "ds_config.json"), "w") as out_file: + json.dump(ds_config, out_file) + deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, "ds_config.json")) + self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1) + deepspeed_plugin.deepspeed_config = None + + # Test `ValueError` is raised if `zero_optimization` is unavailable in config file + with tempfile.TemporaryDirectory() as dirpath: + ds_config = self.get_config_dict(stage) + del ds_config["zero_optimization"] + with open(os.path.join(dirpath, "ds_config.json"), "w") as out_file: + json.dump(ds_config, out_file) + with self.assertRaises(ValueError) as cm: + deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(dirpath, "ds_config.json")) + self.assertTrue( + "Please specify the ZeRO optimization config in the DeepSpeed config." 
in str(cm.exception) + ) + deepspeed_plugin.deepspeed_config = None + + # Test `deepspeed_config_process` + deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[stage]) + kwargs = { + "fp16.enabled": True, + "bf16.enabled": False, + "optimizer.params.lr": 5e-5, + "optimizer.params.weight_decay": 0.0, + "scheduler.params.warmup_min_lr": 0.0, + "scheduler.params.warmup_max_lr": 5e-5, + "scheduler.params.warmup_num_steps": 0, + "train_micro_batch_size_per_gpu": 16, + "gradient_clipping": 1.0, + "train_batch_size": 16, + "zero_optimization.reduce_bucket_size": 5e5, + "zero_optimization.stage3_prefetch_bucket_size": 5e5, + "zero_optimization.stage3_param_persistence_threshold": 5e5, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + deepspeed_plugin.deepspeed_config_process(**kwargs) + for ds_key_long, value in kwargs.items(): + config, ds_key = deepspeed_plugin.hf_ds_config.find_config_node(ds_key_long) + if config.get(ds_key) is not None: + self.assertEqual(config.get(ds_key), value) + + # Test mismatches + mismatches = { + "optimizer.params.lr": 1e-5, + "optimizer.params.weight_decay": 1e-5, + "gradient_accumulation_steps": 2, + } + with self.assertRaises(ValueError) as cm: + new_kwargs = deepcopy(kwargs) + new_kwargs.update(mismatches) + deepspeed_plugin.deepspeed_config_process(**new_kwargs) + for key in mismatches.keys(): + self.assertTrue( + key in str(cm.exception), + f"{key} is not in the exception message:\n{cm.exception}", + ) + + # Test `ValueError` is raised if some config file fields with `auto` value is missing in `kwargs` + deepspeed_plugin.deepspeed_config["optimizer"]["params"]["lr"] = "auto" + with self.assertRaises(ValueError) as cm: + del kwargs["optimizer.params.lr"] + deepspeed_plugin.deepspeed_config_process(**kwargs) + self.assertTrue("`optimizer.params.lr` not found in kwargs." in str(cm.exception)) + + @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func) + def test_accelerate_state_deepspeed(self, dtype): + AcceleratorState._reset_state() + deepspeed_plugin = DeepSpeedPlugin( + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=ZERO2, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=True, + zero3_init_flag=True, + ) + with mockenv_context(**self.dist_env): + state = Accelerator(mixed_precision=dtype, deepspeed_plugin=deepspeed_plugin).state + self.assertTrue(state.deepspeed_plugin.deepspeed_config[dtype]["enabled"]) + + def test_init_zero3(self): + deepspeed_plugin = DeepSpeedPlugin( + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=3, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=True, + zero3_init_flag=True, + ) + + with mockenv_context(**self.dist_env): + accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin) # noqa: F841 + from transformers.deepspeed import is_deepspeed_zero3_enabled + + self.assertTrue(is_deepspeed_zero3_enabled()) + + @parameterized.expand(optim_scheduler_params, name_func=parameterized_custom_name_func) + def test_prepare_deepspeed(self, optim_type, scheduler_type): + # 1. Testing with one of the ZeRO Stages is enough to test the `_prepare_deepspeed` function. + # Here we test using ZeRO Stage 2 with FP16 enabled. 
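+        # The parameterized sub-cases below pair a custom vs. DeepSpeed-config optimizer with a custom vs.
+        # DeepSpeed-config scheduler, checking both the expected `ValueError`s and the wrapper types
+        # returned by `accelerator.prepare`.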
+ from deepspeed.runtime.engine import DeepSpeedEngine + + kwargs = { + "optimizer.params.lr": 5e-5, + "optimizer.params.weight_decay": 0.0, + "scheduler.params.warmup_min_lr": 0.0, + "scheduler.params.warmup_max_lr": 5e-5, + "scheduler.params.warmup_num_steps": 0, + "train_micro_batch_size_per_gpu": 16, + "gradient_clipping": 1.0, + "train_batch_size": 16, + "zero_optimization.reduce_bucket_size": 5e5, + "zero_optimization.stage3_prefetch_bucket_size": 5e5, + "zero_optimization.stage3_param_persistence_threshold": 5e5, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + + if optim_type == CUSTOM_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER: + # Test custom optimizer + custom scheduler + deepspeed_plugin = DeepSpeedPlugin( + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=2, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=False, + zero3_init_flag=False, + ) + with mockenv_context(**self.dist_env): + accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin) + + train_set = RegressionDataset(length=80) + eval_set = RegressionDataset(length=20) + train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True) + eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False) + model = AutoModel.from_pretrained(GPT2_TINY) + optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) + lr_scheduler = get_scheduler( + name="linear", + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=1000, + ) + dummy_optimizer = DummyOptim(params=model.parameters()) + dummy_lr_scheduler = DummyScheduler(dummy_optimizer) + + with self.assertRaises(ValueError) as cm: + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + self.assertTrue( + "You cannot create a `DummyOptim` without specifying an optimizer in the config file." + in str(cm.exception) + ) + with self.assertRaises(ValueError) as cm: + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler + ) + self.assertTrue( + "You cannot create a `DummyScheduler` without specifying a scheduler in the config file." + in str(cm.exception) + ) + + with self.assertRaises(ValueError) as cm: + model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) + self.assertTrue( + "When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders " + "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file" + "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." 
+                    in str(cm.exception)
+                )
+
+                model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+                    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+                )
+                self.assertTrue(accelerator.deepspeed_config["zero_allow_untested_optimizer"])
+                self.assertEqual(accelerator.deepspeed_config["train_batch_size"], 16)
+                self.assertEqual(type(model), DeepSpeedEngine)
+                self.assertEqual(type(optimizer), DeepSpeedOptimizerWrapper)
+                self.assertEqual(type(lr_scheduler), AcceleratedScheduler)
+                self.assertEqual(type(accelerator.deepspeed_engine_wrapped), DeepSpeedEngineWrapper)
+
+        elif optim_type == DS_OPTIMIZER and scheduler_type == DS_SCHEDULER:
+            # Test DeepSpeed optimizer + DeepSpeed scheduler
+            deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
+            with mockenv_context(**self.dist_env):
+                accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16")
+                train_set = RegressionDataset(length=80)
+                eval_set = RegressionDataset(length=20)
+                train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True)
+                eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False)
+                model = AutoModel.from_pretrained(GPT2_TINY)
+                optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
+                lr_scheduler = get_scheduler(
+                    name="linear",
+                    optimizer=optimizer,
+                    num_warmup_steps=0,
+                    num_training_steps=1000,
+                )
+                dummy_optimizer = DummyOptim(params=model.parameters())
+                dummy_lr_scheduler = DummyScheduler(dummy_optimizer)
+                kwargs["train_batch_size"] = (
+                    kwargs["train_micro_batch_size_per_gpu"]
+                    * deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
+                    * accelerator.num_processes
+                )
+                accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs)
+                with self.assertRaises(ValueError) as cm:
+                    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+                        model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
+                    )
+                self.assertTrue(
+                    "You cannot specify an optimizer in the config file and in the code at the same time"
+                    in str(cm.exception)
+                )
+
+                with self.assertRaises(ValueError) as cm:
+                    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+                        model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler
+                    )
+                self.assertTrue(
+                    "You cannot specify a scheduler in the config file and in the code at the same time"
+                    in str(cm.exception)
+                )
+
+                model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+                    model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler
+                )
+                self.assertTrue(type(model) == DeepSpeedEngine)
+                self.assertTrue(type(optimizer) == DeepSpeedOptimizerWrapper)
+                self.assertTrue(type(lr_scheduler) == DeepSpeedSchedulerWrapper)
+                self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper)
+
+        elif optim_type == CUSTOM_OPTIMIZER and scheduler_type == DS_SCHEDULER:
+            # Test custom optimizer + DeepSpeed scheduler
+            deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2])
+            with mockenv_context(**self.dist_env):
+                accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin,
mixed_precision="fp16") + train_set = RegressionDataset(length=80) + eval_set = RegressionDataset(length=20) + train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True) + eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False) + model = AutoModel.from_pretrained(GPT2_TINY) + optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) + lr_scheduler = get_scheduler( + name="linear", + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=1000, + ) + dummy_optimizer = DummyOptim(params=model.parameters()) + dummy_lr_scheduler = DummyScheduler(dummy_optimizer) + kwargs["train_batch_size"] = ( + kwargs["train_micro_batch_size_per_gpu"] + * deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] + * accelerator.num_processes + ) + accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs) + del accelerator.state.deepspeed_plugin.deepspeed_config["optimizer"] + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler + ) + self.assertTrue(type(model) == DeepSpeedEngine) + self.assertTrue(type(optimizer) == DeepSpeedOptimizerWrapper) + self.assertTrue(type(lr_scheduler) == DeepSpeedSchedulerWrapper) + self.assertTrue(type(accelerator.deepspeed_engine_wrapped) == DeepSpeedEngineWrapper) + elif optim_type == DS_OPTIMIZER and scheduler_type == CUSTOM_SCHEDULER: + # Test deepspeed optimizer + custom scheduler + deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.ds_config_file[ZERO2]) + with mockenv_context(**self.dist_env): + accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16") + train_set = RegressionDataset(length=80) + eval_set = RegressionDataset(length=20) + train_dataloader = DataLoader(train_set, batch_size=10, shuffle=True) + eval_dataloader = DataLoader(eval_set, batch_size=5, shuffle=False) + model = AutoModel.from_pretrained(GPT2_TINY) + optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) + lr_scheduler = get_scheduler( + name="linear", + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=1000, + ) + dummy_optimizer = DummyOptim(params=model.parameters()) + dummy_lr_scheduler = DummyScheduler(dummy_optimizer) + kwargs["train_batch_size"] = ( + kwargs["train_micro_batch_size_per_gpu"] + * deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] + * accelerator.num_processes + ) + accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs) + del accelerator.state.deepspeed_plugin.deepspeed_config["scheduler"] + with self.assertRaises(ValueError) as cm: + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, dummy_optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + self.assertTrue( + "You can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`." 
+ in str(cm.exception) + ) + + def test_save_checkpoints(self): + deepspeed_plugin = DeepSpeedPlugin( + hf_ds_config=self.ds_config_file[ZERO3], + zero3_init_flag=True, + ) + del deepspeed_plugin.deepspeed_config["bf16"] + kwargs = { + "optimizer.params.lr": 5e-5, + "optimizer.params.weight_decay": 0.0, + "scheduler.params.warmup_min_lr": 0.0, + "scheduler.params.warmup_max_lr": 5e-5, + "scheduler.params.warmup_num_steps": 0, + "train_micro_batch_size_per_gpu": 16, + "gradient_clipping": 1.0, + "train_batch_size": 16, + "zero_optimization.reduce_bucket_size": 5e5, + "zero_optimization.stage3_prefetch_bucket_size": 5e5, + "zero_optimization.stage3_param_persistence_threshold": 5e5, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + + with mockenv_context(**self.dist_env): + accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision="fp16") + kwargs["train_batch_size"] = ( + kwargs["train_micro_batch_size_per_gpu"] + * deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] + * accelerator.num_processes + ) + accelerator.state.deepspeed_plugin.deepspeed_config_process(**kwargs) + + train_set = RegressionDataset(length=80) + eval_set = RegressionDataset(length=20) + train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True) + eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False) + model = AutoModelForCausalLM.from_pretrained("gpt2") + dummy_optimizer = DummyOptim(params=model.parameters()) + dummy_lr_scheduler = DummyScheduler(dummy_optimizer) + + model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare( + model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler + ) + with self.assertRaises(ValueError) as cm: + accelerator.get_state_dict(model) + msg = ( + "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " + "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " + "set `zero3_save_16bit_model` to True when using `accelerate config`. " + "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." 
+ ) + self.assertTrue(msg in str(cm.exception)) + + def test_autofill_dsconfig(self): + deepspeed_plugin = DeepSpeedPlugin( + hf_ds_config=self.ds_config_file[ZERO3], + zero3_init_flag=True, + ) + del deepspeed_plugin.deepspeed_config["bf16"] + del deepspeed_plugin.deepspeed_config["fp16"] + + with mockenv_context(**self.dist_env): + accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin) + train_set = RegressionDataset(length=80) + eval_set = RegressionDataset(length=20) + train_dataloader = DataLoader(train_set, batch_size=16, shuffle=True) + eval_dataloader = DataLoader(eval_set, batch_size=32, shuffle=False) + model = AutoModelForCausalLM.from_pretrained("gpt2") + dummy_optimizer = DummyOptim(params=model.parameters(), lr=5e-5, weight_decay=1e-4) + dummy_lr_scheduler = DummyScheduler(dummy_optimizer, warmup_num_steps=10, total_num_steps=1000) + hidden_size = model.config.hidden_size + model, _, train_dataloader, eval_dataloader, _ = accelerator.prepare( + model, dummy_optimizer, train_dataloader, eval_dataloader, dummy_lr_scheduler + ) + self.assertEqual(accelerator.deepspeed_config["train_micro_batch_size_per_gpu"], 16) + self.assertEqual(accelerator.deepspeed_config["train_batch_size"], 16) + + self.assertEqual(accelerator.deepspeed_config["optimizer"]["params"]["lr"], 5e-5) + self.assertEqual(accelerator.deepspeed_config["optimizer"]["params"]["weight_decay"], 1e-4) + + self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_min_lr"], 0.0) + self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_max_lr"], 5e-5) + self.assertEqual(accelerator.deepspeed_config["scheduler"]["params"]["warmup_num_steps"], 10) + + self.assertEqual(accelerator.deepspeed_config["gradient_clipping"], 1.0) + self.assertEqual( + accelerator.deepspeed_config["zero_optimization"]["reduce_bucket_size"], hidden_size * hidden_size + ) + self.assertEqual( + accelerator.deepspeed_config["zero_optimization"]["stage3_prefetch_bucket_size"], + 0.9 * hidden_size * hidden_size, + ) + self.assertEqual( + accelerator.deepspeed_config["zero_optimization"]["stage3_param_persistence_threshold"], + 10 * hidden_size, + ) + self.assertFalse( + accelerator.deepspeed_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] + ) + + @parameterized.expand([FP16, BF16], name_func=parameterized_custom_name_func) + def test_autofill_dsconfig_from_ds_plugin(self, dtype): + ds_config = self.ds_config_dict["zero3"] + if dtype == BF16: + del ds_config["fp16"] + else: + del ds_config["bf16"] + ds_config[dtype]["enabled"] = "auto" + ds_config["zero_optimization"]["stage"] = "auto" + ds_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = "auto" + ds_config["zero_optimization"]["offload_optimizer"]["device"] = "auto" + ds_config["zero_optimization"]["offload_param"]["device"] = "auto" + ds_config["gradient_accumulation_steps"] = "auto" + ds_config["gradient_clipping"] = "auto" + + deepspeed_plugin = DeepSpeedPlugin( + hf_ds_config=ds_config, + zero3_init_flag=True, + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=2, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=True, + ) + + with mockenv_context(**self.dist_env): + accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=dtype) + deepspeed_plugin = accelerator.state.deepspeed_plugin + self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_clipping"], 1.0) + 
self.assertEqual(deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"], 1) + self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["stage"], 2) + self.assertEqual( + deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_optimizer"]["device"], "cpu" + ) + self.assertEqual(deepspeed_plugin.deepspeed_config["zero_optimization"]["offload_param"]["device"], "cpu") + self.assertTrue( + deepspeed_plugin.deepspeed_config["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] + ) + self.assertTrue(deepspeed_plugin.deepspeed_config[dtype]["enabled"]) + + AcceleratorState._reset_state() + diff_dtype = "bf16" if dtype == "fp16" else "fp16" + with mockenv_context(**self.dist_env): + with self.assertRaises(ValueError) as cm: + accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=diff_dtype) + self.assertTrue( + f"`--mixed_precision` arg cannot be set to `{diff_dtype}` when `{dtype}` is set in the DeepSpeed config file." + in str(cm.exception) + ) + + def test_ds_config_assertions(self): + ambiguous_env = self.dist_env.copy() + ambiguous_env[ + "ACCELERATE_CONFIG_DS_FIELDS" + ] = "gradient_accumulation_steps,gradient_clipping,zero_stage,offload_optimizer_device,offload_param_device,zero3_save_16bit_model,mixed_precision" + + with mockenv_context(**ambiguous_env): + with self.assertRaises(ValueError) as cm: + deepspeed_plugin = DeepSpeedPlugin( + hf_ds_config=self.ds_config_file[ZERO3], + zero3_init_flag=True, + gradient_accumulation_steps=1, + gradient_clipping=1.0, + zero_stage=ZERO2, + offload_optimizer_device="cpu", + offload_param_device="cpu", + zero3_save_16bit_model=True, + ) + _ = Accelerator(deepspeed_plugin=deepspeed_plugin, mixed_precision=FP16) + self.assertTrue( + "If you are using an accelerate config file, remove others config variables mentioned in the above specified list." + in str(cm.exception) + ) + + def test_basic_run(self): + mod_file = inspect.getfile(accelerate.test_utils) + test_file_path = os.path.sep.join( + mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_performance.py"] + ) + with tempfile.TemporaryDirectory() as dirpath: + cmd = [ + "accelerate", + "launch", + "--num_processes=1", + "--num_machines=1", + "--machine_rank=0", + "--mixed_precision=fp16", + "--use_deepspeed", + "--gradient_accumulation_steps=1", + "--zero_stage=2", + "--offload_optimizer_device=none", + "--offload_param_device=none", + test_file_path, + "--model_name_or_path=distilbert-base-uncased", + "--num_epochs=1", + f"--output_dir={dirpath}", + ] + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd, env=os.environ.copy()) + + +@require_deepspeed +@require_multi_gpu +@slow +class DeepSpeedIntegrationTest(TempDirTestCase): + def setUp(self): + super().setUp() + self._test_file_path = inspect.getfile(self.__class__) + path = Path(self._test_file_path).resolve() + self.test_file_dir_str = str(path.parents[0]) + + self.ds_config_file = dict( + zero2=f"{self.test_file_dir_str}/ds_config_zero2.json", + zero3=f"{self.test_file_dir_str}/ds_config_zero3.json", + ) + + self.stages = [1, 2, 3] + self.zero3_offload_config = False + self.performance_lower_bound = 0.82 + self.peak_memory_usage_upper_bound = { + "multi_gpu_fp16": 3200, + "deepspeed_stage_1_fp16": 1600, + "deepspeed_stage_2_fp16": 2500, + "deepspeed_stage_3_zero_init_fp16": 2800, + # Disabling below test as it overwhelms the RAM memory usage + # on CI self-hosted runner leading to tests getting killed. 
+ # "deepspeed_stage_3_cpu_offload_fp16": 1900, + } + self.n_train = 160 + self.n_val = 160 + + mod_file = inspect.getfile(accelerate.test_utils) + self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"]) + + def test_performance(self): + self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py") + cmd = [ + "accelerate", + "launch", + "--num_processes=2", + "--num_machines=1", + "--machine_rank=0", + "--mixed_precision=fp16", + "--use_deepspeed", + "--gradient_accumulation_steps=1", + "--gradient_clipping=1", + "--zero3_init_flag=True", + "--zero3_save_16bit_model=True", + ] + for stage in self.stages: + if stage == 1: + continue + cmd_stage = cmd.copy() + cmd_stage.extend([f"--zero_stage={stage}"]) + cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"]) + if self.zero3_offload_config: + with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f: + ds_config = json.load(f) + del ds_config["bf16"] + del ds_config["optimizer"]["params"]["torch_adam"] + del ds_config["optimizer"]["params"]["adam_w_mode"] + ds_config["fp16"]["enabled"] = True + ds_config_path = os.path.join(self.tmpdir, "ds_config.json") + with open(ds_config_path, "w") as out_file: + json.dump(ds_config, out_file) + + cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"]) + + cmd_stage.extend( + [ + self.test_file_path, + f"--output_dir={self.tmpdir}", + f"--performance_lower_bound={self.performance_lower_bound}", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_stage, env=os.environ.copy()) + + def test_checkpointing(self): + self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py") + cmd = [ + "accelerate", + "launch", + "--num_processes=2", + "--num_machines=1", + "--machine_rank=0", + "--mixed_precision=fp16", + "--use_deepspeed", + "--gradient_accumulation_steps=1", + "--gradient_clipping=1", + "--zero3_init_flag=True", + "--zero3_save_16bit_model=True", + ] + for stage in self.stages: + if stage == 1: + continue + cmd_stage = cmd.copy() + cmd_stage.extend([f"--zero_stage={stage}"]) + cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"]) + if self.zero3_offload_config: + with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f: + ds_config = json.load(f) + del ds_config["bf16"] + del ds_config["optimizer"]["params"]["torch_adam"] + del ds_config["optimizer"]["params"]["adam_w_mode"] + ds_config["fp16"]["enabled"] = True + ds_config_path = os.path.join(self.tmpdir, "ds_config.json") + with open(ds_config_path, "w") as out_file: + json.dump(ds_config, out_file) + + cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"]) + + cmd_stage.extend( + [ + self.test_file_path, + f"--output_dir={self.tmpdir}", + "--partial_train_epoch=1", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_stage, env=os.environ.copy()) + + cmd_stage = cmd_stage[:-1] + resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0") + cmd_stage.extend( + [ + f"--resume_from_checkpoint={resume_from_checkpoint}", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_stage, env=os.environ.copy()) + + def test_peak_memory_usage(self): + self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py") + cmd = [ + "accelerate", + "launch", + "--num_processes=2", + "--num_machines=1", + "--machine_rank=0", + ] + for spec, 
peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): + cmd_stage = cmd.copy() + if "fp16" in spec: + cmd_stage.extend(["--mixed_precision=fp16"]) + + if "multi_gpu" in spec: + continue + else: + cmd_stage.extend( + [ + "--use_deepspeed", + "--gradient_accumulation_steps=1", + "--gradient_clipping=1", + "--zero3_init_flag=True", + "--zero3_save_16bit_model=True", + ] + ) + for i in range(3): + if f"stage_{i+1}" in spec: + cmd_stage.extend([f"--zero_stage={i+1}"]) + break + cmd_stage.extend(["--offload_optimizer_device=none", "--offload_param_device=none"]) + if "cpu_offload" in spec: + with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f: + ds_config = json.load(f) + del ds_config["bf16"] + del ds_config["fp16"] + del ds_config["optimizer"]["params"]["torch_adam"] + del ds_config["optimizer"]["params"]["adam_w_mode"] + ds_config_path = os.path.join(self.tmpdir, "ds_config.json") + with open(ds_config_path, "w") as out_file: + json.dump(ds_config, out_file) + + cmd_stage.extend([f"--deepspeed_config_file={ds_config_path}"]) + + cmd_stage.extend( + [ + self.test_file_path, + f"--output_dir={self.tmpdir}", + f"--peak_memory_upper_bound={peak_mem_upper_bound}", + f"--n_train={self.n_train}", + f"--n_val={self.n_val}", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_stage, env=os.environ.copy()) diff --git a/testbed/huggingface__accelerate/tests/fsdp/test_fsdp.py b/testbed/huggingface__accelerate/tests/fsdp/test_fsdp.py new file mode 100644 index 0000000000000000000000000000000000000000..19917c671a0ffab4b734083335bfa00d424246ad --- /dev/null +++ b/testbed/huggingface__accelerate/tests/fsdp/test_fsdp.py @@ -0,0 +1,331 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import inspect +import os +import unittest + +import torch + +import accelerate +from accelerate.accelerator import Accelerator +from accelerate.state import AcceleratorState +from accelerate.test_utils.testing import ( + TempDirTestCase, + execute_subprocess_async, + require_cuda, + require_fsdp, + require_multi_gpu, + slow, +) +from accelerate.utils.constants import ( + FSDP_AUTO_WRAP_POLICY, + FSDP_BACKWARD_PREFETCH, + FSDP_SHARDING_STRATEGY, + FSDP_STATE_DICT_TYPE, +) +from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin +from accelerate.utils.other import patch_environment +from transformers import AutoModel +from transformers.testing_utils import mockenv_context +from transformers.trainer_utils import set_seed + + +set_seed(42) + +BERT_BASE_CASED = "bert-base-cased" +FP16 = "fp16" +BF16 = "bf16" +dtypes = [FP16, BF16] + + +@require_fsdp +@require_cuda +class FSDPPluginIntegration(unittest.TestCase): + def setUp(self): + super().setUp() + + self.dist_env = dict( + ACCELERATE_USE_FSDP="true", + MASTER_ADDR="localhost", + MASTER_PORT="10999", + RANK="0", + LOCAL_RANK="0", + WORLD_SIZE="1", + ) + + def tearDown(self): + super().tearDown() + AcceleratorState._reset_state() + + def test_sharding_strategy(self): + from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy + + for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): + env = self.dist_env.copy() + env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}" + env["FSDP_SHARDING_STRATEGY_NAME"] = strategy + with mockenv_context(**env): + fsdp_plugin = FullyShardedDataParallelPlugin() + self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1)) + + def test_backward_prefetch(self): + from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch + + for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH): + env = self.dist_env.copy() + env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy + with mockenv_context(**env): + fsdp_plugin = FullyShardedDataParallelPlugin() + if prefetch_policy == "NO_PREFETCH": + self.assertIsNone(fsdp_plugin.backward_prefetch) + else: + self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1)) + + def test_state_dict_type(self): + from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType + + for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE): + env = self.dist_env.copy() + env["FSDP_STATE_DICT_TYPE"] = state_dict_type + with mockenv_context(**env): + fsdp_plugin = FullyShardedDataParallelPlugin() + self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1)) + if state_dict_type == "FULL_STATE_DICT": + self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu) + self.assertTrue(fsdp_plugin.state_dict_config.rank0_only) + + def test_auto_wrap_policy(self): + model = AutoModel.from_pretrained(BERT_BASE_CASED) + for policy in FSDP_AUTO_WRAP_POLICY: + env = self.dist_env.copy() + env["FSDP_AUTO_WRAP_POLICY"] = policy + if policy == "TRANSFORMER_BASED_WRAP": + env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer" + elif policy == "SIZE_BASED_WRAP": + env["FSDP_MIN_NUM_PARAMS"] = "2000" + with mockenv_context(**env): + fsdp_plugin = FullyShardedDataParallelPlugin() + fsdp_plugin.set_auto_wrap_policy(model) + if policy == "NO_WRAP": + self.assertIsNone(fsdp_plugin.auto_wrap_policy) + else: + self.assertIsNotNone(fsdp_plugin.auto_wrap_policy) + + env = self.dist_env.copy() + env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP" + env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer" + with mockenv_context(**env): 
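+            # `model` is BERT-based, so asking to wrap `T5Layer` should raise a clear error below.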
+ fsdp_plugin = FullyShardedDataParallelPlugin() + with self.assertRaises(Exception) as cm: + fsdp_plugin.set_auto_wrap_policy(model) + self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception)) + + env = self.dist_env.copy() + env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP" + env["FSDP_MIN_NUM_PARAMS"] = "0" + with mockenv_context(**env): + fsdp_plugin = FullyShardedDataParallelPlugin() + fsdp_plugin.set_auto_wrap_policy(model) + self.assertIsNone(fsdp_plugin.auto_wrap_policy) + + def test_mixed_precision(self): + from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision + from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler + + for mp_dtype in dtypes: + env = self.dist_env.copy() + env["ACCELERATE_MIXED_PRECISION"] = mp_dtype + with mockenv_context(**env): + accelerator = Accelerator() + if mp_dtype == "fp16": + dtype = torch.float16 + elif mp_dtype == "bf16": + dtype = torch.bfloat16 + mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) + self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy) + if mp_dtype == FP16: + self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler)) + elif mp_dtype == BF16: + self.assertIsNone(accelerator.scaler) + AcceleratorState._reset_state() + + def test_cpu_offload(self): + from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload + + for flag in [True, False]: + env = self.dist_env.copy() + env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower() + with mockenv_context(**env): + fsdp_plugin = FullyShardedDataParallelPlugin() + self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag)) + + +@require_fsdp +@require_multi_gpu +@slow +class FSDPIntegrationTest(TempDirTestCase): + def setUp(self): + super().setUp() + self.performance_lower_bound = 0.82 + self.performance_configs = [ + "fsdp_shard_grad_op_transformer_based_wrap", + "fsdp_full_shard_transformer_based_wrap", + ] + self.peak_memory_usage_upper_bound = { + "multi_gpu_fp16": 3200, + "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000, + "fsdp_full_shard_transformer_based_wrap_fp16": 1900, + # Disabling below test as it overwhelms the RAM memory usage + # on CI self-hosted runner leading to tests getting killed. 
+ # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang + } + self.n_train = 160 + self.n_val = 160 + + mod_file = inspect.getfile(accelerate.test_utils) + self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"]) + + def test_performance(self): + self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py") + cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"] + for config in self.performance_configs: + cmd_config = cmd.copy() + for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): + if strategy.lower() in config: + cmd_config.append(f"--fsdp_sharding_strategy={i+1}") + break + + if "fp32" in config: + cmd_config.append("--mixed_precision=no") + else: + cmd_config.append("--mixed_precision=fp16") + + if "cpu_offload" in config: + cmd_config.append("--fsdp_offload_params=True") + + for policy in FSDP_AUTO_WRAP_POLICY: + if policy.lower() in config: + cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") + break + + if policy == "TRANSFORMER_BASED_WRAP": + cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") + elif policy == "SIZE_BASED_WRAP": + cmd_config.append("--fsdp_min_num_params=2000") + + cmd_config.extend( + [ + self.test_file_path, + f"--output_dir={self.tmpdir}", + f"--performance_lower_bound={self.performance_lower_bound}", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_config, env=os.environ.copy()) + + def test_checkpointing(self): + self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py") + cmd = [ + "accelerate", + "launch", + "--num_processes=2", + "--num_machines=1", + "--machine_rank=0", + "--use_fsdp", + "--mixed_precision=fp16", + "--fsdp_transformer_layer_cls_to_wrap=BertLayer", + ] + + for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): + cmd_config = cmd.copy() + cmd_config.append(f"--fsdp_sharding_strategy={i+1}") + if strategy != "FULL_SHARD": + continue + state_dict_config_index = len(cmd_config) + for state_dict_type in FSDP_STATE_DICT_TYPE: + cmd_config = cmd_config[:state_dict_config_index] + if state_dict_type == "SHARDED_STATE_DICT": + continue + cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}") + cmd_config.extend( + [ + self.test_file_path, + f"--output_dir={self.tmpdir}", + "--partial_train_epoch=1", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_config, env=os.environ.copy()) + + cmd_config = cmd_config[:-1] + resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0") + cmd_config.extend( + [ + f"--resume_from_checkpoint={resume_from_checkpoint}", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_config, env=os.environ.copy()) + + def test_peak_memory_usage(self): + self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py") + cmd = [ + "accelerate", + "launch", + "--num_processes=2", + "--num_machines=1", + "--machine_rank=0", + ] + for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): + cmd_config = cmd.copy() + if "fp16" in spec: + cmd_config.extend(["--mixed_precision=fp16"]) + else: + cmd_config.extend(["--mixed_precision=no"]) + + if "multi_gpu" in spec: + continue + else: + cmd_config.extend(["--use_fsdp"]) + for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): + if strategy.lower() in spec: + cmd_config.append(f"--fsdp_sharding_strategy={i+1}") + 
break + + if "cpu_offload" in spec: + cmd_config.append("--fsdp_offload_params=True") + + for policy in FSDP_AUTO_WRAP_POLICY: + if policy.lower() in spec: + cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") + break + + if policy == "TRANSFORMER_BASED_WRAP": + cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") + elif policy == "SIZE_BASED_WRAP": + cmd_config.append("--fsdp_min_num_params=2000") + + cmd_config.extend( + [ + self.test_file_path, + f"--output_dir={self.tmpdir}", + f"--peak_memory_upper_bound={peak_mem_upper_bound}", + f"--n_train={self.n_train}", + f"--n_val={self.n_val}", + ] + ) + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd_config, env=os.environ.copy()) diff --git a/testbed/huggingface__accelerate/tests/test_accelerator.py b/testbed/huggingface__accelerate/tests/test_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..19d6c1655b47957d417d841bc59ca0093bf953fe --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_accelerator.py @@ -0,0 +1,51 @@ +import unittest + +import torch +from torch.utils.data import DataLoader, TensorDataset + +from accelerate.accelerator import Accelerator +from accelerate.state import AcceleratorState + + +def create_components(): + model = torch.nn.Linear(2, 4) + optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) + scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) + train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3]))) + valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6]))) + + return model, optimizer, scheduler, train_dl, valid_dl + + +class AcceleratorTester(unittest.TestCase): + def test_prepared_objects_are_referenced(self): + accelerator = Accelerator() + model, optimizer, scheduler, train_dl, valid_dl = create_components() + + ( + prepared_model, + prepared_optimizer, + prepared_scheduler, + prepared_train_dl, + prepared_valid_dl, + ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) + + self.assertTrue(prepared_model in accelerator._models) + self.assertTrue(prepared_optimizer in accelerator._optimizers) + self.assertTrue(prepared_scheduler in accelerator._schedulers) + self.assertTrue(prepared_train_dl in accelerator._dataloaders) + self.assertTrue(prepared_valid_dl in accelerator._dataloaders) + AcceleratorState._reset_state() + + def test_free_memory_dereferences_prepared_components(self): + accelerator = Accelerator() + model, optimizer, scheduler, train_dl, valid_dl = create_components() + accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl) + + accelerator.free_memory() + + self.assertTrue(len(accelerator._models) == 0) + self.assertTrue(len(accelerator._optimizers) == 0) + self.assertTrue(len(accelerator._schedulers) == 0) + self.assertTrue(len(accelerator._dataloaders) == 0) + AcceleratorState._reset_state() diff --git a/testbed/huggingface__accelerate/tests/test_big_modeling.py b/testbed/huggingface__accelerate/tests/test_big_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..6fe78beb04ad5cef746bd73d8d9247a6018a0a72 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_big_modeling.py @@ -0,0 +1,486 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +from tempfile import TemporaryDirectory + +import torch +import torch.nn as nn + +from accelerate.big_modeling import ( + cpu_offload, + disk_offload, + dispatch_model, + init_empty_weights, + init_on_device, + load_checkpoint_and_dispatch, +) +from accelerate.hooks import remove_hook_from_submodules +from accelerate.test_utils import require_cuda, require_mps, require_multi_gpu, require_torch_min_version, slow +from accelerate.utils import offload_state_dict +from transformers import AutoModelForCausalLM, AutoTokenizer + + +class ModelForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(3, 4) + self.batchnorm = nn.BatchNorm1d(4) + self.linear2 = nn.Linear(4, 5) + + def forward(self, x): + return self.linear2(self.batchnorm(self.linear1(x))) + + +class ModelForTestTiedWeights(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(4, 4) + self.batchnorm = nn.BatchNorm1d(4) + self.linear2 = nn.Linear(4, 4) + + def forward(self, x): + return self.linear2(self.batchnorm(self.linear1(x))) + + +class BiggerModelForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(3, 4) + self.linear2 = nn.Linear(4, 5) + self.batchnorm = nn.BatchNorm1d(5) + self.linear3 = nn.Linear(5, 6) + self.linear4 = nn.Linear(6, 5) + + def forward(self, x): + return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x))))) + + +# To test preload_module_classes +class ModuleWithUnusedSubModules(nn.Module): + def __init__(self, input_dim, output_dim): + super().__init__() + self.linear = nn.Linear(input_dim, output_dim) + + def forward(self, x): + return x @ self.linear.weight.t() + self.linear.bias + + +class ModelWithUnusedSubModulesForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = ModuleWithUnusedSubModules(3, 4) + self.linear2 = ModuleWithUnusedSubModules(4, 5) + self.batchnorm = nn.BatchNorm1d(5) + self.linear3 = ModuleWithUnusedSubModules(5, 6) + self.linear4 = ModuleWithUnusedSubModules(6, 5) + + def forward(self, x): + return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x))))) + + +@require_torch_min_version(version="1.9.0") +class BigModelingTester(unittest.TestCase): + def test_init_empty_weights(self): + # base use + with init_empty_weights(): + module = nn.Linear(4, 5) + self.assertEqual(module.weight.device, torch.device("meta")) + + # base use with buffers, they are not touched + with init_empty_weights(): + module = nn.BatchNorm1d(4) + self.assertEqual(module.weight.device, torch.device("meta")) + self.assertEqual(module.running_mean.device, torch.device("cpu")) + + # Use with include_buffers=True + with init_empty_weights(include_buffers=True): + module = nn.BatchNorm1d(4) + self.assertEqual(module.weight.device, torch.device("meta")) + self.assertEqual(module.running_mean.device, torch.device("meta")) + + # Double check we didn't break PyTorch + module = nn.BatchNorm1d(4) + self.assertEqual(module.weight.device, torch.device("cpu")) + self.assertEqual(module.running_mean.device, torch.device("cpu")) + + 
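The assertions above pin down the contract of `init_empty_weights`: parameters are created on the meta device (so no real storage is allocated), and buffers follow only when `include_buffers=True`. As context, here is a minimal sketch of how such a context manager can be built by intercepting parameter and buffer registration. It is an illustration of the mechanism only, not accelerate's actual implementation; `init_empty_weights_sketch` is a hypothetical name, and the real version also preserves the parameter's subclass.

```python
from contextlib import contextmanager

import torch
import torch.nn as nn


@contextmanager
def init_empty_weights_sketch(include_buffers: bool = False):
    # Swap nn.Module's registration hooks so every newly created parameter
    # (and, optionally, buffer) is immediately re-created on the meta device.
    old_register_parameter = nn.Module.register_parameter
    old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        old_register_parameter(module, name, param)
        if param is not None:
            module._parameters[name] = nn.Parameter(
                param.detach().to(torch.device("meta")), requires_grad=param.requires_grad
            )

    def register_empty_buffer(module, name, buffer, persistent=True):
        old_register_buffer(module, name, buffer, persistent=persistent)
        if buffer is not None:
            module._buffers[name] = buffer.to(torch.device("meta"))

    try:
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        yield
    finally:
        nn.Module.register_parameter = old_register_parameter
        nn.Module.register_buffer = old_register_buffer
```

With this in place, an `nn.Linear(4, 5)` built inside the context reports `weight.device == torch.device("meta")`, which is exactly what the test above checks.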
def test_init_empty_weights_very_large_model(self): + # This is a 100 billion parameters model. + with init_empty_weights(): + _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) + + @require_cuda + def test_init_on_device_cuda(self): + device = torch.device("cuda:0") + with init_on_device(device): + model = nn.Linear(10, 10) + self.assertEqual(model.weight.device, device) + self.assertEqual(model.weight.device, device) + + @require_mps + def test_init_on_device_mps(self): + device = torch.device("mps:0") + with init_on_device(device): + model = nn.Linear(10, 10) + self.assertEqual(model.weight.device, device) + self.assertEqual(model.weight.device, device) + + def test_cpu_offload(self): + model = ModelForTest() + x = torch.randn(2, 3) + expected = model(x) + + device = torch.device(0 if torch.cuda.is_available() else "cpu") + + cpu_offload(model, execution_device=device) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + # Clean up for next test. + remove_hook_from_submodules(model) + + cpu_offload(model, execution_device=device, offload_buffers=True) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + def test_cpu_offload_with_unused_submodules(self): + model = ModelWithUnusedSubModulesForTest() + x = torch.randn(2, 3) + expected = model(x) + + device = torch.device(0 if torch.cuda.is_available() else "cpu") + + cpu_offload(model, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"]) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + # Clean up for next test. + remove_hook_from_submodules(model) + + cpu_offload( + model, + execution_device=device, + offload_buffers=True, + preload_module_classes=["ModuleWithUnusedSubModules"], + ) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + @slow + @require_cuda + def test_cpu_offload_gpt2(self): + tokenizer = AutoTokenizer.from_pretrained("gpt2") + inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) + + gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") + cpu_offload(gpt2, execution_device=0) + outputs = gpt2.generate(inputs["input_ids"]) + self.assertEqual( + tokenizer.decode(outputs[0].tolist()), + "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", + ) + + def test_disk_offload(self): + model = ModelForTest() + x = torch.randn(2, 3) + expected = model(x) + + device = torch.device(0 if torch.cuda.is_available() else "cpu") + + with TemporaryDirectory() as tmp_dir: + disk_offload(model, tmp_dir, execution_device=device) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + # Clean up for next test. 
+ remove_hook_from_submodules(model) + + with TemporaryDirectory() as tmp_dir: + disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + def test_disk_offload_with_unused_submodules(self): + model = ModelWithUnusedSubModulesForTest() + x = torch.randn(2, 3) + expected = model(x) + + device = torch.device(0 if torch.cuda.is_available() else "cpu") + + with TemporaryDirectory() as tmp_dir: + disk_offload( + model, tmp_dir, execution_device=device, preload_module_classes=["ModuleWithUnusedSubModules"] + ) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + # Clean up for next test. + remove_hook_from_submodules(model) + + with TemporaryDirectory() as tmp_dir: + disk_offload( + model, + tmp_dir, + execution_device=device, + offload_buffers=True, + preload_module_classes=["ModuleWithUnusedSubModules"], + ) + output = model(x) + self.assertTrue( + torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f"Expected: {expected}\nActual: {output.cpu()}" + ) + + @slow + @require_cuda + def test_disk_offload_gpt2(self): + tokenizer = AutoTokenizer.from_pretrained("gpt2") + inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) + + gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") + with TemporaryDirectory() as tmp_dir: + disk_offload(gpt2, tmp_dir, execution_device=0) + outputs = gpt2.generate(inputs["input_ids"]) + self.assertEqual( + tokenizer.decode(outputs[0].tolist()), + "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", + ) + + @require_cuda + def test_dispatch_model(self): + model = ModelForTest() + device_map = {"linear1": "disk", "batchnorm": "cpu", "linear2": 0} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + dispatch_model(model, device_map, offload_dir=tmp_dir) + output = model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @require_cuda + def test_dispatch_model_tied_weights(self): + model = ModelForTestTiedWeights() + model.linear1.weight = model.linear2.weight + device_map = {"linear1": 0, "batchnorm": 0, "linear2": 0} + + dispatch_model(model, device_map) + self.assertIs(model.linear2.weight, model.linear1.weight) + + @require_multi_gpu + def test_dispatch_model_multi_gpu(self): + model = BiggerModelForTest() + device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + dispatch_model(model, device_map, offload_dir=tmp_dir) + output = model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @slow + @require_multi_gpu + def test_dispatch_model_gpt2_on_two_gpus(self): + tokenizer = AutoTokenizer.from_pretrained("gpt2") + inputs = tokenizer("Hello world! My name is", return_tensors="pt").to(0) + + gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") + # Dispatch on GPUs 0 and 1 + device_map = { + "transformer.wte": 0, + "transformer.wpe": 0, + "transformer.ln_f": 1, + "lm_head": 1, + } + for i in range(12): + device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 + + gpt2 = dispatch_model(gpt2, device_map) + outputs = gpt2.generate(inputs["input_ids"]) + self.assertEqual( + tokenizer.decode(outputs[0].tolist()), + "Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo", + ) + + # Dispatch with a bit of CPU offload + gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") + for i in range(4): + device_map[f"transformer.h.{i}"] = "cpu" + gpt2 = dispatch_model(gpt2, device_map) + outputs = gpt2.generate(inputs["input_ids"]) + self.assertEqual( + tokenizer.decode(outputs[0].tolist()), + "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", + ) + # Dispatch with a bit of CPU and disk offload + gpt2 = AutoModelForCausalLM.from_pretrained("gpt2") + for i in range(2): + device_map[f"transformer.h.{i}"] = "disk" + + with TemporaryDirectory() as tmp_dir: + state_dict = { + k: p for k, p in gpt2.state_dict().items() if "transformer.h.0" in k or "transformer.h.1" in k + } + offload_state_dict(tmp_dir, state_dict) + gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir) + outputs = gpt2.generate(inputs["input_ids"]) + self.assertEqual( + tokenizer.decode(outputs[0].tolist()), + "Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo", + ) + + @require_cuda + def test_dispatch_model_with_unused_submodules(self): + model = ModelWithUnusedSubModulesForTest() + device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 0} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + dispatch_model( + model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] + ) + output = model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @require_multi_gpu + def test_dispatch_model_with_unused_submodules_multi_gpu(self): + model = ModelWithUnusedSubModulesForTest() + device_map = {"linear1": "cpu", "linear2": "disk", "batchnorm": "cpu", "linear3": 0, "linear4": 1} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + dispatch_model( + model, device_map, offload_dir=tmp_dir, preload_module_classes=["ModuleWithUnusedSubModules"] + ) + output = model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @require_cuda + def test_load_checkpoint_and_dispatch(self): + model = ModelForTest() + device_map = {"linear1": "cpu", "batchnorm": "cpu", "linear2": 0} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + checkpoint = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), checkpoint) + + new_model = ModelForTest() + new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map) + + # CPU-offloaded weights are on the meta device while waiting for the forward pass. + self.assertEqual(new_model.linear1.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear2.weight.device, torch.device(0)) + + output = new_model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @require_multi_gpu + def test_load_checkpoint_and_dispatch_multi_gpu(self): + model = BiggerModelForTest() + device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + checkpoint = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), checkpoint) + + new_model = BiggerModelForTest() + new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map) + + # CPU-offloaded weights are on the meta device while waiting for the forward pass. 
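+ # (Per the device_map above: linear1 and linear2 are CPU-offloaded, so their weights sit on the meta device until the forward pass; linear3 lives on GPU 0 and linear4 on GPU 1.)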
+ self.assertEqual(new_model.linear1.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear2.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear3.weight.device, torch.device(0)) + self.assertEqual(new_model.linear4.weight.device, torch.device(1)) + + output = new_model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @require_cuda + def test_load_checkpoint_and_dispatch_with_unused_submodules(self): + model = ModelWithUnusedSubModulesForTest() + device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 0} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + checkpoint = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), checkpoint) + + new_model = ModelWithUnusedSubModulesForTest() + new_model = load_checkpoint_and_dispatch( + new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"] + ) + + # CPU-offloaded weights are on the meta device while waiting for the forward pass. + self.assertEqual(new_model.linear1.linear.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear2.linear.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0)) + self.assertEqual(new_model.linear4.linear.weight.device, torch.device(0)) + + output = new_model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) + + @require_multi_gpu + def test_load_checkpoint_and_dispatch_multi_gpu_with_unused_submodules(self): + model = ModelWithUnusedSubModulesForTest() + device_map = {"linear1": "cpu", "linear2": "cpu", "batchnorm": 0, "linear3": 0, "linear4": 1} + + x = torch.randn(2, 3) + expected = model(x) + + with TemporaryDirectory() as tmp_dir: + checkpoint = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), checkpoint) + + new_model = ModelWithUnusedSubModulesForTest() + new_model = load_checkpoint_and_dispatch( + new_model, checkpoint, device_map=device_map, preload_module_classes=["ModuleWithUnusedSubModules"] + ) + + # CPU-offloaded weights are on the meta device while waiting for the forward pass. + self.assertEqual(new_model.linear1.linear.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear2.linear.weight.device, torch.device("meta")) + self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0)) + self.assertEqual(new_model.linear4.linear.weight.device, torch.device(1)) + + output = new_model(x) + self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5)) diff --git a/testbed/huggingface__accelerate/tests/test_cli.py b/testbed/huggingface__accelerate/tests/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..50ed87d9f25aeaffb9e7a82a62edd9d99a90cdc5 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_cli.py @@ -0,0 +1,214 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
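The launcher tests below shell out to the `accelerate` CLI and assert on its output and exit status. As context, a simplified stand-in for the subprocess helper pattern they rely on is sketched here; it is hypothetical and only mirrors the role of `run_command`/`execute_subprocess_async` from `accelerate.test_utils`, not their implementation.

```python
import subprocess
from typing import List, Optional


def run_command_sketch(command: List[str], return_stdout: bool = False) -> Optional[str]:
    # Run a CLI command, fail loudly on a non-zero exit code, and optionally
    # hand the captured stdout back to the caller for assertions.
    result = subprocess.run(command, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"Command {command} failed:\n{result.stderr}")
    return result.stdout if return_stdout else None


# For example, launching a script through a saved config file:
# run_command_sketch(["accelerate", "launch", "--config_file", "default_config.yaml", "train.py"])
```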
+ +import inspect +import os +import unittest +from pathlib import Path + +import torch + +import accelerate +from accelerate.test_utils import execute_subprocess_async +from accelerate.test_utils.testing import run_command + + +class AccelerateLauncherTester(unittest.TestCase): + """ + Test case for verifying the `accelerate launch` CLI operates correctly. + If a `default_config.yaml` file is located in the cache it will temporarily move it + for the duration of the tests. + """ + + mod_file = inspect.getfile(accelerate.test_utils) + test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"]) + + base_cmd = ["accelerate", "launch"] + config_folder = Path.home() / ".cache/huggingface/accelerate" + config_file = "default_config.yaml" + config_path = config_folder / config_file + changed_path = config_folder / "_default_config.yaml" + + test_config_path = Path("tests/test_configs") + + @classmethod + def setUpClass(cls): + if cls.config_path.is_file(): + cls.config_path.rename(cls.changed_path) + + @classmethod + def tearDownClass(cls): + if cls.changed_path.is_file(): + cls.changed_path.rename(cls.config_path) + + def test_no_config(self): + cmd = self.base_cmd + if torch.cuda.is_available() and (torch.cuda.device_count() > 1): + cmd += ["--multi_gpu"] + execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy()) + + def test_config_compatibility(self): + for config in sorted(self.test_config_path.glob("**/*.yaml")): + with self.subTest(config_file=config): + execute_subprocess_async( + self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy() + ) + + +class TpuConfigTester(unittest.TestCase): + """ + Test case for verifying the `accelerate tpu-config` CLI passes the right `gcloud` command. 
+ """ + + tpu_name = "test-tpu" + tpu_zone = "us-central1-a" + command = "ls" + cmd = ["accelerate", "tpu-config"] + base_output = "cd /usr/share" + command_file = "tests/test_samples/test_command_file.sh" + gcloud = "Running gcloud compute tpus tpu-vm ssh" + + @staticmethod + def clean_output(output): + return "".join(output).rstrip() + + def test_base(self): + output = run_command( + self.cmd + + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", + ) + + def test_base_backward_compatibility(self): + output = run_command( + self.cmd + + [ + "--config_file", + "tests/test_configs/0_12_0.yaml", + "--command", + self.command, + "--tpu_zone", + self.tpu_zone, + "--tpu_name", + self.tpu_name, + "--debug", + ], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", + ) + + def test_with_config_file(self): + output = run_command( + self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True + ) + self.assertEqual( + self.clean_output(output), + f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', + ) + + def test_with_config_file_and_command(self): + output = run_command( + self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", + ) + + def test_with_config_file_and_multiple_command(self): + output = run_command( + self.cmd + + [ + "--config_file", + "tests/test_configs/latest.yaml", + "--command", + self.command, + "--command", + 'echo "Hello World"', + "--debug", + ], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all', + ) + + def test_with_config_file_and_command_file(self): + output = run_command( + self.cmd + + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', + ) + + def test_with_config_file_and_command_file_backward_compatibility(self): + output = run_command( + self.cmd + + [ + "--config_file", + "tests/test_configs/0_12_0.yaml", + "--command_file", + self.command_file, + "--tpu_zone", + self.tpu_zone, + "--tpu_name", + self.tpu_name, + "--debug", + ], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', + ) + + def test_accelerate_install(self): + output = run_command( + self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello 
world"; echo "this is a second command" --worker all', + ) + + def test_accelerate_install_version(self): + output = run_command( + self.cmd + + [ + "--config_file", + "tests/test_configs/latest.yaml", + "--install_accelerate", + "--accelerate_version", + "12.0.0", + "--debug", + ], + return_stdout=True, + ) + self.assertEqual( + self.clean_output(output), + f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all', + ) diff --git a/testbed/huggingface__accelerate/tests/test_configs/0_11_0.yaml b/testbed/huggingface__accelerate/tests/test_configs/0_11_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ef829e6b53811462f6ffc457312b48e2055ffad --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_configs/0_11_0.yaml @@ -0,0 +1,12 @@ +compute_environment: LOCAL_MACHINE +deepspeed_config: {} +distributed_type: 'NO' +fsdp_config: {} +machine_rank: 0 +main_process_ip: null +main_process_port: null +main_training_function: main +mixed_precision: 'no' +num_machines: 1 +num_processes: 1 +use_cpu: false \ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/test_configs/0_12_0.yaml b/testbed/huggingface__accelerate/tests/test_configs/0_12_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..00d06aab98f2a5489d7a1505dc8e1107fcc20762 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_configs/0_12_0.yaml @@ -0,0 +1,13 @@ +compute_environment: LOCAL_MACHINE +deepspeed_config: {} +distributed_type: 'NO' +downcast_bf16: 'no' +fsdp_config: {} +machine_rank: 0 +main_process_ip: null +main_process_port: null +main_training_function: main +mixed_precision: 'no' +num_machines: 1 +num_processes: 1 +use_cpu: false \ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/test_configs/README.md b/testbed/huggingface__accelerate/tests/test_configs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fd88d066c5dfdb82fe5067a8c5644fffa3054d99 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_configs/README.md @@ -0,0 +1,2 @@ +This folder contains test configs for `accelerate config`. These should be generated for each major version +and are written based on `accelerate config` and selecting the "No distributed training" option. 
\ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/test_configs/latest.yaml b/testbed/huggingface__accelerate/tests/test_configs/latest.yaml new file mode 100644 index 0000000000000000000000000000000000000000..456348ef9e6d01326d727b71c1dddd4177f89861 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_configs/latest.yaml @@ -0,0 +1,21 @@ +compute_environment: LOCAL_MACHINE +deepspeed_config: {} +distributed_type: 'NO' +downcast_bf16: 'no' +fsdp_config: {} +gpu_ids: all +machine_rank: 0 +main_process_ip: null +main_process_port: null +main_training_function: main +megatron_lm_config: {} +mixed_precision: 'no' +num_machines: 1 +num_processes: 1 +rdzv_backend: static +same_network: true +use_cpu: false +tpu_name: 'test-tpu' +tpu_zone: 'us-central1-a' +commands: null +command_file: tests/test_samples/test_command_file.sh \ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/test_cpu.py b/testbed/huggingface__accelerate/tests/test_cpu.py new file mode 100644 index 0000000000000000000000000000000000000000..ab73058c1ad0c8548c20db62d7b31f8d85c28df6 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_cpu.py @@ -0,0 +1,24 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from accelerate import debug_launcher +from accelerate.test_utils import require_cpu, test_script + + +@require_cpu +class MultiCPUTester(unittest.TestCase): + def test_cpu(self): + debug_launcher(test_script.main) diff --git a/testbed/huggingface__accelerate/tests/test_data_loader.py b/testbed/huggingface__accelerate/tests/test_data_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..a18dae7324d211bbfe1ad297914432fe25524fb7 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_data_loader.py @@ -0,0 +1,356 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
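The `BatchSamplerShard` tests below all encode one contract for the default `split_batches=False`, `even_batches=True` case: the flattened index stream is padded by cycling back to its start until it divides evenly into `batch_size * num_processes`, and the resulting batches are dealt out round-robin. The following self-contained sketch reproduces those expected lists; it is illustrative only (a hypothetical `shard_batches_sketch`, not the `BatchSamplerShard` implementation, and it ignores `drop_last`, `even_batches=False`, and `split_batches=True`).

```python
import itertools
import math
from typing import List, Sequence


def shard_batches_sketch(
    indices: Sequence[int], batch_size: int, num_processes: int, process_index: int
) -> List[List[int]]:
    # Pad the flattened index stream by cycling back to its start until its
    # length is a round multiple of batch_size * num_processes, then deal the
    # batches out round-robin: process i takes batches i, i + n, i + 2n, ...
    total = batch_size * num_processes
    padded_len = math.ceil(len(indices) / total) * total if len(indices) else 0
    padded = list(itertools.islice(itertools.cycle(indices), padded_len))
    batches = [padded[i : i + batch_size] for i in range(0, padded_len, batch_size)]
    return batches[process_index::num_processes]


# Reproduces, e.g., the range(20) expectations in the tests below:
# shard_batches_sketch(range(20), 3, 2, 0) -> [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]]
# shard_batches_sketch(range(20), 3, 2, 1) -> [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]]
```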
+
+import random
+import unittest
+
+from torch.utils.data import BatchSampler, IterableDataset
+
+from accelerate.data_loader import BatchSamplerShard, IterableDatasetShard
+
+
+class RandomIterableDataset(IterableDataset):
+ # For testing, an iterable dataset of random length
+ def __init__(self, p_stop=0.01, max_length=1000):
+ self.p_stop = p_stop
+ self.max_length = max_length
+
+ def __iter__(self):
+ count = 0
+ stop = False
+ while not stop and count < self.max_length:
+ yield count
+ count += 1
+ stop = random.random() < self.p_stop
+
+
+class DataLoaderTester(unittest.TestCase):
+ def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
+ batch_sampler_shards = [
+ BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
+ for i in range(2)
+ ]
+ batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
+ if not split_batches:
+ self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
+ self.assertListEqual(batch_sampler_lists, expected)
+
+ def test_batch_sampler_shards_with_no_splits(self):
+ # Check the shards when the dataset is a round multiple of total batch size.
+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected)
+
+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
+ # Expected shouldn't change
+ self.check_batch_sampler_shards(batch_sampler, expected)
+
+ # Check the shards when the dataset is a round multiple of batch size but not total batch size.
+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected)
+
+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected)
+
+ # Check the shards when the dataset is not a round multiple of batch size but the number of batches
+ # is a multiple of num_processes.
+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected)
+
+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected)
+
+ # Check the shards when the dataset is not a round multiple of batch size and the number of batches
+ # is not a multiple of num_processes.
+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) + expected = [ + [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], + [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], + ] + self.check_batch_sampler_shards(batch_sampler, expected) + + batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) + expected = [ + [[0, 1, 2], [6, 7, 8], [12, 13, 14]], + [[3, 4, 5], [9, 10, 11], [15, 16, 17]], + ] + self.check_batch_sampler_shards(batch_sampler, expected) + + # Check the shards when the dataset is very small. + batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) + expected = [[[0, 1, 0]], [[1, 0, 1]]] + self.check_batch_sampler_shards(batch_sampler, expected) + + batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) + expected = [[], []] + self.check_batch_sampler_shards(batch_sampler, expected) + + def test_batch_sampler_shards_with_splits(self): + # Check the shards when the dataset is a round multiple of batch size. + batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) + # Expected shouldn't change + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + # Check the shards when the dataset is not a round multiple of batch size. + batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + # Check the shards when the dataset is not a round multiple of batch size or num_processes. + batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + # Check the shards when the dataset is very small. + batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) + expected = [[[0, 1]], [[0, 1]]] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) + expected = [[], []] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) + + def test_batch_sampler_shards_with_no_splits_no_even(self): + # Check the shards when the dataset is a round multiple of total batch size. 
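+ # (24 samples with batch size 3 across 2 processes give 4 full batches per shard, so no wraparound is needed and even_batches=False changes nothing here.)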
+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
+ # Expected shouldn't change
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ # Check the shards when the dataset is a round multiple of batch size but not total batch size.
+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ # Check the shards when the dataset is not a round multiple of batch size but the number of batches
+ # is a multiple of num_processes.
+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ # Check the shards when the dataset is not a round multiple of batch size and the number of batches
+ # is not a multiple of num_processes.
+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
+ expected = [
+ [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
+ [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ # Check the shards when the dataset is very small.
+ batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
+ expected = [[[0, 1]], []]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
+ expected = [[], []]
+ self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
+
+ def test_batch_sampler_shards_with_splits_no_even(self):
+ # Check the shards when the dataset is a round multiple of batch size.
+ batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
+ expected = [
+ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
+ [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
+ ]
+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
+
+ batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
+ # Expected shouldn't change
+ self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
+
+ # Check the shards when the dataset is not a round multiple of batch size.
+ batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) + + batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) + + # Check the shards when the dataset is not a round multiple of batch size or num_processes. + batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) + + batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) + expected = [ + [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], + [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], + ] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) + + # Check the shards when the dataset is very small. + batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) + expected = [[[0, 1]], []] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) + + batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) + expected = [[], []] + self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) + + def test_batch_sampler_with_varying_batch_size(self): + batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] + batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)] + + self.assertEqual(len(batch_sampler_shards[0]), 3) + self.assertEqual(len(batch_sampler_shards[1]), 2) + + self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]]) + self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]]) + + def check_iterable_dataset_shards( + self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False + ): + random.seed(seed) + reference = list(dataset) + + iterable_dataset_shards = [ + IterableDatasetShard( + dataset, + batch_size=batch_size, + drop_last=drop_last, + num_processes=num_processes, + process_index=i, + split_batches=split_batches, + ) + for i in range(num_processes) + ] + iterable_dataset_lists = [] + for iterable_dataset_shard in iterable_dataset_shards: + # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
+ random.seed(seed)
+ iterable_dataset_lists.append(list(iterable_dataset_shard))
+
+ shard_batch_size = batch_size // num_processes if split_batches else batch_size
+ # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
+ first_list = iterable_dataset_lists[0]
+ for l in iterable_dataset_lists[1:]:
+ self.assertEqual(len(l), len(first_list))
+ self.assertTrue(len(l) % shard_batch_size == 0)
+
+ observed = []
+ for idx in range(0, len(first_list), shard_batch_size):
+ for l in iterable_dataset_lists:
+ observed += l[idx : idx + shard_batch_size]
+
+ if not drop_last:
+ while len(reference) < len(observed):
+ reference += reference
+ self.assertListEqual(observed, reference[: len(observed)])
+
+ def test_iterable_dataset_shard(self):
+ seed = 42
+ dataset = RandomIterableDataset()
+
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
+
+ # Edge case with a very small dataset
+ dataset = RandomIterableDataset(max_length=2)
+
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
+ self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
diff --git a/testbed/huggingface__accelerate/tests/test_examples.py b/testbed/huggingface__accelerate/tests/test_examples.py
new file mode 100644
index 0000000000000000000000000000000000000000..05688f2b5e976c7968029c953c94c1f0ca2f95bb
--- /dev/null
+++ b/testbed/huggingface__accelerate/tests/test_examples.py
@@ -0,0 +1,216 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
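Restating the invariant that `check_iterable_dataset_shards` in test_data_loader.py above verifies: interleaving the shards one batch at a time must reproduce the original sample stream, up to wraparound padding at the end when `drop_last=False`. A compact, hypothetical helper expressing that check (not part of the test suite):

```python
from typing import List, Sequence


def reassemble_shards_sketch(shards: Sequence[List[int]], shard_batch_size: int) -> List[int]:
    # Interleave the shards one batch at a time: batch 0 of every shard, then
    # batch 1 of every shard, and so on. With correct sharding this rebuilds
    # the original stream that the shards were dealt from.
    length = len(shards[0])
    assert all(len(shard) == length for shard in shards), "shards must have equal length"
    assert length % shard_batch_size == 0, "shard length must be a whole number of batches"
    observed: List[int] = []
    for start in range(0, length, shard_batch_size):
        for shard in shards:
            observed.extend(shard[start : start + shard_batch_size])
    return observed
```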
+ +import ast +import os +import re +import shutil +import tempfile +import unittest +from unittest import mock + +import torch + +from accelerate.test_utils.examples import compare_against_test +from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow +from accelerate.utils import write_basic_config + + +# DataLoaders built from `test_samples/MRPC` for quick testing +# Should mock `{script_name}.get_dataloaders` via: +# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) + +EXCLUDE_EXAMPLES = [ + "cross_validation.py", + "gradient_accumulation.py", + "multi_process_metrics.py", + "memory.py", + "automatic_gradient_accumulation.py", + "fsdp_with_peak_mem_tracking.py", + "deepspeed_with_config_support.py", + "megatron_lm_gpt_pretraining.py", +] + + +class ExampleDifferenceTests(unittest.TestCase): + """ + This TestCase checks that all of the `complete_*` scripts contain all of the + information found in the `by_feature` scripts, line for line. If one fails, + then a complete example does not contain all of the features in the features + scripts, and should be updated. + + Each example script should be a single test (such as `test_nlp_example`), + and should run `one_complete_example` twice: once with `parser_only=True`, + and the other with `parser_only=False`. This is so that when the test + failures are returned to the user, they understand if the discrepancy lies in + the `main` function, or the `training_loop` function. Otherwise it will be + unclear. + + Also, if there are any expected differences between the base script used and + `complete_nlp_example.py` (the canonical base script), these should be included in + `special_strings`. These would be differences in how something is logged, print statements, + etc (such as calls to `Accelerate.log()`) + """ + + def one_complete_example( + self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None + ): + """ + Tests a single `complete` example against all of the implemented `by_feature` scripts + + Args: + complete_file_name (`str`): + The filename of a complete example + parser_only (`bool`): + Whether to look at the main training function, or the argument parser + secondary_filename (`str`, *optional*): + A potential secondary base file to strip all script information not relevant for checking, + such as "cv_example.py" when testing "complete_cv_example.py" + special_strings (`list`, *optional*): + A list of strings to potentially remove before checking no differences are left. These should be + diffs that are file specific, such as different logging variations between files. 
+ """ + self.maxDiff = None + by_feature_path = os.path.abspath(os.path.join("examples", "by_feature")) + examples_path = os.path.abspath("examples") + for item in os.listdir(by_feature_path): + if item not in EXCLUDE_EXAMPLES: + item_path = os.path.join(by_feature_path, item) + if os.path.isfile(item_path) and ".py" in item_path: + with self.subTest( + tested_script=complete_file_name, + feature_script=item, + tested_section="main()" if parser_only else "training_function()", + ): + diff = compare_against_test( + os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename + ) + diff = "\n".join(diff) + if special_strings is not None: + for string in special_strings: + diff = diff.replace(string, "") + self.assertEqual(diff, "") + + def test_nlp_examples(self): + self.one_complete_example("complete_nlp_example.py", True) + self.one_complete_example("complete_nlp_example.py", False) + + def test_cv_examples(self): + cv_path = os.path.abspath(os.path.join("examples", "cv_example.py")) + special_strings = [ + " " * 16 + "{\n\n", + " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n', + " " * 20 + '"f1": eval_metric["f1"],\n\n', + " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', + " " * 20 + '"epoch": epoch,\n\n', + " " * 16 + "},\n\n", + " " * 16 + "step=epoch,\n", + " " * 12, + ] + self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings) + self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings) + + +@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"}) +class FeatureExamplesTests(TempDirTestCase): + clear_on_setup = False + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls._tmpdir = tempfile.mkdtemp() + cls.configPath = os.path.join(cls._tmpdir, "default_config.yml") + + write_basic_config(save_location=cls.configPath) + cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath] + + @classmethod + def tearDownClass(cls): + super().tearDownClass() + shutil.rmtree(cls._tmpdir) + + def test_checkpointing_by_epoch(self): + testargs = f""" + examples/by_feature/checkpointing.py + --checkpointing_steps epoch + --output_dir {self.tmpdir} + """.split() + run_command(self._launch_args + testargs) + self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0"))) + + def test_checkpointing_by_steps(self): + testargs = f""" + examples/by_feature/checkpointing.py + --checkpointing_steps 1 + --output_dir {self.tmpdir} + """.split() + _ = run_command(self._launch_args + testargs) + self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2"))) + + def test_load_states_by_epoch(self): + testargs = f""" + examples/by_feature/checkpointing.py + --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")} + """.split() + output = run_command(self._launch_args + testargs, return_stdout=True) + self.assertNotIn("epoch 0:", output) + self.assertIn("epoch 1:", output) + + def test_load_states_by_steps(self): + testargs = f""" + examples/by_feature/checkpointing.py + --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")} + """.split() + output = run_command(self._launch_args + testargs, return_stdout=True) + if torch.cuda.is_available(): + num_processes = torch.cuda.device_count() + else: + num_processes = 1 + if num_processes > 1: + self.assertNotIn("epoch 0:", output) + self.assertIn("epoch 1:", output) + else: + self.assertIn("epoch 0:", output) + self.assertIn("epoch 1:", output) + + @slow + def test_cross_validation(self): + 
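+ # Runs the real dataloaders rather than the mocked ones; note the TESTING_MOCKED_DATALOADERS="0" override below.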
testargs = """ + examples/by_feature/cross_validation.py + --num_folds 2 + """.split() + with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}): + output = run_command(self._launch_args + testargs, return_stdout=True) + results = ast.literal_eval(re.findall("({.+})", output)[-1]) + self.assertGreaterEqual(results["accuracy"], 0.75) + + def test_multi_process_metrics(self): + testargs = ["examples/by_feature/multi_process_metrics.py"] + run_command(self._launch_args + testargs) + + @require_trackers + @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) + def test_tracking(self): + with tempfile.TemporaryDirectory() as tmpdir: + testargs = f""" + examples/by_feature/tracking.py + --with_tracking + --logging_dir {tmpdir} + """.split() + run_command(self._launch_args + testargs) + self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking"))) + + def test_gradient_accumulation(self): + testargs = ["examples/by_feature/gradient_accumulation.py"] + run_command(self._launch_args + testargs) diff --git a/testbed/huggingface__accelerate/tests/test_grad_sync.py b/testbed/huggingface__accelerate/tests/test_grad_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..182d3ef9570f31cdead12d4fb712921c4765e27d --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_grad_sync.py @@ -0,0 +1,55 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +import unittest + +import torch + +import accelerate +from accelerate import debug_launcher +from accelerate.test_utils import ( + execute_subprocess_async, + require_cpu, + require_multi_gpu, + require_single_gpu, + test_sync, +) +from accelerate.utils import get_launch_prefix, patch_environment + + +class SyncScheduler(unittest.TestCase): + def setUp(self): + mod_file = inspect.getfile(accelerate.test_utils) + self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_sync.py"]) + + @require_cpu + def test_gradient_sync_cpu_noop(self): + debug_launcher(test_sync.main, num_processes=1) + + @require_cpu + def test_gradient_sync_cpu_multi(self): + debug_launcher(test_sync.main) + + @require_single_gpu + def test_gradient_sync_gpu(self): + test_sync.main() + + @require_multi_gpu + def test_gradient_sync_gpu_multi(self): + print(f"Found {torch.cuda.device_count()} devices.") + cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd, env=os.environ.copy()) diff --git a/testbed/huggingface__accelerate/tests/test_hooks.py b/testbed/huggingface__accelerate/tests/test_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..e3824809c51f0035d20276a8ac4c7389d8263d6f --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_hooks.py @@ -0,0 +1,350 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import torch +import torch.nn as nn + +from accelerate.hooks import ( + AlignDevicesHook, + ModelHook, + SequentialHook, + add_hook_to_module, + attach_align_device_hook, + remove_hook_from_module, + remove_hook_from_submodules, +) +from accelerate.test_utils import require_multi_gpu, require_torch_min_version + + +class ModelForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(3, 4) + self.batchnorm = nn.BatchNorm1d(4) + self.linear2 = nn.Linear(4, 5) + + def forward(self, x): + return self.linear2(self.batchnorm(self.linear1(x))) + + +class PreForwardHook(ModelHook): + def pre_forward(self, module, *args, **kwargs): + return (args[0] + 1,) + args[1:], kwargs + + +class PostForwardHook(ModelHook): + def post_forward(self, module, output): + return output + 1 + + +@require_torch_min_version(version="1.9.0") +class HooksModelTester(unittest.TestCase): + def test_add_and_remove_hooks(self): + test_model = ModelForTest() + test_hook = ModelHook() + + add_hook_to_module(test_model, test_hook) + self.assertEqual(test_model._hf_hook, test_hook) + self.assertTrue(hasattr(test_model, "_old_forward")) + + # Check adding the hook did not change the name or the signature + self.assertEqual(test_model.forward.__name__, "forward") + self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"]) + + remove_hook_from_module(test_model) + self.assertFalse(hasattr(test_model, "_hf_hook")) + self.assertFalse(hasattr(test_model, "_old_forward")) + + def test_append_and_remove_hooks(self): + test_model = ModelForTest() + test_hook = ModelHook() + + add_hook_to_module(test_model, test_hook) + add_hook_to_module(test_model, test_hook, append=True) + + self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True) + self.assertEqual(len(test_model._hf_hook.hooks), 2) + self.assertTrue(hasattr(test_model, "_old_forward")) + + # Check adding the hook did not change the name or the signature + self.assertEqual(test_model.forward.__name__, "forward") + self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"]) + + remove_hook_from_module(test_model) + self.assertFalse(hasattr(test_model, "_hf_hook")) + self.assertFalse(hasattr(test_model, "_old_forward")) + + def test_pre_forward_hook_is_executed(self): + test_model = ModelForTest() + x = torch.randn(2, 3) + expected = test_model(x + 1) + expected2 = test_model(x + 2) + + test_hook = PreForwardHook() + add_hook_to_module(test_model, test_hook) + output1 = test_model(x) + self.assertTrue(torch.allclose(output1, expected, atol=1e-5)) + + # Attaching a hook to a model when it already has one replaces, does not chain + test_hook = PreForwardHook() + add_hook_to_module(test_model, test_hook) + output1 = test_model(x) + self.assertTrue(torch.allclose(output1, expected, atol=1e-5)) + + # You need to use the sequential hook to chain two or more hooks + test_hook = SequentialHook(PreForwardHook(), 
PreForwardHook()) + add_hook_to_module(test_model, test_hook) + + output2 = test_model(x) + assert torch.allclose(output2, expected2, atol=1e-5) + + def test_post_forward_hook_is_executed(self): + test_model = ModelForTest() + x = torch.randn(2, 3) + output = test_model(x) + + test_hook = PostForwardHook() + add_hook_to_module(test_model, test_hook) + output1 = test_model(x) + self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5)) + + # Attaching a hook to a model when it already has one replaces, does not chain + test_hook = PostForwardHook() + add_hook_to_module(test_model, test_hook) + output1 = test_model(x) + self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5)) + + # You need to use the sequential hook to chain two or more hooks + test_hook = SequentialHook(PostForwardHook(), PostForwardHook()) + add_hook_to_module(test_model, test_hook) + + output2 = test_model(x) + assert torch.allclose(output2, output + 2, atol=1e-5) + + def test_no_grad_in_hook(self): + test_model = ModelForTest() + x = torch.randn(2, 3) + output = test_model(x) + + test_hook = PostForwardHook() + add_hook_to_module(test_model, test_hook) + output1 = test_model(x) + self.assertTrue(torch.allclose(output1, output + 1)) + self.assertTrue(output1.requires_grad) + + test_hook.no_grad = True + output1 = test_model(x) + self.assertFalse(output1.requires_grad) + + @require_multi_gpu + def test_align_devices_as_model_parallelism(self): + model = ModelForTest() + # Everything is on CPU + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + # This will move each submodule on different devices + add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0)) + add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0)) + add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1)) + + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.running_mean.device, torch.device(0)) + self.assertEqual(model.linear2.weight.device, torch.device(1)) + + # We can still make a forward pass. The input does not need to be on any particular device + x = torch.randn(2, 3) + output = model(x) + self.assertEqual(output.device, torch.device(1)) + + # We can add a general hook to put back output on same device as input. 
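+ # (With io_same_device=True, the hook records the device of the inputs in pre_forward and moves the output back to it in post_forward.)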
+        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
+        x = torch.randn(2, 3).to(0)
+        output = model(x)
+        self.assertEqual(output.device, torch.device(0))
+
+    def test_align_devices_as_cpu_offload(self):
+        model = ModelForTest()
+
+        # Everything is on CPU
+        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
+        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
+
+        # Offload every submodule, executing on the GPU when one is available
+        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
+
+        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
+        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
+        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
+
+        # Parameters have been offloaded, so they are on the meta device
+        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
+        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
+        # Buffers are not included in the offload by default, so they stay on the execution device
+        device = torch.device(hook_kwargs["execution_device"])
+        self.assertEqual(model.batchnorm.running_mean.device, device)
+
+        x = torch.randn(2, 3)
+        output = model(x)
+        self.assertEqual(output.device, device)
+
+        # Removing hooks loads the weights back into the model.
+        remove_hook_from_module(model.linear1)
+        remove_hook_from_module(model.batchnorm)
+        remove_hook_from_module(model.linear2)
+        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
+        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
+
+        # Now test with buffers included in the offload
+        hook_kwargs = {
+            "execution_device": 0 if torch.cuda.is_available() else "cpu",
+            "offload": True,
+            "offload_buffers": True,
+        }
+
+        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
+        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
+        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
+
+        # Parameters have been offloaded, so they are on the meta device, buffers included
+        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
+        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
+        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
+
+        x = torch.randn(2, 3)
+        output = model(x)
+        self.assertEqual(output.device, device)
+
+        # Removing hooks loads the weights back into the model.
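+        # (each offloaded tensor leaves the "meta" device and is restored to its
+        # original device from the copy saved when the hook was attached)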
+        remove_hook_from_module(model.linear1)
+        remove_hook_from_module(model.batchnorm)
+        remove_hook_from_module(model.linear2)
+        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
+        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
+
+    def test_attach_align_device_hook_as_cpu_offload(self):
+        model = ModelForTest()
+
+        # Everything is on CPU
+        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
+        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
+
+        # Attach an offloading hook to every submodule, executing on the GPU when one is available
+        execution_device = 0 if torch.cuda.is_available() else "cpu"
+        attach_align_device_hook(model, execution_device=execution_device, offload=True)
+
+        # Parameters have been offloaded, so they are on the meta device
+        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
+        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
+        # Buffers are not included in the offload by default, so they stay on the execution device
+        device = torch.device(execution_device)
+        self.assertEqual(model.batchnorm.running_mean.device, device)
+
+        x = torch.randn(2, 3)
+        output = model(x)
+        self.assertEqual(output.device, device)
+
+        # Removing hooks loads the weights back into the model.
+        remove_hook_from_submodules(model)
+        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
+        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
+
+        # Now test with buffers included in the offload
+        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
+
+        # Parameters have been offloaded, so they are on the meta device, buffers included
+        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
+        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
+        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
+        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
+
+        x = torch.randn(2, 3)
+        output = model(x)
+        self.assertEqual(output.device, device)
+
+        # Removing hooks loads the weights back into the model.
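+        # (`remove_hook_from_submodules` recurses through the children, so one call
+        # detaches the hooks that `attach_align_device_hook` placed on each submodule)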
+ remove_hook_from_submodules(model) + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self): + model = ModelForTest() + + # Everything is on CPU + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + # This will move each submodule on different devices + execution_device = 0 if torch.cuda.is_available() else "cpu" + attach_align_device_hook( + model, execution_device=execution_device, offload=True, weights_map=model.state_dict() + ) + + # Parameters have been offloaded, so on the meta device + self.assertEqual(model.linear1.weight.device, torch.device("meta")) + self.assertEqual(model.batchnorm.weight.device, torch.device("meta")) + self.assertEqual(model.linear2.weight.device, torch.device("meta")) + # Buffers are not included in the offload by default, so are on the execution device + device = torch.device(execution_device) + self.assertEqual(model.batchnorm.running_mean.device, device) + + x = torch.randn(2, 3) + output = model(x) + self.assertEqual(output.device, device) + + # Removing hooks loads back the weights in the model. + remove_hook_from_submodules(model) + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + # Now test with buffers included in the offload + attach_align_device_hook( + model, + execution_device=execution_device, + offload=True, + weights_map=model.state_dict(), + offload_buffers=True, + ) + + # Parameters have been offloaded, so on the meta device, buffers included + self.assertEqual(model.linear1.weight.device, torch.device("meta")) + self.assertEqual(model.batchnorm.weight.device, torch.device("meta")) + self.assertEqual(model.linear2.weight.device, torch.device("meta")) + self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta")) + + x = torch.randn(2, 3) + output = model(x) + self.assertEqual(output.device, device) + + # Removing hooks loads back the weights in the model. + remove_hook_from_submodules(model) + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) diff --git a/testbed/huggingface__accelerate/tests/test_kwargs_handlers.py b/testbed/huggingface__accelerate/tests/test_kwargs_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c8932345a404018c3a332f4f615f20462a96b9 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_kwargs_handlers.py @@ -0,0 +1,98 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import os
+import sys
+import unittest
+from dataclasses import dataclass
+
+import torch
+
+from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
+from accelerate.state import AcceleratorState
+from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
+from accelerate.utils import KwargsHandler
+
+
+@dataclass
+class MockClass(KwargsHandler):
+    a: int = 0
+    b: bool = False
+    c: float = 3.0
+
+
+class KwargsHandlerTester(unittest.TestCase):
+    def test_kwargs_handler(self):
+        # If no defaults are changed, `to_kwargs` returns an empty dict.
+        self.assertDictEqual(MockClass().to_kwargs(), {})
+        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
+        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
+        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
+
+    @require_cuda
+    def test_grad_scaler_kwargs(self):
+        # The handler values should be forwarded to the `GradScaler` built by the `Accelerator`.
+        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
+        AcceleratorState._reset_state()
+        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
+        scaler = accelerator.scaler
+
+        # Check the kwargs have been applied
+        self.assertEqual(scaler._init_scale, 1024.0)
+        self.assertEqual(scaler._growth_factor, 2.0)
+
+        # Check the other values are at the default
+        self.assertEqual(scaler._backoff_factor, 0.5)
+        self.assertEqual(scaler._growth_interval, 2000)
+        self.assertEqual(scaler._enabled, True)
+
+    @require_multi_gpu
+    def test_ddp_kwargs(self):
+        distributed_args = f"""
+            -m torch.distributed.launch
+            --nproc_per_node={torch.cuda.device_count()}
+            --use_env
+            {inspect.getfile(self.__class__)}
+        """.split()
+        cmd = [sys.executable] + distributed_args
+        execute_subprocess_async(cmd, env=os.environ.copy())
+
+
+if __name__ == "__main__":
+    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
+    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
+    model = torch.nn.Linear(100, 200)
+    model = accelerator.prepare(model)
+
+    # Check the values changed in kwargs
+    error_msg = ""
+    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
+    if observed_bucket_cap_map != 15:
+        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
+    if model.find_unused_parameters is not True:
+        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
+
+    # Check the values of the defaults
+    if model.dim != 0:
+        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
+    if model.broadcast_buffers is not True:
+        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
+    if model.gradient_as_bucket_view is not False:
+        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
+
+    # Raise error at the end to make sure we don't stop at the first failure.
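+    # (this `__main__` block is what `test_ddp_kwargs` above launches in each
+    # subprocess, so it runs outside the unittest runner)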
+ if len(error_msg) > 0: + raise ValueError(error_msg) diff --git a/testbed/huggingface__accelerate/tests/test_memory_utils.py b/testbed/huggingface__accelerate/tests/test_memory_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..09e3b4e184a56e41796859ff245c846f1f612e5f --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_memory_utils.py @@ -0,0 +1,115 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch +from torch import nn + +from accelerate.test_utils import require_cuda +from accelerate.utils.memory import find_executable_batch_size, release_memory + + +def raise_fake_out_of_memory(): + raise RuntimeError("CUDA out of memory.") + + +class ModelForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(3, 4) + self.batchnorm = nn.BatchNorm1d(4) + self.linear2 = nn.Linear(4, 5) + + def forward(self, x): + return self.linear2(self.batchnorm(self.linear1(x))) + + +class MemoryTest(unittest.TestCase): + def test_memory_implicit(self): + batch_sizes = [] + + @find_executable_batch_size(starting_batch_size=128) + def mock_training_loop_function(batch_size): + nonlocal batch_sizes + batch_sizes.append(batch_size) + if batch_size != 8: + raise_fake_out_of_memory() + + mock_training_loop_function() + self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8]) + + def test_memory_explicit(self): + batch_sizes = [] + + @find_executable_batch_size(starting_batch_size=128) + def mock_training_loop_function(batch_size, arg1): + nonlocal batch_sizes + batch_sizes.append(batch_size) + if batch_size != 8: + raise_fake_out_of_memory() + return batch_size, arg1 + + bs, arg1 = mock_training_loop_function("hello") + self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8]) + self.assertListEqual([bs, arg1], [8, "hello"]) + + def test_start_zero(self): + @find_executable_batch_size(starting_batch_size=0) + def mock_training_loop_function(batch_size): + pass + + with self.assertRaises(RuntimeError) as cm: + mock_training_loop_function() + self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0]) + + def test_approach_zero(self): + @find_executable_batch_size(starting_batch_size=16) + def mock_training_loop_function(batch_size): + if batch_size > 0: + raise_fake_out_of_memory() + pass + + with self.assertRaises(RuntimeError) as cm: + mock_training_loop_function() + self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0]) + + def test_verbose_guard(self): + @find_executable_batch_size(starting_batch_size=128) + def mock_training_loop_function(batch_size, arg1, arg2): + if batch_size != 8: + raise raise_fake_out_of_memory() + + with self.assertRaises(TypeError) as cm: + mock_training_loop_function(128, "hello", "world") + self.assertIn("Batch size was passed into `f`", cm.exception.args[0]) + self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0]) + + def test_any_other_error(self): + 
@find_executable_batch_size(starting_batch_size=16) + def mock_training_loop_function(batch_size): + raise ValueError("Oops, we had an error!") + + with self.assertRaises(ValueError) as cm: + mock_training_loop_function() + self.assertIn("Oops, we had an error!", cm.exception.args[0]) + + @require_cuda + def test_release_memory(self): + self.assertEqual(torch.cuda.memory_allocated(), 0) + model = ModelForTest() + model.cuda() + self.assertGreater(torch.cuda.memory_allocated(), 0) + model = release_memory(model) + self.assertEqual(torch.cuda.memory_allocated(), 0) diff --git a/testbed/huggingface__accelerate/tests/test_metrics.py b/testbed/huggingface__accelerate/tests/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9dd0984568a3fb4c5a1996605cbccaa75316ceb4 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_metrics.py @@ -0,0 +1,64 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +import unittest + +import torch + +import accelerate +from accelerate import debug_launcher +from accelerate.test_utils import ( + execute_subprocess_async, + require_cpu, + require_huggingface_suite, + require_multi_gpu, + require_single_gpu, + require_torch_min_version, +) +from accelerate.utils import get_launch_prefix, patch_environment + + +@require_huggingface_suite +@require_torch_min_version(version="1.8.0") +class MetricTester(unittest.TestCase): + def setUp(self): + mod_file = inspect.getfile(accelerate.test_utils) + self.test_file_path = os.path.sep.join( + mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"] + ) + + from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 + + self.test_metrics = test_metrics + + @require_cpu + def test_metric_cpu_noop(self): + debug_launcher(self.test_metrics.main, num_processes=1) + + @require_cpu + def test_metric_cpu_multi(self): + debug_launcher(self.test_metrics.main) + + @require_single_gpu + def test_metric_gpu(self): + self.test_metrics.main() + + @require_multi_gpu + def test_metric_gpu_multi(self): + print(f"Found {torch.cuda.device_count()} devices.") + cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd, env=os.environ.copy()) diff --git a/testbed/huggingface__accelerate/tests/test_modeling_utils.py b/testbed/huggingface__accelerate/tests/test_modeling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d9dcde6fed39ae2c970d6860c027809219562a --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_modeling_utils.py @@ -0,0 +1,415 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import tempfile +import unittest + +import torch +import torch.nn as nn + +from accelerate.test_utils import require_cuda, require_multi_gpu +from accelerate.test_utils.testing import require_torch_min_version +from accelerate.utils.modeling import ( + check_device_map, + clean_device_map, + compute_module_sizes, + find_tied_parameters, + get_balanced_memory, + infer_auto_device_map, + load_checkpoint_in_model, + named_module_tensors, + set_module_tensor_to_device, +) + + +class ModelForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(3, 4) + self.batchnorm = nn.BatchNorm1d(4) + self.linear2 = nn.Linear(4, 5) + + def forward(self, x): + return self.linear2(self.batchnorm(self.linear1(x))) + + +@require_torch_min_version(version="1.9.0") +class ModelingUtilsTester(unittest.TestCase): + def check_set_module_tensor_for_device(self, model, device1, device2): + self.assertEqual(model.linear1.weight.device, torch.device(device1)) + + with self.subTest("Access by submodule and direct name for a parameter"): + set_module_tensor_to_device(model.linear1, "weight", device2) + self.assertEqual(model.linear1.weight.device, torch.device(device2)) + + if torch.device(device2) == torch.device("meta"): + with self.assertRaises(ValueError): + # We need a `value` to set the weight back on device1 + set_module_tensor_to_device(model.linear1, "weight", device1) + + set_module_tensor_to_device(model.linear1, "weight", device1, value=torch.randn(4, 3)) + else: + set_module_tensor_to_device(model.linear1, "weight", device1) + self.assertEqual(model.linear1.weight.device, torch.device(device1)) + + with self.subTest("Access by module and full name for a parameter"): + set_module_tensor_to_device(model, "linear1.weight", device2) + self.assertEqual(model.linear1.weight.device, torch.device(device2)) + + if torch.device(device2) == torch.device("meta"): + with self.assertRaises(ValueError): + # We need a `value` to set the weight back on device1 + set_module_tensor_to_device(model, "linear1.weight", device1) + set_module_tensor_to_device(model, "linear1.weight", device1, value=torch.randn(4, 3)) + else: + set_module_tensor_to_device(model, "linear1.weight", device1) + self.assertEqual(model.linear1.weight.device, torch.device(device1)) + + self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1)) + + with self.subTest("Access by submodule and direct name for a buffer"): + set_module_tensor_to_device(model.batchnorm, "running_mean", device2) + self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2)) + + if torch.device(device2) == torch.device("meta"): + with self.assertRaises(ValueError): + # We need a `value` to set the weight back on device1 + set_module_tensor_to_device(model.batchnorm, "running_mean", device1) + set_module_tensor_to_device(model.batchnorm, "running_mean", device1, value=torch.randn(4)) + else: + set_module_tensor_to_device(model.batchnorm, "running_mean", device1) + self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1)) + + with self.subTest("Access by module and 
full name for a parameter"): + set_module_tensor_to_device(model, "batchnorm.running_mean", device2) + self.assertEqual(model.batchnorm.running_mean.device, torch.device(device2)) + + if torch.device(device2) == torch.device("meta"): + with self.assertRaises(ValueError): + # We need a `value` to set the weight back on CPU + set_module_tensor_to_device(model, "batchnorm.running_mean", device1) + + set_module_tensor_to_device(model, "batchnorm.running_mean", device1, value=torch.randn(4)) + else: + set_module_tensor_to_device(model, "batchnorm.running_mean", device1) + self.assertEqual(model.batchnorm.running_mean.device, torch.device(device1)) + + def test_set_module_tensor_to_meta_and_cpu(self): + model = ModelForTest() + self.check_set_module_tensor_for_device(model, "cpu", "meta") + + @require_cuda + def test_set_module_tensor_to_cpu_and_gpu(self): + model = ModelForTest() + self.check_set_module_tensor_for_device(model, "cpu", 0) + + @require_cuda + def test_set_module_tensor_to_meta_and_gpu(self): + model = ModelForTest().to(0) + self.check_set_module_tensor_for_device(model, 0, "meta") + + @require_multi_gpu + def test_set_module_tensor_between_gpus(self): + model = ModelForTest().to(0) + self.check_set_module_tensor_for_device(model, 0, 1) + + def test_set_module_tensor_sets_dtype(self): + model = ModelForTest() + set_module_tensor_to_device(model, "linear1.weight", "cpu", value=model.linear1.weight, dtype=torch.float16) + self.assertEqual(model.linear1.weight.dtype, torch.float16) + + def test_named_tensors(self): + model = nn.BatchNorm1d(4) + named_tensors = named_module_tensors(model) + self.assertListEqual( + [name for name, _ in named_tensors], + ["weight", "bias", "running_mean", "running_var", "num_batches_tracked"], + ) + + named_tensors = named_module_tensors(model, include_buffers=False) + self.assertListEqual([name for name, _ in named_tensors], ["weight", "bias"]) + + model = ModelForTest() + named_tensors = named_module_tensors(model) + self.assertListEqual([name for name, _ in named_tensors], []) + + named_tensors = named_module_tensors(model, recurse=True) + self.assertListEqual( + [name for name, _ in named_tensors], + [ + "linear1.weight", + "linear1.bias", + "batchnorm.weight", + "batchnorm.bias", + "linear2.weight", + "linear2.bias", + "batchnorm.running_mean", + "batchnorm.running_var", + "batchnorm.num_batches_tracked", + ], + ) + + named_tensors = named_module_tensors(model, include_buffers=False, recurse=True) + self.assertListEqual( + [name for name, _ in named_tensors], + ["linear1.weight", "linear1.bias", "batchnorm.weight", "batchnorm.bias", "linear2.weight", "linear2.bias"], + ) + + def test_find_tied_parameters(self): + model = ModelForTest() + self.assertDictEqual(find_tied_parameters(model), {}) + model.linear2.weight = model.linear1.weight + self.assertDictEqual(find_tied_parameters(model), {"linear1.weight": "linear2.weight"}) + + def test_compute_module_sizes(self): + model = ModelForTest() + expected_sizes = {"": 236, "linear1": 64, "linear1.weight": 48, "linear1.bias": 16} + expected_sizes.update({"linear2": 100, "linear2.weight": 80, "linear2.bias": 20}) + expected_sizes.update({"batchnorm": 72, "batchnorm.weight": 16, "batchnorm.bias": 16}) + expected_sizes.update( + {"batchnorm.running_mean": 16, "batchnorm.running_var": 16, "batchnorm.num_batches_tracked": 8} + ) + + module_sizes = compute_module_sizes(model) + self.assertDictEqual(module_sizes, expected_sizes) + + model.half() + expected_sizes = {k: s // 2 for k, s in expected_sizes.items()} + 
# This one is not converted to half. + expected_sizes["batchnorm.num_batches_tracked"] = 8 + # This impacts batchnorm and total + expected_sizes["batchnorm"] += 4 + expected_sizes[""] += 4 + + module_sizes = compute_module_sizes(model) + self.assertDictEqual(module_sizes, expected_sizes) + + def test_check_device_map(self): + model = ModelForTest() + check_device_map(model, {"": 0}) + with self.assertRaises(ValueError): + check_device_map(model, {"linear1": 0, "linear2": 1}) + + check_device_map(model, {"linear1": 0, "linear2": 1, "batchnorm": 1}) + + def shard_test_model(self, model, tmp_dir): + module_index = { + "linear1": "checkpoint_part1.bin", + "batchnorm": "checkpoint_part2.bin", + "linear2": "checkpoint_part3.bin", + } + index = {} + for name, _ in model.state_dict().items(): + module = name.split(".")[0] + index[name] = module_index[module] + + with open(os.path.join(tmp_dir, "weight_map.index.json"), "w") as f: + json.dump(index, f) + + for module, fname in module_index.items(): + state_dict = {k: v for k, v in model.state_dict().items() if k.startswith(module)} + full_fname = os.path.join(tmp_dir, fname) + torch.save(state_dict, full_fname) + + def test_load_checkpoint_in_model(self): + # Check with whole checkpoint + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + fname = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), fname) + load_checkpoint_in_model(model, fname) + + # Check with sharded index + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + self.shard_test_model(model, tmp_dir) + index_file = os.path.join(tmp_dir, "weight_map.index.json") + load_checkpoint_in_model(model, index_file) + + # Check with sharded checkpoint + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + self.shard_test_model(model, tmp_dir) + load_checkpoint_in_model(model, tmp_dir) + + @require_cuda + def test_load_checkpoint_in_model_one_gpu(self): + device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": "cpu"} + + # Check with whole checkpoint + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + fname = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), fname) + load_checkpoint_in_model(model, fname, device_map=device_map) + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + # Check with sharded index + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + self.shard_test_model(model, tmp_dir) + index_file = os.path.join(tmp_dir, "weight_map.index.json") + load_checkpoint_in_model(model, index_file, device_map=device_map) + + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + # Check with sharded checkpoint folder + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + self.shard_test_model(model, tmp_dir) + load_checkpoint_in_model(model, tmp_dir, device_map=device_map) + + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + @require_cuda + def test_load_checkpoint_in_model_disk_offload(self): + device_map = {"linear1": "cpu", "batchnorm": "disk", "linear2": 
"cpu"} + + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + fname = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), fname) + load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir) + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("meta")) + # Buffers are not offloaded by default + self.assertEqual(model.batchnorm.running_mean.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + fname = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), fname) + load_checkpoint_in_model(model, fname, device_map=device_map, offload_folder=tmp_dir, offload_buffers=True) + self.assertEqual(model.linear1.weight.device, torch.device("cpu")) + self.assertEqual(model.batchnorm.weight.device, torch.device("meta")) + self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta")) + self.assertEqual(model.linear2.weight.device, torch.device("cpu")) + + @require_multi_gpu + def test_load_checkpoint_in_model_two_gpu(self): + device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1} + + # Check with whole checkpoint + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + fname = os.path.join(tmp_dir, "pt_model.bin") + torch.save(model.state_dict(), fname) + load_checkpoint_in_model(model, fname, device_map=device_map) + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device(1)) + + # Check with sharded index + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + self.shard_test_model(model, tmp_dir) + index_file = os.path.join(tmp_dir, "weight_map.index.json") + load_checkpoint_in_model(model, index_file, device_map=device_map) + + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device(1)) + + # Check with sharded checkpoint + model = ModelForTest() + with tempfile.TemporaryDirectory() as tmp_dir: + self.shard_test_model(model, tmp_dir) + load_checkpoint_in_model(model, tmp_dir, device_map=device_map) + + self.assertEqual(model.linear1.weight.device, torch.device(0)) + self.assertEqual(model.batchnorm.weight.device, torch.device("cpu")) + self.assertEqual(model.linear2.weight.device, torch.device(1)) + + def test_clean_device_map(self): + # Regroup everything if all is on the same device + self.assertDictEqual(clean_device_map({"a": 0, "b": 0, "c": 0}), {"": 0}) + # Regroups children of level 1 on the same device + self.assertDictEqual( + clean_device_map({"a.x": 0, "a.y": 0, "b.x": 1, "b.y": 1, "c": 1}), {"a": 0, "b": 1, "c": 1} + ) + # Regroups children of level 2 on the same device + self.assertDictEqual( + clean_device_map({"a.x": 0, "a.y": 0, "b.x.0": 1, "b.x.1": 1, "b.y.0": 2, "b.y.1": 2, "c": 2}), + {"a": 0, "b.x": 1, "b.y": 2, "c": 2}, + ) + + def test_infer_auto_device_map(self): + model = ModelForTest() + # model has size 236: linear1 64, batchnorm 72, linear2 100 + + device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) + # only linear1 fits on device 0 as we keep memory available for the maximum layer in case of offload + self.assertDictEqual(device_map, 
{"linear1": 0, "batchnorm": 1, "linear2": 1}) + + device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 172, 2: 200}) + # On device 1, we don't care about keeping size available for the max layer, so even if there is just the + # size available for batchnorm + linear2, they fit here. + self.assertDictEqual(device_map, {"linear1": 0, "batchnorm": 1, "linear2": 1}) + + model.linear1.weight = model.linear2.weight + device_map = infer_auto_device_map(model, max_memory={0: 200, 1: 200}) + # By tying weights, the whole model fits on device 0 + self.assertDictEqual(device_map, {"": 0}) + + # When splitting a bigger model, the split is done at the layer level + model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) + device_map = infer_auto_device_map(model, max_memory={0: 500, 1: 500}) + self.assertDictEqual(device_map, {"0": 0, "1.linear1": 0, "1.batchnorm": 0, "1.linear2": 1, "2": 1}) + + # With no_split_module_classes, it's done at that module level + model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) + device_map = infer_auto_device_map( + model, max_memory={0: 500, 1: 500}, no_split_module_classes=["ModelForTest"] + ) + self.assertDictEqual(device_map, {"0": 0, "1": 1, "2": 1}) + + # Now if we have weights tied inside submodules, tied weights are on the same device. + model = nn.Sequential(ModelForTest(), ModelForTest(), ModelForTest()) + layer0 = getattr(model, "0") + layer2 = getattr(model, "2") + layer0.linear2.weight = layer2.linear2.weight + device_map = infer_auto_device_map(model, max_memory={0: 400, 1: 500}) + expected = {"0": 0, "2.linear2": 0, "1": 1, "2.linear1": 1, "2.batchnorm": 1} + self.assertDictEqual(device_map, expected) + + @require_cuda + def test_get_balanced_memory(self): + model = ModelForTest() + # model has size 236: linear1 64, batchnorm 72, linear2 100 + max_memory = get_balanced_memory(model, max_memory={0: 200, 1: 200}) + self.assertDictEqual({0: 200, 1: 200}, max_memory) + + max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 300}) + self.assertDictEqual({0: 215, 1: 300}, max_memory) + + # Last device always get max memory to give more buffer and avoid accidental CPU offload + max_memory = get_balanced_memory(model, max_memory={0: 300, 1: 500}) + self.assertDictEqual({0: 215, 1: 500}, max_memory) + + # Last device always get max memory to give more buffer, even if CPU is provided + max_memory = get_balanced_memory(model, max_memory={0: 300, "cpu": 1000}) + self.assertDictEqual({0: 300, "cpu": 1000}, max_memory) + + # If we set a device to 0, it's not counted. + max_memory = get_balanced_memory(model, max_memory={0: 0, 1: 300, 2: 300}) + self.assertDictEqual({0: 0, 1: 215, 2: 300}, max_memory) diff --git a/testbed/huggingface__accelerate/tests/test_multigpu.py b/testbed/huggingface__accelerate/tests/test_multigpu.py new file mode 100644 index 0000000000000000000000000000000000000000..54a5bca75cc71f4ea3a2a8bef45119224802b40c --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_multigpu.py @@ -0,0 +1,86 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +import unittest + +import torch + +import accelerate +from accelerate import Accelerator +from accelerate.test_utils import execute_subprocess_async, require_multi_gpu +from accelerate.utils import get_launch_prefix, patch_environment + + +class MultiGPUTester(unittest.TestCase): + def setUp(self): + mod_file = inspect.getfile(accelerate.test_utils) + self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) + self.data_loop_file_path = os.path.sep.join( + mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"] + ) + + @require_multi_gpu + def test_multi_gpu(self): + print(f"Found {torch.cuda.device_count()} devices.") + cmd = get_launch_prefix() + [self.test_file_path] + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd, env=os.environ.copy()) + + @require_multi_gpu + def test_pad_across_processes(self): + cmd = get_launch_prefix() + [inspect.getfile(self.__class__)] + with patch_environment(omp_num_threads=1): + execute_subprocess_async(cmd, env=os.environ.copy()) + + @require_multi_gpu + def test_distributed_data_loop(self): + """ + This TestCase checks the behaviour that occurs during distributed training or evaluation, + when the batch size does not evenly divide the dataset size. + """ + print(f"Found {torch.cuda.device_count()} devices, using 2 devices only") + cmd = get_launch_prefix() + [f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] + with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"): + execute_subprocess_async(cmd, env=os.environ.copy()) + + +if __name__ == "__main__": + accelerator = Accelerator() + shape = (accelerator.state.process_index + 2, 10) + tensor = torch.randint(0, 10, shape).to(accelerator.device) + + error_msg = "" + + tensor1 = accelerator.pad_across_processes(tensor) + if tensor1.shape[0] != accelerator.state.num_processes + 1: + error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0." + if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor): + error_msg += "Tensors have different values." + if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0): + error_msg += "Padding was not done with the right value (0)." + + tensor2 = accelerator.pad_across_processes(tensor, pad_first=True) + if tensor2.shape[0] != accelerator.state.num_processes + 1: + error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0." + index = accelerator.state.num_processes - accelerator.state.process_index - 1 + if not torch.equal(tensor2[index:], tensor): + error_msg += "Tensors have different values." + if not torch.all(tensor2[:index] == 0): + error_msg += "Padding was not done with the right value (0)." + + # Raise error at the end to make sure we don't stop at the first failure. 
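+    # (this `__main__` block is relaunched by `test_pad_across_processes` above;
+    # each rank builds a tensor whose first dimension depends on its process index,
+    # which is what makes the padding observable)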
+ if len(error_msg) > 0: + raise ValueError(error_msg) diff --git a/testbed/huggingface__accelerate/tests/test_offload.py b/testbed/huggingface__accelerate/tests/test_offload.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ebd36eedc056e98e6e43d94ec19a0c7a67299a --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_offload.py @@ -0,0 +1,117 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +from tempfile import TemporaryDirectory + +import torch +import torch.nn as nn + +from accelerate.utils import ( + OffloadedWeightsLoader, + extract_submodules_state_dict, + is_torch_version, + load_offloaded_weight, + offload_state_dict, + offload_weight, +) + + +class ModelForTest(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = nn.Linear(3, 4) + self.batchnorm = nn.BatchNorm1d(4) + self.linear2 = nn.Linear(4, 5) + + def forward(self, x): + return self.linear2(self.batchnorm(self.linear1(x))) + + +class OffloadTester(unittest.TestCase): + def test_offload_state_dict(self): + model = ModelForTest() + with TemporaryDirectory() as tmp_dir: + offload_state_dict(tmp_dir, model.state_dict()) + index_file = os.path.join(tmp_dir, "index.json") + self.assertTrue(os.path.isfile(index_file)) + # TODO: add tests on what is inside the index + + for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: + weight_file = os.path.join(tmp_dir, f"{key}.dat") + self.assertTrue(os.path.isfile(weight_file)) + # TODO: add tests on the fact weights are properly loaded + + def test_offload_weight(self): + dtypes = [torch.float16, torch.float32] + if is_torch_version(">=", "1.10"): + dtypes.append(torch.bfloat16) + + for dtype in dtypes: + weight = torch.randn(2, 3, dtype=dtype) + with TemporaryDirectory() as tmp_dir: + index = offload_weight(weight, "weight", tmp_dir, {}) + weight_file = os.path.join(tmp_dir, "weight.dat") + self.assertTrue(os.path.isfile(weight_file)) + self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}}) + + new_weight = load_offloaded_weight(weight_file, index["weight"]) + self.assertTrue(torch.equal(weight, new_weight)) + + def test_offload_weights_loader(self): + model = ModelForTest() + state_dict = model.state_dict() + cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k} + disk_part = {k: v for k, v in state_dict.items() if "linear2" in k} + + with TemporaryDirectory() as tmp_dir: + offload_state_dict(tmp_dir, disk_part) + weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir) + + # Every key is there with the right value + self.assertEqual(sorted(weight_map), sorted(state_dict.keys())) + for key, param in state_dict.items(): + self.assertTrue(torch.allclose(param, weight_map[key])) + + cpu_part = {k: v for k, v in state_dict.items() if "weight" in k} + disk_part = {k: v for k, v in state_dict.items() if "weight" not in k} + + with TemporaryDirectory() as 
tmp_dir: + offload_state_dict(tmp_dir, disk_part) + weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir) + + # Every key is there with the right value + self.assertEqual(sorted(weight_map), sorted(state_dict.keys())) + for key, param in state_dict.items(): + self.assertTrue(torch.allclose(param, weight_map[key])) + + with TemporaryDirectory() as tmp_dir: + offload_state_dict(tmp_dir, state_dict) + # Duplicates are removed + weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir) + + # Every key is there with the right value + self.assertEqual(sorted(weight_map), sorted(state_dict.keys())) + for key, param in state_dict.items(): + self.assertTrue(torch.allclose(param, weight_map[key])) + + def test_extract_submodules_state_dict(self): + state_dict = {"a.1": 0, "a.10": 1, "a.2": 2} + extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"]) + self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2}) + + state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} + extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"]) + self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2}) diff --git a/testbed/huggingface__accelerate/tests/test_optimizer.py b/testbed/huggingface__accelerate/tests/test_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..15a095bf7983ab03360773874d216a41062ac707 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_optimizer.py @@ -0,0 +1,36 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
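+
+# Sanity check: an optimizer wrapped by `accelerator.prepare` should survive a
+# pickle round trip without raising.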
+ +import pickle +import unittest + +import torch + +from accelerate import Accelerator +from accelerate.state import AcceleratorState +from accelerate.test_utils import require_cpu + + +@require_cpu +class OptimizerTester(unittest.TestCase): + def test_accelerated_optimizer_pickling(self): + model = torch.nn.Linear(10, 10) + optimizer = torch.optim.SGD(model.parameters(), 0.1) + accelerator = Accelerator() + optimizer = accelerator.prepare(optimizer) + try: + pickle.loads(pickle.dumps(optimizer)) + except Exception as e: + self.fail(f"Accelerated optimizer pickling failed with {e}") + AcceleratorState._reset_state() diff --git a/testbed/huggingface__accelerate/tests/test_sagemaker.py b/testbed/huggingface__accelerate/tests/test_sagemaker.py new file mode 100644 index 0000000000000000000000000000000000000000..2824493d6c28a8af4d7614de50e8a533e6c9983e --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_sagemaker.py @@ -0,0 +1,62 @@ +import unittest +from dataclasses import dataclass + +import pytest +from accelerate.commands.config.config_args import SageMakerConfig +from accelerate.commands.launch import _convert_nargs_to_dict +from accelerate.utils import ComputeEnvironment + + +@dataclass +class MockLaunchConfig(SageMakerConfig): + compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER + fp16 = True + ec2_instance_type = "ml.p3.2xlarge" + iam_role_name = "accelerate_sagemaker_execution_role" + profile = "hf-sm" + region = "us-east-1" + num_machines = 1 + base_job_name = "accelerate-sagemaker-1" + pytorch_version = "1.6" + transformers_version = "4.4" + training_script = "train.py" + success_training_script_args = [ + "--model_name_or_path", + "bert", + "--do_train", + "False", + "--epochs", + "3", + "--learning_rate", + "5e-5", + "--max_steps", + "50.5", + ] + fail_training_script_args = [ + "--model_name_or_path", + "bert", + "--do_train", + "--do_test", + "False", + "--do_predict", + "--epochs", + "3", + "--learning_rate", + "5e-5", + "--max_steps", + "50.5", + ] + + +class SageMakerLaunch(unittest.TestCase): + def test_args_convert(self): + # If no defaults are changed, `to_kwargs` returns an empty dict. + converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args) + assert isinstance(converted_args["model_name_or_path"], str) + assert isinstance(converted_args["do_train"], bool) + assert isinstance(converted_args["epochs"], int) + assert isinstance(converted_args["learning_rate"], float) + assert isinstance(converted_args["max_steps"], float) + + with pytest.raises(ValueError): + _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) diff --git a/testbed/huggingface__accelerate/tests/test_samples/MRPC/dev.csv b/testbed/huggingface__accelerate/tests/test_samples/MRPC/dev.csv new file mode 100644 index 0000000000000000000000000000000000000000..96beccda96d7e164e4484e037a52fb338cc22180 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_samples/MRPC/dev.csv @@ -0,0 +1,7 @@ +label,sentence1,sentence2 +equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,""" The foodservice pie business does not fit our long-term growth strategy ." +not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,"His wife said he was "" 100 percent behind George Bush "" and looked forward to using his years of training in the war ." 
+not_equivalent,"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .","The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent ." +equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries . +not_equivalent,No dates have been set for the civil or the criminal trial .,"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty ." +equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status . diff --git a/testbed/huggingface__accelerate/tests/test_samples/MRPC/train.csv b/testbed/huggingface__accelerate/tests/test_samples/MRPC/train.csv new file mode 100644 index 0000000000000000000000000000000000000000..96beccda96d7e164e4484e037a52fb338cc22180 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_samples/MRPC/train.csv @@ -0,0 +1,7 @@ +label,sentence1,sentence2 +equivalent,He said the foodservice pie business doesn 't fit the company 's long-term growth strategy .,""" The foodservice pie business does not fit our long-term growth strategy ." +not_equivalent,Magnarelli said Racicot hated the Iraqi regime and looked forward to using his long years of training in the war .,"His wife said he was "" 100 percent behind George Bush "" and looked forward to using his years of training in the war ." +not_equivalent,"The dollar was at 116.92 yen against the yen , flat on the session , and at 1.2891 against the Swiss franc , also flat .","The dollar was at 116.78 yen JPY = , virtually flat on the session , and at 1.2871 against the Swiss franc CHF = , down 0.1 percent ." +equivalent,The AFL-CIO is waiting until October to decide if it will endorse a candidate .,The AFL-CIO announced Wednesday that it will decide in October whether to endorse a candidate before the primaries . +not_equivalent,No dates have been set for the civil or the criminal trial .,"No dates have been set for the criminal or civil cases , but Shanley has pleaded not guilty ." +equivalent,Wal-Mart said it would check all of its million-plus domestic workers to ensure they were legally employed .,It has also said it would review all of its domestic employees more than 1 million to ensure they have legal status . diff --git a/testbed/huggingface__accelerate/tests/test_samples/test_command_file.sh b/testbed/huggingface__accelerate/tests/test_samples/test_command_file.sh new file mode 100644 index 0000000000000000000000000000000000000000..592a7d5324e281f0bfc5490e4ae303cae1fe7df1 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_samples/test_command_file.sh @@ -0,0 +1,2 @@ +echo "hello world" +echo "this is a second command" \ No newline at end of file diff --git a/testbed/huggingface__accelerate/tests/test_scheduler.py b/testbed/huggingface__accelerate/tests/test_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..c1ef18f1e6645a6c364fbc895d859947a4ce5fb4 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_scheduler.py @@ -0,0 +1,96 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from functools import partial + +import torch + +from accelerate import Accelerator, debug_launcher +from accelerate.test_utils import require_cpu + + +def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False): + accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches) + model = torch.nn.Linear(2, 4) + optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) + scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1) + model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) + + # Optimizer has stepped + scheduler.step() + if step_scheduler_with_optimizer or (num_processes == 1): + assert ( + scheduler.scheduler.last_epoch == num_processes + ), f"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})" + else: + assert ( + scheduler.scheduler.last_epoch != num_processes + ), f"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})" + + +def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False): + accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches) + model = torch.nn.Linear(2, 4) + optimizer = torch.optim.AdamW(model.parameters(), lr=1.0) + scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10) + model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) + + # Optimizer has stepped + optimizer._is_overflow = False + scheduler.step() + expected_lr = 1 - (num_processes if (step_scheduler_with_optimizer and not split_batches) else 1) / 10 + assert ( + scheduler.get_last_lr()[0] == expected_lr + ), f"Wrong lr found at first step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}" + + # Optimizer has not stepped + optimizer._is_overflow = True + scheduler.step() + if not step_scheduler_with_optimizer: + expected_lr = 1 - 2 / 10 + assert ( + scheduler.get_last_lr()[0] == expected_lr + ), f"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}" + + +@require_cpu +class SchedulerTester(unittest.TestCase): + def test_lambda_scheduler_steps_with_optimizer_single_process(self): + debug_launcher(partial(lambda_test, num_processes=1), num_processes=1) + debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1) + + def test_one_cycle_scheduler_steps_with_optimizer_single_process(self): + debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1) + debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1) + + def test_lambda_scheduler_not_step_with_optimizer_single_process(self): + debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1) + + def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self): 
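+        # `step_scheduler_with_optimizer=False` means the scheduler only advances on
+        # explicit `scheduler.step()` calls, so `last_epoch` should not scale with the
+        # number of processes (see `one_cycle_test` above).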
+        debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
+
+    def test_lambda_scheduler_steps_with_optimizer_multiprocess(self):
+        debug_launcher(lambda_test)
+        debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
+
+    def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self):
+        debug_launcher(one_cycle_test)
+        debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
+
+    def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self):
+        debug_launcher(partial(lambda_test, step_scheduler_with_optimizer=False))
+
+    def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self):
+        debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False))
diff --git a/testbed/huggingface__accelerate/tests/test_state_checkpointing.py b/testbed/huggingface__accelerate/tests/test_state_checkpointing.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd2f6a98b8ce260ef22b55d48da506454383ba92
--- /dev/null
+++ b/testbed/huggingface__accelerate/tests/test_state_checkpointing.py
@@ -0,0 +1,235 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
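+
+# The tests below save full training state (model weights, optimizer state and
+# RNG draws) with `accelerator.save_state`, reload it with `load_state`, and
+# check that a resumed run reproduces the original one exactly.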
+ +import logging +import os +import random +import tempfile +import unittest + +import torch +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +from accelerate import Accelerator +from accelerate.utils import ProjectConfiguration, set_seed + + +logger = logging.getLogger(__name__) + + +def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2): + "Generates a tuple of dummy DataLoaders to test with" + + def get_dataset(n_batches): + x = torch.randn(batch_size * n_batches, 1) + return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1)) + + train_dataset = get_dataset(n_train_batches) + valid_dataset = get_dataset(n_valid_batches) + train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) + valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4) + return (train_dataloader, valid_dataloader) + + +def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None): + "Trains for `num_epochs`" + rands = [] + for epoch in range(num_epochs): + # Train quickly + model.train() + for batch in dataloader: + x, y = batch + outputs = model(x) + loss = torch.nn.functional.mse_loss(outputs, y) + accelerator.backward(loss) + optimizer.step() + optimizer.zero_grad() + rands.append(random.random()) # Introduce some randomness + if scheduler is not None: + scheduler.step() + return rands + + +class DummyModel(nn.Module): + "Simple model to do y=mx+b" + + def __init__(self): + super().__init__() + self.a = nn.Parameter(torch.randn(1)) + self.b = nn.Parameter(torch.randn(1)) + + def forward(self, x): + return x * self.a + self.b + + +class CheckpointTest(unittest.TestCase): + def test_with_save_limit(self): + with tempfile.TemporaryDirectory() as tmpdir: + set_seed(42) + model = DummyModel() + optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) + train_dataloader, valid_dataloader = dummy_dataloaders() + project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True) + # Train baseline + accelerator = Accelerator(project_config=project_config) + model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( + model, optimizer, train_dataloader, valid_dataloader + ) + # Save initial + accelerator.save_state() + + # Save second state + accelerator.save_state() + self.assertEqual(len(os.listdir(accelerator.project_dir)), 1) + + def test_can_resume_training_with_folder(self): + with tempfile.TemporaryDirectory() as tmpdir: + set_seed(42) + model = DummyModel() + optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) + train_dataloader, valid_dataloader = dummy_dataloaders() + # Train baseline + accelerator = Accelerator() + model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( + model, optimizer, train_dataloader, valid_dataloader + ) + # Save initial + initial = os.path.join(tmpdir, "initial") + accelerator.save_state(initial) + (a, b) = model.a.item(), model.b.item() + opt_state = optimizer.state_dict() + ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) + (a1, b1) = model.a.item(), model.b.item() + opt_state1 = optimizer.state_dict() + + # Train partially + set_seed(42) + model = DummyModel() + optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) + train_dataloader, valid_dataloader = dummy_dataloaders() + accelerator = Accelerator() + model, optimizer, train_dataloader, 
valid_dataloader = accelerator.prepare( + model, optimizer, train_dataloader, valid_dataloader + ) + accelerator.load_state(initial) + (a2, b2) = model.a.item(), model.b.item() + opt_state2 = optimizer.state_dict() + self.assertEqual(a, a2) + self.assertEqual(b, b2) + self.assertEqual(opt_state, opt_state2) + + test_rands = train(2, model, train_dataloader, optimizer, accelerator) + # Save everything + checkpoint = os.path.join(tmpdir, "checkpoint") + accelerator.save_state(checkpoint) + + # Load everything back in and make sure all states work + accelerator.load_state(checkpoint) + test_rands += train(1, model, train_dataloader, optimizer, accelerator) + (a3, b3) = model.a.item(), model.b.item() + opt_state3 = optimizer.state_dict() + self.assertEqual(a1, a3) + self.assertEqual(b1, b3) + self.assertEqual(opt_state1, opt_state3) + self.assertEqual(ground_truth_rands, test_rands) + + def test_can_resume_training(self): + with tempfile.TemporaryDirectory() as tmpdir: + set_seed(42) + model = DummyModel() + optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) + train_dataloader, valid_dataloader = dummy_dataloaders() + project_config = ProjectConfiguration(automatic_checkpoint_naming=True) + + # Train baseline + accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) + model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( + model, optimizer, train_dataloader, valid_dataloader + ) + # Save initial + accelerator.save_state() + (a, b) = model.a.item(), model.b.item() + opt_state = optimizer.state_dict() + ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) + (a1, b1) = model.a.item(), model.b.item() + opt_state1 = optimizer.state_dict() + + # Train partially + set_seed(42) + model = DummyModel() + optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) + train_dataloader, valid_dataloader = dummy_dataloaders() + project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) + accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) + model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( + model, optimizer, train_dataloader, valid_dataloader + ) + accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) + (a2, b2) = model.a.item(), model.b.item() + opt_state2 = optimizer.state_dict() + self.assertEqual(a, a2) + self.assertEqual(b, b2) + self.assertEqual(opt_state, opt_state2) + + test_rands = train(2, model, train_dataloader, optimizer, accelerator) + # Save everything + accelerator.save_state() + + # Load everything back in and make sure all states work + accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) + test_rands += train(1, model, train_dataloader, optimizer, accelerator) + (a3, b3) = model.a.item(), model.b.item() + opt_state3 = optimizer.state_dict() + self.assertEqual(a1, a3) + self.assertEqual(b1, b3) + self.assertEqual(opt_state1, opt_state3) + self.assertEqual(ground_truth_rands, test_rands) + + def test_invalid_registration(self): + t = torch.tensor([1, 2, 3]) + t1 = torch.tensor([2, 3, 4]) + net = DummyModel() + opt = torch.optim.Adam(net.parameters()) + accelerator = Accelerator() + with self.assertRaises(ValueError) as ve: + accelerator.register_for_checkpointing(t, t1, net, opt) + message = str(ve.exception) + self.assertTrue("Item at index 0" in message) + self.assertTrue("Item at index 1" in message) + self.assertFalse("Item at index 2" in message) + self.assertFalse("Item at index 
3" in message) + + def test_with_scheduler(self): + with tempfile.TemporaryDirectory() as tmpdir: + set_seed(42) + model = DummyModel() + optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) + scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) + train_dataloader, valid_dataloader = dummy_dataloaders() + project_config = ProjectConfiguration(automatic_checkpoint_naming=True) + # Train baseline + accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) + model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( + model, optimizer, train_dataloader, valid_dataloader, scheduler + ) + # Save initial + accelerator.save_state() + scheduler_state = scheduler.state_dict() + train(3, model, train_dataloader, optimizer, accelerator, scheduler) + self.assertNotEqual(scheduler_state, scheduler.state_dict()) + + # Load everything back in and make sure all states work + accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) + self.assertEqual(scheduler_state, scheduler.state_dict()) diff --git a/testbed/huggingface__accelerate/tests/test_tpu.py b/testbed/huggingface__accelerate/tests/test_tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..bffa8b8b4ff212512be4616c6157feb6fab39b6a --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_tpu.py @@ -0,0 +1,38 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import os +import sys +import unittest + +import accelerate +from accelerate.test_utils import execute_subprocess_async, require_tpu + + +class MultiTPUTester(unittest.TestCase): + def setUp(self): + mod_file = inspect.getfile(accelerate.test_utils) + self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]) + self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1]) + + @require_tpu + def test_tpu(self): + distributed_args = f""" + {self.test_dir}/xla_spawn.py + --num_cores 8 + {self.test_file_path} + """.split() + cmd = [sys.executable] + distributed_args + execute_subprocess_async(cmd, env=os.environ.copy()) diff --git a/testbed/huggingface__accelerate/tests/test_tracking.py b/testbed/huggingface__accelerate/tests/test_tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..fec99e1c66005199b04087c507d8d58e2eaec315 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_tracking.py @@ -0,0 +1,292 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import csv
+import json
+import logging
+import os
+import re
+import subprocess
+import tempfile
+import unittest
+import zipfile
+from pathlib import Path
+from typing import Optional
+from unittest import mock
+
+# These tests parse the raw log files written by each tracker
+from accelerate import Accelerator
+from accelerate.test_utils.testing import (
+    MockingTestCase,
+    TempDirTestCase,
+    require_comet_ml,
+    require_tensorboard,
+    require_wandb,
+    skip,
+)
+from accelerate.tracking import CometMLTracker, GeneralTracker
+from accelerate.utils import is_comet_ml_available
+
+
+if is_comet_ml_available():
+    from comet_ml import OfflineExperiment
+
+logger = logging.getLogger(__name__)
+
+
+@require_tensorboard
+class TensorBoardTrackingTest(unittest.TestCase):
+    def test_init_trackers(self):
+        project_name = "test_project_with_config"
+        with tempfile.TemporaryDirectory() as dirpath:
+            accelerator = Accelerator(log_with="tensorboard", logging_dir=dirpath)
+            config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
+            accelerator.init_trackers(project_name, config)
+            accelerator.end_training()
+            for child in Path(f"{dirpath}/{project_name}").glob("*/**"):
+                log = list(filter(lambda x: x.is_file(), child.iterdir()))[0]
+                self.assertNotEqual(str(log), "")
+
+    def test_log(self):
+        project_name = "test_project_with_log"
+        with tempfile.TemporaryDirectory() as dirpath:
+            accelerator = Accelerator(log_with="tensorboard", logging_dir=dirpath)
+            accelerator.init_trackers(project_name)
+            values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
+            accelerator.log(values, step=0)
+            accelerator.end_training()
+            # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord
+            # Names are randomly generated each time
+            log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0]
+            self.assertNotEqual(str(log), "")
+
+    def test_project_dir(self):
+        with self.assertRaisesRegex(ValueError, "Logging with `tensorboard` requires a `logging_dir`"):
+            _ = Accelerator(log_with="tensorboard")
+        with tempfile.TemporaryDirectory() as dirpath:
+            _ = Accelerator(log_with="tensorboard", project_dir=dirpath)
+        with tempfile.TemporaryDirectory() as dirpath:
+            _ = Accelerator(log_with="tensorboard", logging_dir=dirpath)
+
+
+@require_wandb
+@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
+class WandBTrackingTest(TempDirTestCase, MockingTestCase):
+    def setUp(self):
+        super().setUp()
+        # wandb lets us override where logs are stored via the WANDB_DIR env var
+        self.add_mocks(mock.patch.dict(os.environ, {"WANDB_DIR": self.tmpdir}))
+
+    @staticmethod
+    def parse_log(log: str, section: str, record: bool = True):
+        """
+        Parses wandb log for `section` and returns a dictionary of
+        all items in that section.
Section names are based on the
+        output of `wandb sync --view --verbose` and items starting
+        with "Record" in that result
+        """
+        # Big thanks to the W&B team for helping us parse their logs
+        pattern = rf"{section} ([\S\s]*?)\n\n"
+        if record:
+            pattern = rf"Record: {pattern}"
+        cleaned_record = re.findall(pattern, log)[0]
+        # A config
+        if section == "config" or section == "history":
+            cleaned_record = re.findall(r'"([a-zA-Z0-9_.,]+)', cleaned_record)
+            return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])}
+        # Everything else
+        else:
+            return dict(re.findall(r'(\w+): "([^\s]+)"', cleaned_record))
+
+    @skip
+    def test_wandb(self):
+        project_name = "test_project_with_config"
+        accelerator = Accelerator(log_with="wandb")
+        config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"}
+        kwargs = {"wandb": {"tags": ["my_tag"]}}
+        accelerator.init_trackers(project_name, config, kwargs)
+        values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"}
+        accelerator.log(values, step=0)
+        accelerator.end_training()
+        # The latest offline log is stored at wandb/latest-run/*.wandb
+        for child in Path(f"{self.tmpdir}/wandb/latest-run").glob("*"):
+            if child.is_file() and child.suffix == ".wandb":
+                content = subprocess.check_output(
+                    ["wandb", "sync", "--view", "--verbose", str(child)], env=os.environ.copy()
+                ).decode("utf8", "ignore")
+                break
+
+        # Check HPS through careful parsing and cleaning
+        logged_items = self.parse_log(content, "config")
+        self.assertEqual(logged_items["num_iterations"], "12")
+        self.assertEqual(logged_items["learning_rate"], "0.01")
+        self.assertEqual(logged_items["some_boolean"], "false")
+        self.assertEqual(logged_items["some_string"], "some_value")
+
+        # Run tags
+        logged_items = self.parse_log(content, "run", False)
+        self.assertEqual(logged_items["tags"], "my_tag")
+
+        # Actual logging
+        logged_items = self.parse_log(content, "history")
+        self.assertEqual(logged_items["total_loss"], "0.1")
+        self.assertEqual(logged_items["iteration"], "1")
+        self.assertEqual(logged_items["my_text"], "some_value")
+        self.assertEqual(logged_items["_step"], "0")
+
+
+# Comet has a special `OfflineExperiment` we need to use for testing
+def offline_init(self, run_name: str, tmpdir: str):
+    self.run_name = run_name
+    self.writer = OfflineExperiment(project_name=run_name, offline_directory=tmpdir)
+    logger.info(f"Initialized offline CometML project {self.run_name}")
+    logger.info("Make sure to log any initial configurations with `self.store_init_configuration` before training!")
+
+
+@require_comet_ml
+@mock.patch.object(CometMLTracker, "__init__", offline_init)
+class CometMLTest(unittest.TestCase):
+    @staticmethod
+    def get_value_from_key(log_list, key: str, is_param: bool = False):
+        "Extracts `key` from Comet `log`"
+        for log in log_list:
+            j = json.loads(log)["payload"]
+            if is_param and "param" in j.keys():
+                if j["param"]["paramName"] == key:
+                    return j["param"]["paramValue"]
+            if "log_other" in j.keys():
+                if j["log_other"]["key"] == key:
+                    return j["log_other"]["val"]
+            if "metric" in j.keys():
+                if j["metric"]["metricName"] == key:
+                    return j["metric"]["metricValue"]
+
+    def test_init_trackers(self):
+        with tempfile.TemporaryDirectory() as d:
+            tracker = CometMLTracker("test_project_with_config", d)
+            accelerator = Accelerator(log_with=tracker)
+            config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string":
"some_value"} + accelerator.init_trackers(None, config) + accelerator.end_training() + log = os.listdir(d)[0] # Comet is nice, it's just a zip file here + # We parse the raw logs + p = os.path.join(d, log) + archive = zipfile.ZipFile(p, "r") + log = archive.open("messages.json").read().decode("utf-8") + list_of_json = log.split("\n")[:-1] + self.assertEqual(self.get_value_from_key(list_of_json, "num_iterations", True), 12) + self.assertEqual(self.get_value_from_key(list_of_json, "learning_rate", True), 0.01) + self.assertEqual(self.get_value_from_key(list_of_json, "some_boolean", True), False) + self.assertEqual(self.get_value_from_key(list_of_json, "some_string", True), "some_value") + + def test_log(self): + with tempfile.TemporaryDirectory() as d: + tracker = CometMLTracker("test_project_with_config", d) + accelerator = Accelerator(log_with=tracker) + accelerator.init_trackers(None) + values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} + accelerator.log(values, step=0) + accelerator.end_training() + log = os.listdir(d)[0] # Comet is nice, it's just a zip file here + # We parse the raw logs + p = os.path.join(d, log) + archive = zipfile.ZipFile(p, "r") + log = archive.open("messages.json").read().decode("utf-8") + list_of_json = log.split("\n")[:-1] + self.assertEqual(self.get_value_from_key(list_of_json, "curr_step", True), 0) + self.assertEqual(self.get_value_from_key(list_of_json, "total_loss"), 0.1) + self.assertEqual(self.get_value_from_key(list_of_json, "iteration"), 1) + self.assertEqual(self.get_value_from_key(list_of_json, "my_text"), "some_value") + + +class MyCustomTracker(GeneralTracker): + "Basic tracker that writes to a csv for testing" + _col_names = [ + "total_loss", + "iteration", + "my_text", + "learning_rate", + "num_iterations", + "some_boolean", + "some_string", + ] + + name = "my_custom_tracker" + requires_logging_directory = False + + def __init__(self, dir: str): + self.f = open(f"{dir}/log.csv", "w+") + self.writer = csv.DictWriter(self.f, fieldnames=self._col_names) + self.writer.writeheader() + + @property + def tracker(self): + return self.writer + + def store_init_configuration(self, values: dict): + logger.info("Call init") + self.writer.writerow(values) + + def log(self, values: dict, step: Optional[int]): + logger.info("Call log") + self.writer.writerow(values) + + def finish(self): + self.f.close() + + +class CustomTrackerTestCase(unittest.TestCase): + def test_init_trackers(self): + with tempfile.TemporaryDirectory() as d: + tracker = MyCustomTracker(d) + accelerator = Accelerator(log_with=tracker) + config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} + accelerator.init_trackers("Some name", config) + accelerator.end_training() + with open(f"{d}/log.csv", "r") as f: + data = csv.DictReader(f) + data = next(data) + truth = { + "total_loss": "", + "iteration": "", + "my_text": "", + "learning_rate": "0.01", + "num_iterations": "12", + "some_boolean": "False", + "some_string": "some_value", + } + self.assertDictEqual(data, truth) + + def test_log(self): + with tempfile.TemporaryDirectory() as d: + tracker = MyCustomTracker(d) + accelerator = Accelerator(log_with=tracker) + accelerator.init_trackers("Some name") + values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} + accelerator.log(values, step=0) + accelerator.end_training() + with open(f"{d}/log.csv", "r") as f: + data = csv.DictReader(f) + data = next(data) + truth = { + "total_loss": "0.1", + "iteration": "1", + 
"my_text": "some_value", + "learning_rate": "", + "num_iterations": "", + "some_boolean": "", + "some_string": "", + } + self.assertDictEqual(data, truth) diff --git a/testbed/huggingface__accelerate/tests/test_utils.py b/testbed/huggingface__accelerate/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7c7629a1a36acb6d8579a8e88c0db2b790daf6cb --- /dev/null +++ b/testbed/huggingface__accelerate/tests/test_utils.py @@ -0,0 +1,103 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import pickle +import unittest +from collections import UserDict, namedtuple + +import torch + +from accelerate.test_utils.testing import require_cuda +from accelerate.test_utils.training import RegressionModel +from accelerate.utils import ( + convert_outputs_to_fp32, + extract_model_from_parallel, + find_device, + patch_environment, + send_to_device, +) + + +ExampleNamedTuple = namedtuple("ExampleNamedTuple", "a b c") + + +class UtilsTester(unittest.TestCase): + def test_send_to_device(self): + tensor = torch.randn(5, 2) + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + result1 = send_to_device(tensor, device) + self.assertTrue(torch.equal(result1.cpu(), tensor)) + + result2 = send_to_device((tensor, [tensor, tensor], 1), device) + self.assertIsInstance(result2, tuple) + self.assertTrue(torch.equal(result2[0].cpu(), tensor)) + self.assertIsInstance(result2[1], list) + self.assertTrue(torch.equal(result2[1][0].cpu(), tensor)) + self.assertTrue(torch.equal(result2[1][1].cpu(), tensor)) + self.assertEqual(result2[2], 1) + + result2 = send_to_device({"a": tensor, "b": [tensor, tensor], "c": 1}, device) + self.assertIsInstance(result2, dict) + self.assertTrue(torch.equal(result2["a"].cpu(), tensor)) + self.assertIsInstance(result2["b"], list) + self.assertTrue(torch.equal(result2["b"][0].cpu(), tensor)) + self.assertTrue(torch.equal(result2["b"][1].cpu(), tensor)) + self.assertEqual(result2["c"], 1) + + result3 = send_to_device(ExampleNamedTuple(a=tensor, b=[tensor, tensor], c=1), device) + self.assertIsInstance(result3, ExampleNamedTuple) + self.assertTrue(torch.equal(result3.a.cpu(), tensor)) + self.assertIsInstance(result3.b, list) + self.assertTrue(torch.equal(result3.b[0].cpu(), tensor)) + self.assertTrue(torch.equal(result3.b[1].cpu(), tensor)) + self.assertEqual(result3.c, 1) + + result4 = send_to_device(UserDict({"a": tensor, "b": [tensor, tensor], "c": 1}), device) + self.assertIsInstance(result4, UserDict) + self.assertTrue(torch.equal(result4["a"].cpu(), tensor)) + self.assertIsInstance(result4["b"], list) + self.assertTrue(torch.equal(result4["b"][0].cpu(), tensor)) + self.assertTrue(torch.equal(result4["b"][1].cpu(), tensor)) + self.assertEqual(result4["c"], 1) + + def test_patch_environment(self): + with patch_environment(aa=1, BB=2): + self.assertEqual(os.environ.get("AA"), "1") + self.assertEqual(os.environ.get("BB"), "2") + + self.assertNotIn("AA", 
os.environ) + self.assertNotIn("BB", os.environ) + + def test_can_undo_convert_outputs(self): + model = RegressionModel() + model._original_forward = model.forward + model.forward = convert_outputs_to_fp32(model.forward) + model = extract_model_from_parallel(model) + _ = pickle.dumps(model) + + @require_cuda + def test_can_undo_fp16_conversion(self): + model = RegressionModel() + model._original_forward = model.forward + model.forward = torch.cuda.amp.autocast(dtype=torch.float16)(model.forward) + model.forward = convert_outputs_to_fp32(model.forward) + model = extract_model_from_parallel(model) + _ = pickle.dumps(model) + + def test_find_device(self): + self.assertEqual(find_device([1, "a", torch.tensor([1, 2, 3])]), torch.device("cpu")) + self.assertEqual(find_device({"a": 1, "b": torch.tensor([1, 2, 3])}), torch.device("cpu")) + self.assertIsNone(find_device([1, "a"])) diff --git a/testbed/huggingface__accelerate/tests/xla_spawn.py b/testbed/huggingface__accelerate/tests/xla_spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..1a07af29c3384a382b67b49ada93c9c0fcdc4d90 --- /dev/null +++ b/testbed/huggingface__accelerate/tests/xla_spawn.py @@ -0,0 +1,85 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A simple launcher script for TPU training + +Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py + +:: + >>> python xla_spawn.py --num_cores=NUM_CORES_YOU_HAVE + YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other + arguments of your training script) + +""" + + +import importlib +import sys +from argparse import REMAINDER, ArgumentParser +from pathlib import Path + +import torch_xla.distributed.xla_multiprocessing as xmp + + +def parse_args(): + """ + Helper function parsing the command line options + @retval ArgumentParser + """ + parser = ArgumentParser( + description=( + "PyTorch TPU distributed training launch " + "helper utility that will spawn up " + "multiple distributed processes" + ) + ) + + # Optional arguments for the launch helper + parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).") + + # positional + parser.add_argument( + "training_script", + type=str, + help=( + "The full path to the single TPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script" + ), + ) + + # rest from the training program + parser.add_argument("training_script_args", nargs=REMAINDER) + + return parser.parse_args() + + +def main(): + args = parse_args() + + # Import training_script as a module. 
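+    # The parent folder of the script is appended to sys.path so that
+    # importlib can resolve it by its bare module name (the file stem).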
+ script_fpath = Path(args.training_script) + sys.path.append(str(script_fpath.parent.resolve())) + mod_name = script_fpath.stem + mod = importlib.import_module(mod_name) + + # Patch sys.argv + sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)] + xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__accelerate/utils/log_reports.py b/testbed/huggingface__accelerate/utils/log_reports.py new file mode 100644 index 0000000000000000000000000000000000000000..7f790dad3f78000abddeeac64f00ca7c37f3cfdf --- /dev/null +++ b/testbed/huggingface__accelerate/utils/log_reports.py @@ -0,0 +1,33 @@ +import json +from pathlib import Path + +failed = [] +passed = [] + +group_info = [] + +total_num_failed = 0 +for log in Path().glob("*.log"): + section_num_failed = 0 + with open(log, "r") as f: + for line in f: + line = json.loads(line) + if line.get("nodeid", "") != "": + test = line["nodeid"] + if line.get("duration", None) is not None: + duration = f'{line["duration"]:.4f}' + if line.get("outcome", "") == "failed": + section_num_failed += 1 + failed.append([test, duration, log.name.split('_')[0]]) + else: + passed.append([test, duration, log.name.split('_')[0]]) + group_info.append([str(log), section_num_failed]) + +if len(failed) > 0: + result = "## Failed Tests:\n" + failed_table = '| Test Location | Test Class | Test Name | PyTorch Version |\n|---|---|---|---|\n| ' + for test in failed: + failed_table += ' | '.join(test[0].split("::")) + failed_table += f" | {test[2]} |" + result += failed_table + print(result) \ No newline at end of file diff --git a/testbed/huggingface__accelerate/utils/stale.py b/testbed/huggingface__accelerate/utils/stale.py new file mode 100644 index 0000000000000000000000000000000000000000..1d8f9020be0786eb1ad4e0a3b06c8f45b4188f9a --- /dev/null +++ b/testbed/huggingface__accelerate/utils/stale.py @@ -0,0 +1,66 @@ +# Copyright 2022 The HuggingFace Team, the AllenNLP library authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Script to close stale issue. Taken in part from the AllenNLP repository. +https://github.com/allenai/allennlp. 
+""" +from datetime import datetime as dt +import os + +from github import Github + + +LABELS_TO_EXEMPT = [ + "good first issue", + "feature request", + "wip", +] + + +def main(): + g = Github(os.environ["GITHUB_TOKEN"]) + repo = g.get_repo("huggingface/accelerate") + open_issues = repo.get_issues(state="open") + + for issue in open_issues: + comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True) + last_comment = comments[0] if len(comments) > 0 else None + current_time = dt.utcnow() + days_since_updated = (current_time - issue.updated_at).days + days_since_creation = (current_time - issue.created_at).days + if ( + last_comment is not None and last_comment.user.login == "github-actions[bot]" + and days_since_updated > 7 + and days_since_creation >= 30 + and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) + ): + # Close issue since it has been 7 days of inactivity since bot mention. + issue.edit(state="closed") + elif ( + days_since_updated > 23 + and days_since_creation >= 30 + and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) + ): + # Add stale comment + issue.create_comment( + "This issue has been automatically marked as stale because it has not had " + "recent activity. If you think this still needs to be addressed " + "please comment on this thread.\n\nPlease note that issues that do not follow the " + "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) " + "are likely to be ignored." + ) + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__datasets/.dvc/.gitignore b/testbed/huggingface__datasets/.dvc/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..528f30c71c687de473bbb506c071e902beba6cd9 --- /dev/null +++ b/testbed/huggingface__datasets/.dvc/.gitignore @@ -0,0 +1,3 @@ +/config.local +/tmp +/cache diff --git a/testbed/huggingface__datasets/.dvc/config b/testbed/huggingface__datasets/.dvc/config new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/.dvc/plots/confusion.json b/testbed/huggingface__datasets/.dvc/plots/confusion.json new file mode 100644 index 0000000000000000000000000000000000000000..0d9a3336542c4a9219297b878a241e5cae4b5d1f --- /dev/null +++ b/testbed/huggingface__datasets/.dvc/plots/confusion.json @@ -0,0 +1,30 @@ +{ + "$schema": "https://vega.github.io/schema/vega-lite/v4.json", + "data": { + "values": "" + }, + "title": "", + "mark": "rect", + "encoding": { + "x": { + "field": "", + "type": "nominal", + "sort": "ascending", + "title": "" + }, + "y": { + "field": "", + "type": "nominal", + "sort": "ascending", + "title": "" + }, + "color": { + "aggregate": "count", + "type": "quantitative" + }, + "facet": { + "field": "rev", + "type": "nominal" + } + } +} diff --git a/testbed/huggingface__datasets/.dvc/plots/default.json b/testbed/huggingface__datasets/.dvc/plots/default.json new file mode 100644 index 0000000000000000000000000000000000000000..d00782a82919f89fdfd63f2b4be97a5c3c71389d --- /dev/null +++ b/testbed/huggingface__datasets/.dvc/plots/default.json @@ -0,0 +1,29 @@ +{ + "$schema": "https://vega.github.io/schema/vega-lite/v4.json", + "data": { + "values": "" + }, + "title": "", + "mark": { + "type": "line" + }, + "encoding": { + "x": { + "field": "", + "type": "quantitative", + "title": "" + }, + "y": { + "field": "", + "type": "quantitative", + "title": 
"", + "scale": { + "zero": false + } + }, + "color": { + "field": "rev", + "type": "nominal" + } + } +} diff --git a/testbed/huggingface__datasets/.dvc/plots/scatter.json b/testbed/huggingface__datasets/.dvc/plots/scatter.json new file mode 100644 index 0000000000000000000000000000000000000000..90165d4cf67864c94992b83c6c8f027f89fe41c4 --- /dev/null +++ b/testbed/huggingface__datasets/.dvc/plots/scatter.json @@ -0,0 +1,27 @@ +{ + "$schema": "https://vega.github.io/schema/vega-lite/v4.json", + "data": { + "values": "" + }, + "title": "", + "mark": "point", + "encoding": { + "x": { + "field": "", + "type": "quantitative", + "title": "" + }, + "y": { + "field": "", + "type": "quantitative", + "title": "", + "scale": { + "zero": false + } + }, + "color": { + "field": "rev", + "type": "nominal" + } + } +} diff --git a/testbed/huggingface__datasets/.dvc/plots/smooth.json b/testbed/huggingface__datasets/.dvc/plots/smooth.json new file mode 100644 index 0000000000000000000000000000000000000000..d497ce75e9e5375733781bd3c3b8b936b9bdec0b --- /dev/null +++ b/testbed/huggingface__datasets/.dvc/plots/smooth.json @@ -0,0 +1,39 @@ +{ + "$schema": "https://vega.github.io/schema/vega-lite/v4.json", + "data": { + "values": "" + }, + "title": "", + "mark": { + "type": "line" + }, + "encoding": { + "x": { + "field": "", + "type": "quantitative", + "title": "" + }, + "y": { + "field": "", + "type": "quantitative", + "title": "", + "scale": { + "zero": false + } + }, + "color": { + "field": "rev", + "type": "nominal" + } + }, + "transform": [ + { + "loess": "", + "on": "", + "groupby": [ + "rev" + ], + "bandwidth": 0.3 + } + ] +} diff --git a/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/bug-report.yml b/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000000000000000000000000000000000000..85590fd09ca7f627a0473a0c3335b93f41d70d93 --- /dev/null +++ b/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,45 @@ +name: Bug report +description: Create a report to help reproduce and fix the bug +body: + - type: textarea + id: description + attributes: + label: Describe the bug + description: A clear and concise description of what the bug is + validations: + required: true + + - type: textarea + id: reproduction + attributes: + label: Steps to reproduce the bug + description: | + Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet. + If you have code snippets, error messages, stack traces please provide them here as well. + Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting + Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code. + placeholder: | + Steps to reproduce the behavior: + + 1. + 2. + 3. + validations: + required: true + + - type: textarea + id: expected-behavior + validations: + required: true + attributes: + label: Expected behavior + description: A clear and concise description of the expected results. + + - type: textarea + id: environment-info + attributes: + label: Environment info + description: Please share your environemnt info with us. You can run the command `datasets-cli env` and copy-paste its output below. + placeholder: datasets version, platform, python version, ... 
+ validations: + required: true diff --git a/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/config.yml b/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..2d3ae9f1e290a1730412f271b7cc901af7c96401 --- /dev/null +++ b/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,7 @@ +contact_links: + - name: Datasets on the Hugging Face Hub + url: https://huggingface.co/datasets + about: Please use the "Community" tab of the dataset on the Hugging Face Hub to open a discussion or a pull request + - name: Forum + url: https://discuss.huggingface.co/c/datasets/10 + about: Please ask and answer questions here, and engage with other community members diff --git a/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/feature-request.yml b/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000000000000000000000000000000000000..83e8714f76a6e64afed5b009d7d96ec38d075c24 --- /dev/null +++ b/testbed/huggingface__datasets/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,29 @@ +name: Feature request +description: Suggest an idea for this project +labels: ["enhancement"] +body: + - type: textarea + id: feature-request + attributes: + label: Feature request + description: A clear and concise description of the feature proposal. + validations: + required: true + + - type: textarea + id: motivation + validations: + required: true + attributes: + label: Motivation + description: | + Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too. + + - type: textarea + id: contribution + validations: + required: true + attributes: + label: Your contribution + description: | + Is there any way that you could help, e.g. by submitting a PR? Make sure to read the CONTRIBUTING.MD [readme](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md). 
diff --git a/testbed/huggingface__datasets/.github/conda/build.sh b/testbed/huggingface__datasets/.github/conda/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..a6609066d90c9754bd83da5f77a7c017e19608d7 --- /dev/null +++ b/testbed/huggingface__datasets/.github/conda/build.sh @@ -0,0 +1 @@ +$PYTHON setup.py install --single-version-externally-managed --record=record.txt diff --git a/testbed/huggingface__datasets/.github/conda/meta.yaml b/testbed/huggingface__datasets/.github/conda/meta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5645a1355ce0b46b281579417cfa34d04c8032f --- /dev/null +++ b/testbed/huggingface__datasets/.github/conda/meta.yaml @@ -0,0 +1,56 @@ +{% set name = "datasets" %} + +package: + name: "{{ name|lower }}" + version: "{{ DATASETS_VERSION }}" + +source: + path: ../../ + +build: + noarch: python + +requirements: + host: + - python + - pip + - numpy >=1.17 + - pyarrow >=8.0.0 + - python-xxhash + - dill + - pandas + - requests >=2.19.0 + - tqdm >=4.62.1 + - dataclasses + - multiprocess + - fsspec + - huggingface_hub >=0.19.4,<1.0.0 + - packaging + - aiohttp + run: + - python + - pip + - numpy >=1.17 + - pyarrow >=8.0.0 + - pyarrow-hotfix + - python-xxhash + - dill + - pandas + - requests >=2.19.0 + - tqdm >=4.62.1 + - dataclasses + - multiprocess + - fsspec + - huggingface_hub >=0.19.4,<1.0.0 + - packaging + - aiohttp + +test: + imports: + - datasets + +about: + home: https://huggingface.co + license: Apache License 2.0 + license_file: LICENSE + summary: "🤗 The largest hub of ready-to-use NLP datasets for ML models with fast, easy-to-use and efficient data manipulation tools" diff --git a/testbed/huggingface__datasets/.github/workflows/benchmarks.yaml b/testbed/huggingface__datasets/.github/workflows/benchmarks.yaml new file mode 100644 index 0000000000000000000000000000000000000000..beddaf05e0a765c3a1e00a04fd104ab151471520 --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/benchmarks.yaml @@ -0,0 +1,49 @@ +name: benchmarks +on: [push] +jobs: + run: + runs-on: [ubuntu-latest] + container: ghcr.io/iterative/cml + steps: + - uses: actions/checkout@v3 + - name: cml_run + env: + CML_DRIVER_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # See https://github.com/actions/checkout/issues/760 + git config --global --add safe.directory /__w/datasets/datasets + + # Your ML workflow goes here + + pip install --upgrade pip + pip install setuptools wheel + pip install -e .[benchmarks] + + # pyarrow==8.0.0 + pip install pyarrow==8.0.0 + + dvc repro --force + + git fetch --prune + dvc metrics diff --json main > report.json + + python ./benchmarks/format.py report.json report.md + + echo "
<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n" > final_report.md
+          cat report.md >> final_report.md
+
+          # pyarrow
+          pip install pyarrow --upgrade
+
+          dvc repro --force
+
+          git fetch --prune
+          dvc metrics diff --json main > report.json
+
+          python ./benchmarks/format.py report.json report.md
+
+          echo "\nPyArrow==latest\n" >> final_report.md
+          cat report.md >> final_report.md
+          echo "\n</details>
" >> final_report.md + + cml comment create final_report.md diff --git a/testbed/huggingface__datasets/.github/workflows/build_documentation.yml b/testbed/huggingface__datasets/.github/workflows/build_documentation.yml new file mode 100644 index 0000000000000000000000000000000000000000..9d15aae065ccb6b6a3eae53f928b007de380dde4 --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/build_documentation.yml @@ -0,0 +1,20 @@ +name: Build documentation + +on: + push: + branches: + - main + - doc-builder* + - v*-release + - v*-patch + +jobs: + build: + uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main + with: + commit_sha: ${{ github.sha }} + package: datasets + notebook_folder: datasets_doc + secrets: + token: ${{ secrets.HUGGINGFACE_PUSH }} + hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} diff --git a/testbed/huggingface__datasets/.github/workflows/build_pr_documentation.yml b/testbed/huggingface__datasets/.github/workflows/build_pr_documentation.yml new file mode 100644 index 0000000000000000000000000000000000000000..90952fef0b42901b9988e023109290000c910a7c --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/build_pr_documentation.yml @@ -0,0 +1,16 @@ +name: Build PR Documentation + +on: + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + build: + uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main + with: + commit_sha: ${{ github.event.pull_request.head.sha }} + pr_number: ${{ github.event.number }} + package: datasets diff --git a/testbed/huggingface__datasets/.github/workflows/ci.yml b/testbed/huggingface__datasets/.github/workflows/ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..d76ae7209ceb70af9235c36953afa57fd3b35b04 --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/ci.yml @@ -0,0 +1,95 @@ +name: CI + +on: + pull_request: + branches: + - main + push: + branches: + - main + - ci-* + +env: + HF_ALLOW_CODE_EVAL: 1 + +jobs: + + check_code_quality: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.8" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install .[quality] + - name: Check quality + run: | + ruff check tests src benchmarks metrics utils setup.py # linter + ruff format --check tests src benchmarks metrics utils setup.py # formatter + + test: + needs: check_code_quality + strategy: + matrix: + test: ['unit', 'integration'] + os: [ubuntu-latest, windows-latest] + deps_versions: [deps-latest, deps-minimum] + continue-on-error: ${{ matrix.test == 'integration' }} + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up Python 3.8 + uses: actions/setup-python@v4 + with: + python-version: "3.8" + - name: Upgrade pip + run: python -m pip install --upgrade pip + - name: Pin setuptools-scm + if: ${{ matrix.os == 'ubuntu-latest' }} + run: echo "installing pinned version of setuptools-scm to fix seqeval installation on 3.7" && pip install "setuptools-scm==6.4.2" + - name: Install dependencies + run: | + pip install .[tests,metrics-tests] + pip install -r additional-tests-requirements.txt --no-deps + python -m spacy download en_core_web_sm + python -m spacy download fr_core_news_sm + - name: Install dependencies (latest versions) + if: ${{ matrix.deps_versions == 'deps-latest' }} + run: pip 
install --upgrade pyarrow huggingface-hub dill + - name: Install dependencies (minimum versions) + if: ${{ matrix.deps_versions != 'deps-latest' }} + run: pip install pyarrow==8.0.0 huggingface-hub==0.19.4 transformers dill==0.3.1.1 + - name: Test with pytest + run: | + python -m pytest -rfExX -m ${{ matrix.test }} -n 2 --dist loadfile -sv ./tests/ + + test_py310: + needs: check_code_quality + strategy: + matrix: + test: ['unit'] + os: [ubuntu-latest, windows-latest] + deps_versions: [deps-latest] + continue-on-error: false + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Upgrade pip + run: python -m pip install --upgrade pip + - name: Install dependencies + run: pip install .[tests] + - name: Test with pytest + run: | + python -m pytest -rfExX -m ${{ matrix.test }} -n 2 --dist loadfile -sv ./tests/ diff --git a/testbed/huggingface__datasets/.github/workflows/delete_doc_comment.yml b/testbed/huggingface__datasets/.github/workflows/delete_doc_comment.yml new file mode 100644 index 0000000000000000000000000000000000000000..8604019d76eb507fb41c6446ab8875452337e40a --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/delete_doc_comment.yml @@ -0,0 +1,14 @@ +name: Delete doc comment + +on: + workflow_run: + workflows: ["Delete doc comment trigger"] + types: + - completed + + +jobs: + delete: + uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main + secrets: + comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} \ No newline at end of file diff --git a/testbed/huggingface__datasets/.github/workflows/delete_doc_comment_trigger.yml b/testbed/huggingface__datasets/.github/workflows/delete_doc_comment_trigger.yml new file mode 100644 index 0000000000000000000000000000000000000000..5e39e253974df54fd284cf44bb1e52afbefecded --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/delete_doc_comment_trigger.yml @@ -0,0 +1,12 @@ +name: Delete doc comment trigger + +on: + pull_request: + types: [ closed ] + + +jobs: + delete: + uses: huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main + with: + pr_number: ${{ github.event.number }} \ No newline at end of file diff --git a/testbed/huggingface__datasets/.github/workflows/release-conda.yml b/testbed/huggingface__datasets/.github/workflows/release-conda.yml new file mode 100644 index 0000000000000000000000000000000000000000..260f5d4a588f111a06abc48dc062992297ff6925 --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/release-conda.yml @@ -0,0 +1,45 @@ +name: Release - Conda + +on: + push: + tags: + - "[0-9]+.[0-9]+.[0-9]+*" + +env: + ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_API_TOKEN }} + +jobs: + build_and_package: + runs-on: ubuntu-latest + defaults: + run: + shell: bash -l {0} + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Install miniconda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + auto-activate-base: false + activate-environment: "build-datasets" + python-version: 3.8 + channels: huggingface,conda-forge + + - name: Setup conda env + run: | + conda install -c defaults anaconda-client conda-build + + - name: Extract version + run: echo "DATASETS_VERSION=`python setup.py --version`" >> $GITHUB_ENV + + - name: Build conda packages + run: | + conda info + conda-build .github/conda + + - name: Upload to Anaconda + run: | + anaconda upload `conda-build 
.github/conda --output -c conda-forge` --force diff --git a/testbed/huggingface__datasets/.github/workflows/self-assign.yaml b/testbed/huggingface__datasets/.github/workflows/self-assign.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c1f423ad6ddd0da1daf36f64196fca8c1545173 --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/self-assign.yaml @@ -0,0 +1,16 @@ +name: Self-assign +on: + issue_comment: + types: created +jobs: + one: + runs-on: ubuntu-latest + if: >- + (github.event.comment.body == '#take' || + github.event.comment.body == '#self-assign') + && !github.event.issue.assignee + steps: + - run: | + echo "Assigning issue ${{ github.event.issue.number }} to ${{ github.event.comment.user.login }}" + curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"assignees": ["${{ github.event.comment.user.login }}"]}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees + curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -X "DELETE" https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/labels/help%20wanted diff --git a/testbed/huggingface__datasets/.github/workflows/upload_pr_documentation.yml b/testbed/huggingface__datasets/.github/workflows/upload_pr_documentation.yml new file mode 100644 index 0000000000000000000000000000000000000000..9977120fa58c6a28d6493a3aee44363bc489f9ca --- /dev/null +++ b/testbed/huggingface__datasets/.github/workflows/upload_pr_documentation.yml @@ -0,0 +1,16 @@ +name: Upload PR Documentation + +on: + workflow_run: + workflows: ["Build PR Documentation"] + types: + - completed + +jobs: + build: + uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main + with: + package_name: datasets + secrets: + hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} + comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} \ No newline at end of file diff --git a/testbed/huggingface__datasets/benchmarks/benchmark_getitem_100B.py b/testbed/huggingface__datasets/benchmarks/benchmark_getitem_100B.py new file mode 100644 index 0000000000000000000000000000000000000000..a8e4f0dd762804e818b801b12d12653647ee42cb --- /dev/null +++ b/testbed/huggingface__datasets/benchmarks/benchmark_getitem_100B.py @@ -0,0 +1,78 @@ +import json +import os +from dataclasses import dataclass + +import numpy as np +import pyarrow as pa + +import datasets +from utils import get_duration + + +SPEED_TEST_N_EXAMPLES = 100_000_000_000 +SPEED_TEST_CHUNK_SIZE = 10_000 + +RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) +RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) + + +def generate_100B_dataset(num_examples: int, chunk_size: int) -> datasets.Dataset: + table = pa.Table.from_pydict({"col": [0] * chunk_size}) + table = pa.concat_tables([table] * (num_examples // chunk_size)) + return datasets.Dataset(table, fingerprint="table_100B") + + +@dataclass +class RandIter: + low: int + high: int + size: int + seed: int + + def __post_init__(self): + rng = np.random.default_rng(self.seed) + self._sampled_values = rng.integers(low=self.low, high=self.high, size=self.size).tolist() + + def __iter__(self): + return iter(self._sampled_values) + + def __len__(self): + return self.size + + +@get_duration +def get_first_row(dataset: datasets.Dataset): + _ = dataset[0] + + +@get_duration +def get_last_row(dataset: datasets.Dataset): + _ = dataset[-1] + + +@get_duration +def get_batch_of_1024_rows(dataset: 
datasets.Dataset): + _ = dataset[range(len(dataset) // 2, len(dataset) // 2 + 1024)] + + +@get_duration +def get_batch_of_1024_random_rows(dataset: datasets.Dataset): + _ = dataset[RandIter(0, len(dataset), 1024, seed=42)] + + +def benchmark_table_100B(): + times = {"num examples": SPEED_TEST_N_EXAMPLES} + functions = (get_first_row, get_last_row, get_batch_of_1024_rows, get_batch_of_1024_random_rows) + print("generating dataset") + dataset = generate_100B_dataset(num_examples=SPEED_TEST_N_EXAMPLES, chunk_size=SPEED_TEST_CHUNK_SIZE) + print("Functions") + for func in functions: + print(func.__name__) + times[func.__name__] = func(dataset) + + with open(RESULTS_FILE_PATH, "wb") as f: + f.write(json.dumps(times).encode("utf-8")) + + +if __name__ == "__main__": # useful to run the profiler + benchmark_table_100B() diff --git a/testbed/huggingface__datasets/benchmarks/benchmark_indices_mapping.py b/testbed/huggingface__datasets/benchmarks/benchmark_indices_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..d35d9fb184cfb6caa45a81aa45036a2c3893a766 --- /dev/null +++ b/testbed/huggingface__datasets/benchmarks/benchmark_indices_mapping.py @@ -0,0 +1,60 @@ +import json +import os +import tempfile + +import datasets +from utils import generate_example_dataset, get_duration + + +SPEED_TEST_N_EXAMPLES = 500_000 + +RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) +RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) + + +@get_duration +def select(dataset: datasets.Dataset): + _ = dataset.select(range(0, len(dataset), 2)) + + +@get_duration +def sort(dataset: datasets.Dataset): + _ = dataset.sort("numbers") + + +@get_duration +def shuffle(dataset: datasets.Dataset): + _ = dataset.shuffle() + + +@get_duration +def train_test_split(dataset: datasets.Dataset): + _ = dataset.train_test_split(0.1) + + +@get_duration +def shard(dataset: datasets.Dataset, num_shards=10): + for shard_id in range(num_shards): + _ = dataset.shard(num_shards, shard_id) + + +def benchmark_indices_mapping(): + times = {"num examples": SPEED_TEST_N_EXAMPLES} + functions = (select, sort, shuffle, train_test_split, shard) + with tempfile.TemporaryDirectory() as tmp_dir: + print("generating dataset") + features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")}) + dataset = generate_example_dataset( + os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES + ) + print("Functions") + for func in functions: + print(func.__name__) + times[func.__name__] = func(dataset) + + with open(RESULTS_FILE_PATH, "wb") as f: + f.write(json.dumps(times).encode("utf-8")) + + +if __name__ == "__main__": # useful to run the profiler + benchmark_indices_mapping() diff --git a/testbed/huggingface__datasets/benchmarks/results/.gitkeep b/testbed/huggingface__datasets/benchmarks/results/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/benchmarks/results/benchmark_indices_mapping.json b/testbed/huggingface__datasets/benchmarks/results/benchmark_indices_mapping.json new file mode 100644 index 0000000000000000000000000000000000000000..0af6c0fa722916d6dcb7f0bca44bc99f078193ed --- /dev/null +++ b/testbed/huggingface__datasets/benchmarks/results/benchmark_indices_mapping.json @@ -0,0 +1 @@ +{"num examples": 500000, "select": 0.03741131999413483, "sort": 0.7371353159978753, "shuffle": 
0.17655655200360343, "train_test_split": 0.29633847798686475, "shard": 0.01452581599005498}
\ No newline at end of file
diff --git a/testbed/huggingface__datasets/docs/README.md b/testbed/huggingface__datasets/docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ec1b07e1c2a19a37fb99ecdbae02de81731b5cb
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/README.md
@@ -0,0 +1,260 @@
+
+
+# Generating the documentation
+
+To generate the documentation, you first have to build it. Several packages are necessary to build the doc;
+you can install them with the following command, at the root of the code repository:
+
+```bash
+pip install -e ".[docs]"
+```
+
+Then you need to install our special tool that builds the documentation:
+
+```bash
+pip install git+https://github.com/huggingface/doc-builder
+```
+
+---
+**NOTE**
+
+You only need to generate the documentation to inspect it locally (if you're planning changes and want to
+check how they look before committing for instance). You don't have to `git commit` the built documentation.
+
+---
+
+## Building the documentation
+
+Once you have set up the `doc-builder` and additional packages, you can generate the documentation by typing
+the following command:
+
+```bash
+doc-builder build datasets docs/source/ --build_dir ~/tmp/test-build
+```
+
+You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
+the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
+Markdown editor.
+
+## Previewing the documentation
+
+To preview the docs, first install the `watchdog` module with:
+
+```bash
+pip install watchdog
+```
+
+Then run the following command:
+
+```bash
+doc-builder preview datasets docs/source/
+```
+
+The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment with a link to where the documentation with your changes lives.
+
+---
+**NOTE**
+
+The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart the `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
+
+## Adding a new element to the navigation bar
+
+Accepted files are Markdown (.md or .mdx).
+
+Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
+the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/datasets/blob/main/docs/source/_toctree.yml) file.
+
+## Renaming section headers and moving sections
+
+It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums and Social media and it'd make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.
+
+Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
+ +So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: + +``` +Sections that were moved: + +[ <a href="#section-b">Section A</a><a id="section-a"></a> ] +``` +and of course if you moved it to another file, then: + +``` +Sections that were moved: + +[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ] +``` + +Use the relative style to link to the new file so that the versioned docs continue to work. + +For an example of a rich moved sections set, please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md). + + +## Writing Documentation - Specification + +The `huggingface/datasets` documentation follows the +[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, +although we can write them directly in Markdown. + +### Adding a new tutorial + +Adding a new tutorial or section is done in two steps: + +- Add a new file under `./source`. This file should be Markdown (.md or .mdx). +- Link that file in `./source/_toctree.yml` on the correct toc-tree. + +Make sure to put your new file under the proper section. If in doubt, feel free to ask in a GitHub issue or PR. + +### Writing source documentation + +Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names +and objects like True, None or any strings should usually be put in `code`. + +When mentioning a class, function or method, it is recommended to use our syntax for internal links so that our tool +adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or +function to be in the main package. + +If you want to create a link to some internal class or function, you need to +provide its path. For instance: \[\`table.InMemoryTable\`\]. This will be converted into a link with +`table.InMemoryTable` in the description. To get rid of the path and only keep the name of the object you are +linking to in the description, add a ~: \[\`~table.InMemoryTable\`\] will generate a link with `InMemoryTable` in the description. + +The same works for methods so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\]. + +#### Defining arguments in a method + +Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and +an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon and its +description: + +``` + Args: + n_layers (`int`): The number of layers of the model. +``` + +If the description is too long to fit in one line, another indentation is necessary before writing the description +after the argument. + +Here's an example showcasing everything so far: + +``` + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and + [`~PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) +``` + +For optional arguments or arguments with defaults, we use the following syntax: imagine we have a function with the +following signature: + +``` +def my_function(x: str = None, a: float = 1): +``` + +then its documentation should look like this: + +``` + Args: + x (`str`, *optional*): + This argument controls ... + a (`float`, *optional*, defaults to 1): + This argument is used to ... 
+``` + +Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even +if the first line describing your argument type and its default gets long, you can't break it into several lines. You can +however write as many lines as you want in the indented description (see the example above with `input_ids`). + +#### Writing a multi-line code block + +Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown: + + +```` +``` +# first line of code +# second line +# etc +``` +```` + +#### Writing a return block + +The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation. +The first line should be the type of the return, followed by a line return. No need to indent further for the elements +building the return. + +Here's an example of a single value return: + +``` + Returns: + `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. +``` + +Here's an example of a tuple return, comprising several objects: + +``` + Returns: + `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs: + - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` -- + Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. + - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). +``` + +#### Adding an image + +Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like +the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference +them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). +If you are an external contributor, feel free to add the images to your PR and ask a Hugging Face member to migrate your images +to this dataset. + +## Writing documentation examples + +The syntax for Example docstrings can look as follows: + +``` + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> def add_prefix(example): + ... example["text"] = "Review: " + example["text"] + ... 
return example + >>> ds = ds.map(add_prefix) + >>> ds[0:3]["text"] + ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', + 'Review: the soundtrack alone is worth the price of admission .', + 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .'] + + # process a batch of examples + >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) + # set number of processors + >>> ds = ds.map(add_prefix, num_proc=4) + ``` +``` + +The docstring should give a minimal, clear example of how the respective class or function is to be used in practice and also include the expected (ideally sensible) output. +Often, readers will try out the example before even going through the function +or class definitions. Therefore, it is of utmost importance that the example +works as expected. diff --git a/testbed/huggingface__datasets/docs/source/_config.py b/testbed/huggingface__datasets/docs/source/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a135cb3e43d58bc9743b771b89115e0f53b2e8 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/_config.py @@ -0,0 +1,11 @@ +# docstyle-ignore +INSTALL_CONTENT = """ +# Datasets installation +! pip install datasets transformers +# To install from source instead of the last release, comment the command above and uncomment the following one. +# ! pip install git+https://github.com/huggingface/datasets.git +""" + +notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] +default_branch_name = "main" +version_prefix = "" diff --git a/testbed/huggingface__datasets/docs/source/_redirects.yml b/testbed/huggingface__datasets/docs/source/_redirects.yml new file mode 100644 index 0000000000000000000000000000000000000000..50373bfea2f2f45f8a46cd2ede67881e46bb3a83 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/_redirects.yml @@ -0,0 +1,14 @@ +# This first_section was backported from nginx +loading_datasets: loading +share_dataset: share +quicktour: quickstart +dataset_streaming: stream +torch_tensorflow: use_dataset +splits: loading#slice-splits +processing: process +faiss_and_ea: faiss_es +features: about_dataset_features +using_metrics: how_to_metrics +exploring: access +package_reference/logging_methods: package_reference/utilities +# end of first_section diff --git a/testbed/huggingface__datasets/docs/source/_toctree.yml b/testbed/huggingface__datasets/docs/source/_toctree.yml new file mode 100644 index 0000000000000000000000000000000000000000..e3155286c200f2ebbdbbde953bdf4ea4854303a5 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/_toctree.yml @@ -0,0 +1,128 @@ +- sections: + - local: index + title: 🤗 Datasets + - local: quickstart + title: Quickstart + - local: installation + title: Installation + title: Get started +- sections: + - local: tutorial + title: Overview + - local: load_hub + title: Load a dataset from the Hub + - local: access + title: Know your dataset + - local: use_dataset + title: Preprocess + - local: metrics + title: Evaluate predictions + - local: create_dataset + title: Create a dataset + - local: upload_dataset + title: Share a dataset to the Hub + title: "Tutorials" +- sections: + - local: how_to + title: Overview + - sections: + - local: loading + title: Load + - local: process + title: Process + - local: stream + title: Stream + - local: use_with_tensorflow + 
title: Use with TensorFlow + - local: use_with_pytorch + title: Use with PyTorch + - local: use_with_jax + title: Use with JAX + - local: use_with_spark + title: Use with Spark + - local: cache + title: Cache management + - local: filesystems + title: Cloud storage + - local: faiss_es + title: Search index + - local: how_to_metrics + title: Metrics + - local: beam + title: Beam Datasets + title: "General usage" + - sections: + - local: audio_load + title: Load audio data + - local: audio_process + title: Process audio data + - local: audio_dataset + title: Create an audio dataset + title: "Audio" + - sections: + - local: image_load + title: Load image data + - local: image_process + title: Process image data + - local: image_dataset + title: Create an image dataset + - local: depth_estimation + title: Depth estimation + - local: image_classification + title: Image classification + - local: semantic_segmentation + title: Semantic segmentation + - local: object_detection + title: Object detection + title: "Vision" + - sections: + - local: nlp_load + title: Load text data + - local: nlp_process + title: Process text data + title: "Text" + - sections: + - local: tabular_load + title: Load tabular data + title: "Tabular" + - sections: + - local: share + title: Share + - local: dataset_card + title: Create a dataset card + - local: repository_structure + title: Structure your repository + - local: dataset_script + title: Create a dataset loading script + title: "Dataset repository" + title: "How-to guides" +- sections: + - local: about_arrow + title: Datasets 🤝 Arrow + - local: about_cache + title: The cache + - local: about_mapstyle_vs_iterable + title: Dataset or IterableDataset + - local: about_dataset_features + title: Dataset features + - local: about_dataset_load + title: Build and load + - local: about_map_batch + title: Batch mapping + - local: about_metrics + title: All about metrics + title: "Conceptual guides" +- sections: + - local: package_reference/main_classes + title: Main classes + - local: package_reference/builder_classes + title: Builder classes + - local: package_reference/loading_methods + title: Loading methods + - local: package_reference/table_classes + title: Table Classes + - local: package_reference/utilities + title: Utilities + - local: package_reference/task_templates + title: Task templates + title: "Reference" diff --git a/testbed/huggingface__datasets/docs/source/about_arrow.md b/testbed/huggingface__datasets/docs/source/about_arrow.md new file mode 100644 index 0000000000000000000000000000000000000000..88b67e7d6f37e5bcde91de1165e95c00eec2118e --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_arrow.md @@ -0,0 +1,50 @@ +# Datasets 🤝 Arrow + +## What is Arrow? + +[Arrow](https://arrow.apache.org/) enables large amounts of data to be processed and moved quickly. It is a specific data format that stores data in a columnar memory layout. This provides several significant advantages: + +* Arrow's standard format allows [zero-copy reads](https://en.wikipedia.org/wiki/Zero-copy) which removes virtually all serialization overhead. +* Arrow is language-agnostic so it supports different programming languages. +* Arrow is column-oriented so it is faster at querying and processing slices or columns of data. +* Arrow allows for copy-free hand-offs to standard machine learning tools such as NumPy, Pandas, PyTorch, and TensorFlow. +* Arrow supports many, possibly nested, column types. + +## Memory-mapping + +🤗 Datasets uses Arrow for its local caching system. 
It allows datasets to be backed by an on-disk cache, which is memory-mapped for fast lookup. +This architecture allows for large datasets to be used on machines with relatively small device memory. + +For example, loading the full English Wikipedia dataset only takes about 50 MB of RAM: + +```python +>>> import os; import psutil; import timeit +>>> from datasets import load_dataset + +# Process.memory_info is expressed in bytes, so convert to megabytes +>>> mem_before = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024) +>>> wiki = load_dataset("wikipedia", "20220301.en", split="train") +>>> mem_after = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024) + +>>> print(f"RAM memory used: {(mem_after - mem_before)} MB") +RAM memory used: 50 MB +``` + +This is possible because the Arrow data is actually memory-mapped from disk, and not loaded in memory. +Memory-mapping allows access to data on disk, and leverages virtual memory capabilities for fast lookups. + +## Performance + +Iterating over a memory-mapped dataset using Arrow is fast. Iterating over Wikipedia on a laptop gives you speeds of several Gbit/s: + +```python +>>> s = """batch_size = 1000 +... for batch in wiki.iter(batch_size): +... ... +... """ + +>>> elapsed_time = timeit.timeit(stmt=s, number=1, globals=globals()) +>>> print(f"Time to iterate over the {wiki.dataset_size >> 30} GB dataset: {elapsed_time:.1f} sec, " +... f"i.e. {float(wiki.dataset_size >> 27)/elapsed_time:.1f} Gb/s") +Time to iterate over the 18 GB dataset: 31.8 sec, i.e. 4.8 Gb/s +``` diff --git a/testbed/huggingface__datasets/docs/source/about_cache.mdx b/testbed/huggingface__datasets/docs/source/about_cache.mdx new file mode 100644 index 0000000000000000000000000000000000000000..cac9eb7634e1d7d74fd5ae036e6f514d93c8989a --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_cache.mdx @@ -0,0 +1,55 @@ +# The cache + +The cache is one of the reasons why 🤗 Datasets is so efficient. It stores previously downloaded and processed datasets so that when you need to use them again, they are reloaded directly from the cache. This avoids having to download a dataset all over again, or reapplying processing functions. Even after you close and start another Python session, 🤗 Datasets will reload your dataset directly from the cache! + +## Fingerprint + +How does the cache keep track of what transforms are applied to a dataset? Well, 🤗 Datasets assigns a fingerprint to the cache file. A fingerprint keeps track of the current state of a dataset. The initial fingerprint is computed using a hash from the Arrow table, or a hash of the Arrow files if the dataset is on disk. Subsequent fingerprints are computed by combining the fingerprint of the previous state, and a hash of the latest transform applied. + +<Tip> + +Transforms are any of the processing methods from the [How-to Process](./process) guides such as [`Dataset.map`] or [`Dataset.shuffle`]. + +</Tip> + +Here is what the actual fingerprints look like: + +```py +>>> from datasets import Dataset +>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]}) +>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1}) +>>> print(dataset1._fingerprint, dataset2._fingerprint) +d19493523d95e2dc 5b86abacd4b42434 +``` + +In order for a transform to be hashable, it needs to be picklable by [dill](https://dill.readthedocs.io/en/latest/) or [pickle](https://docs.python.org/3/library/pickle). + +When you use a non-hashable transform, 🤗 Datasets uses a random fingerprint instead and raises a warning.
The non-hashable transform is considered different from the previous transforms. As a result, 🤗 Datasets will recompute all the transforms. Make sure your transforms are serializable with pickle or dill to avoid this! + +An example of when 🤗 Datasets recomputes everything is when caching is disabled. When this happens, the cache files are generated every time and they get written to a temporary directory. Once your Python session ends, the cache files in the temporary directory are deleted. A random hash is assigned to these cache files, instead of a fingerprint. + +<Tip> + +When caching is disabled, use [`Dataset.save_to_disk`] to save your transformed dataset or it will be deleted once the session ends. + +</Tip> + +## Hashing + +The fingerprint of a dataset is updated by hashing the function passed to `map` as well as the `map` parameters (`batch_size`, `remove_columns`, etc.). + +You can check the hash of any Python object using the [`fingerprint.Hasher`]: + +```py +>>> from datasets.fingerprint import Hasher +>>> my_func = lambda example: {"length": len(example["text"])} +>>> print(Hasher.hash(my_func)) +3d35e2b3e94c81d6 +``` + +The hash is computed by dumping the object using a `dill` pickler and hashing the dumped bytes. +The pickler recursively dumps all the variables used in your function, so any change you make to an object that is used in your function will cause the hash to change. + +If one of your functions doesn't seem to have the same hash across sessions, it means at least one of its variables contains a Python object that is not deterministic. +When this happens, feel free to hash any object you find suspicious to try to identify which one caused the hash to change. +For example, if you use a list for which the order of its elements is not deterministic across sessions, then the hash won't be the same across sessions either. diff --git a/testbed/huggingface__datasets/docs/source/about_dataset_features.mdx b/testbed/huggingface__datasets/docs/source/about_dataset_features.mdx new file mode 100644 index 0000000000000000000000000000000000000000..12a85477645f85a7b408cb86fa35644e6a98f426 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_dataset_features.mdx @@ -0,0 +1,149 @@ +# Dataset features + +[`Features`] defines the internal structure of a dataset. It is used to specify the underlying serialization format. What's more interesting to you though is that [`Features`] contains high-level information about everything from the column names and types to the [`ClassLabel`]. You can think of [`Features`] as the backbone of a dataset. + +The [`Features`] format is simple: `dict[column_name, column_type]`. It is a dictionary of column name and column type pairs. The column type provides a wide range of options for describing the type of data you have. + +Let's have a look at the features of the MRPC dataset from the GLUE benchmark: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset('glue', 'mrpc', split='train') +>>> dataset.features +{'idx': Value(dtype='int32', id=None), + 'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None), + 'sentence1': Value(dtype='string', id=None), + 'sentence2': Value(dtype='string', id=None), +} +``` + +The [`Value`] feature tells 🤗 Datasets: + +- The `idx` data type is `int32`. +- The `sentence1` and `sentence2` data types are `string`. + +🤗 Datasets supports many other data types such as `bool`, `float32` and `binary` to name just a few.
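+As a quick illustration, here is a minimal sketch of declaring a schema with such types yourself (the column names are invented for the example): + +```py +>>> from datasets import Features, Value +>>> features = Features({"flag": Value("bool"), "score": Value("float32"), "payload": Value("binary")}) +```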
+ +<Tip> + +Refer to [`Value`] for a full list of supported data types. + +</Tip> + +The [`ClassLabel`] feature informs 🤗 Datasets that the `label` column contains two classes. The classes are labeled `not_equivalent` and `equivalent`. Labels are stored as integers in the dataset. When you retrieve the labels, [`ClassLabel.int2str`] and [`ClassLabel.str2int`] carry out the conversion from integer value to label name, and vice versa. + +If your data type contains a list of objects, then you want to use the [`Sequence`] feature. Remember the SQuAD dataset? + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset('squad', split='train') +>>> dataset.features +{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), +'context': Value(dtype='string', id=None), +'id': Value(dtype='string', id=None), +'question': Value(dtype='string', id=None), +'title': Value(dtype='string', id=None)} +``` + +The `answers` field is constructed using the [`Sequence`] feature because it contains two subfields, `text` and `answer_start`, which are lists of `string` and `int32`, respectively. + +<Tip> + +See the [flatten](./process#flatten) section to learn how you can extract the nested subfields as their own independent columns. + +</Tip> + +The array feature type is useful for creating arrays of various sizes. You can create arrays with two dimensions using [`Array2D`], and even arrays with five dimensions using [`Array5D`]. + +```py +>>> from datasets import Features, Array2D +>>> features = Features({'a': Array2D(shape=(1, 3), dtype='int32')}) +``` + +The array type also allows the first dimension of the array to be dynamic. This is useful for handling sequences with variable lengths such as sentences, without having to pad or truncate the input to a uniform shape. + +```py +>>> features = Features({'a': Array3D(shape=(None, 5, 2), dtype='int32')}) +``` + +## Audio feature + +Audio datasets have a column with type [`Audio`], which contains three important fields: + +* `array`: the decoded audio data represented as a 1-dimensional array. +* `path`: the path to the downloaded audio file. +* `sampling_rate`: the sampling rate of the audio data. + +When you load an audio dataset and call the audio column, the [`Audio`] feature automatically decodes and resamples the audio file: + +```py +>>> from datasets import load_dataset, Audio + +>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") +>>> dataset[0]["audio"] +{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, + 0. , 0. ], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'sampling_rate': 8000} +``` + +<Tip warning={true}> + +Index into an audio dataset using the row index first and then the `audio` column - `dataset[0]["audio"]` - to avoid decoding and resampling all the audio files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.
+ +</Tip> + +With `decode=False`, the [`Audio`] type simply gives you the path or the bytes of the audio file, without decoding it into an `array`: + +```py +>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train").cast_column("audio", Audio(decode=False)) +>>> dataset[0] +{'audio': {'bytes': None, + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav'}, + 'english_transcription': 'I would like to set up a joint account with my partner', + 'intent_class': 11, + 'lang_id': 4, + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'transcription': 'I would like to set up a joint account with my partner'} +``` + +## Image feature + +Image datasets have a column with type [`Image`], which loads `PIL.Image` objects from images stored as bytes. + +When you load an image dataset and call the image column, the [`Image`] feature automatically decodes the image file: + +```py +>>> from datasets import load_dataset, Image + +>>> dataset = load_dataset("beans", split="train") +>>> dataset[0]["image"] +<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x125506CF8> +``` + +<Tip warning={true}> + +Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding all the image files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. + +</Tip> + +With `decode=False`, the [`Image`] type simply gives you the path or the bytes of the image file, without decoding it into a `PIL.Image`: + +```py +>>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False)) +>>> dataset[0]["image"] +{'bytes': None, + 'path': '/Users/username/.cache/huggingface/datasets/downloads/extracted/772e7c1fba622cff102b85dd74bcce46e8168634df4eaade7bedd3b8d91d3cd7/train/healthy/healthy_train.265.jpg'} +``` + +Depending on the dataset, you may get the path to the local downloaded image, or the content of the image as bytes if the dataset is not made of individual files. + +You can also define a dataset of images from numpy arrays: + +```python +>>> import numpy as np +>>> from datasets import Dataset, Features, Image +>>> ds = Dataset.from_dict({"i": [np.zeros(shape=(16, 16, 3), dtype=np.uint8)]}, features=Features({"i": Image()})) +``` + +And in this case the numpy arrays are encoded into PNG (or TIFF if the pixel values' precision is important). + +For multi-channel arrays like RGB or RGBA, only uint8 is supported. If you use a larger precision, you get a warning and the array is downcast to uint8. +For grayscale images, you can use the integer or float precision you want as long as it is compatible with `Pillow`. A warning is shown if your image integer or float precision is too high, and in this case the array is downcast: an int64 array is downcast to int32, and a float64 array is downcast to float32. diff --git a/testbed/huggingface__datasets/docs/source/about_dataset_load.mdx b/testbed/huggingface__datasets/docs/source/about_dataset_load.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2498ae22a6bd65196d37e18e5640fdb01a654942 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_dataset_load.mdx @@ -0,0 +1,120 @@ +# Build and load + +Nearly every deep learning workflow begins with loading a dataset, which makes it one of the most important steps. With 🤗 Datasets, there are more than 900 datasets available to help you get started with your NLP task.
All you have to do is call [`load_dataset`] to take your first step. This function is a true workhorse in every sense because it builds and loads every dataset you use. + +## ELI5: `load_dataset` + +Let's begin with a basic Explain Like I'm Five. + +A dataset is a directory that contains: + +- Some data files in generic formats (JSON, CSV, Parquet, text, etc.) +- A dataset card named `README.md` that contains documentation about the dataset as well as a YAML header to define the dataset's tags and configurations +- An optional dataset script if it requires some code to read the data files. This is sometimes used to load files of specific formats and structures. + +The [`load_dataset`] function fetches the requested dataset locally or from the Hugging Face Hub. +The Hub is a central repository where all the Hugging Face datasets and models are stored. + +If the dataset only contains data files, then [`load_dataset`] automatically infers how to load the data files from their extensions (json, csv, parquet, txt, etc.). +Under the hood, 🤗 Datasets will use an appropriate [`DatasetBuilder`] based on the data file format. There is one builder per data file format in 🤗 Datasets: + +* [`datasets.packaged_modules.text.Text`] for text +* [`datasets.packaged_modules.csv.Csv`] for CSV and TSV +* [`datasets.packaged_modules.json.Json`] for JSON and JSONL +* [`datasets.packaged_modules.parquet.Parquet`] for Parquet +* [`datasets.packaged_modules.arrow.Arrow`] for Arrow (streaming file format) +* [`datasets.packaged_modules.sql.Sql`] for SQL databases +* [`datasets.packaged_modules.imagefolder.ImageFolder`] for image folders +* [`datasets.packaged_modules.audiofolder.AudioFolder`] for audio folders + +If the dataset has a dataset script, then it downloads and imports it from the Hugging Face Hub. +Code in the dataset script defines a custom [`DatasetBuilder`] with the dataset information (description, features, URL to the original files, etc.), and tells 🤗 Datasets how to generate and display examples from it. + +<Tip> + +Read the [Share](./upload_dataset) section to learn more about how to share a dataset. This section also provides a step-by-step guide on how to write your own dataset loading script! + +</Tip> + +🤗 Datasets downloads the dataset files from the original URL, generates the dataset and caches it in an Arrow table on your drive. +If you've downloaded the dataset before, then 🤗 Datasets will reload it from the cache to save you the trouble of downloading it again. + +Now that you have a high-level understanding about how datasets are built, let's take a closer look at the nuts and bolts of how all this works. + +## Building a dataset + +When you load a dataset for the first time, 🤗 Datasets takes the raw data file and builds it into a table of rows and typed columns. There are two main classes responsible for building a dataset: [`BuilderConfig`] and [`DatasetBuilder`]. + +
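+If you want to peek at these two classes yourself, [`load_dataset_builder`] returns the builder without generating the dataset; here is a minimal sketch (reusing `rotten_tomatoes` purely as an example): + +```py +>>> from datasets import load_dataset_builder +>>> builder = load_dataset_builder("rotten_tomatoes") +>>> builder.config  # the BuilderConfig instance backing this builder +>>> builder.info.features  # dataset metadata assembled by the builder +```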
+ +
+ +### BuilderConfig[[datasets-builderconfig]] + +[`BuilderConfig`] is the configuration class of [`DatasetBuilder`]. The [`BuilderConfig`] contains the following basic attributes about a dataset: + +| Attribute | Description | +|---------------|--------------------------------------------------------------| +| `name` | Short name of the dataset. | +| `version` | Dataset version identifier. | +| `data_dir` | Stores the path to a local folder containing the data files. | +| `data_files` | Stores paths to local data files. | +| `description` | Description of the dataset. | + +If you want to add additional attributes to your dataset such as the class labels, you can subclass the base [`BuilderConfig`] class. There are two ways to populate the attributes of a [`BuilderConfig`] class or subclass: + +- Provide a list of predefined [`BuilderConfig`] class (or subclass) instances in the dataset's [`DatasetBuilder.BUILDER_CONFIGS`] attribute. + +- When you call [`load_dataset`], any keyword arguments that are not specific to the method will be used to set the associated attributes of the [`BuilderConfig`] class. This will override the predefined attributes if a specific configuration was selected. + +You can also set the [`DatasetBuilder.BUILDER_CONFIG_CLASS`] to any custom subclass of [`BuilderConfig`]. + +### DatasetBuilder[[datasets-datasetbuilder]] + +[`DatasetBuilder`] accesses all the attributes inside [`BuilderConfig`] to build the actual dataset. + +
+ +
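+To see where these pieces live in practice, here is a minimal sketch of a builder (the class name, URL, and column are invented for illustration; [`GeneratorBasedBuilder`] is the usual concrete subclass of [`DatasetBuilder`]). The three methods it overrides are described in detail below: + +```py +import datasets + + +class MyDataset(datasets.GeneratorBasedBuilder): +    def _info(self): +        # Declare the dataset attributes, including its Features +        return datasets.DatasetInfo(features=datasets.Features({"text": datasets.Value("string")})) + +    def _split_generators(self, dl_manager): +        # Download the raw data files and organize them into splits +        path = dl_manager.download_and_extract("https://example.com/corpus.txt.gz")  # hypothetical URL +        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path})] + +    def _generate_examples(self, filepath): +        # Yield (key, example) pairs that match the features declared in _info +        with open(filepath, encoding="utf-8") as f: +            for key, line in enumerate(f): +                yield key, {"text": line.strip()} +```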
+ +There are three main methods in [`DatasetBuilder`]: + +1. [`DatasetBuilder._info`] is in charge of defining the dataset attributes. When you call `dataset.info`, 🤗 Datasets returns the information stored here. Likewise, the [`Features`] are also specified here. Remember, the [`Features`] are like the skeleton of the dataset. They provide the names and types of each column. + +2. [`DatasetBuilder._split_generators`] downloads or retrieves the requested data files, organizes them into splits, and defines specific arguments for the generation process. This method has a [`DownloadManager`] that downloads files or fetches them from your local filesystem. Within the [`DownloadManager`], there is a [`DownloadManager.download_and_extract`] method that accepts a dictionary of URLs to the original data files, and downloads the requested files. Accepted inputs include: a single URL or path, or a list/dictionary of URLs or paths. Any compressed file types like TAR, GZIP and ZIP archives will be automatically extracted. + + Once the files are downloaded, [`SplitGenerator`] organizes them into splits. The [`SplitGenerator`] contains the name of the split, and any keyword arguments that are provided to the [`DatasetBuilder._generate_examples`] method. The keyword arguments can be specific to each split, and typically comprise at least the local path to the data files for each split. + +3. [`DatasetBuilder._generate_examples`] reads and parses the data files for a split. Then it yields dataset examples according to the format specified in the `features` from [`DatasetBuilder._info`]. The input of [`DatasetBuilder._generate_examples`] is actually the `filepath` provided in the keyword arguments of the previous method. + + The dataset is generated with a Python generator, which doesn't load all the data in memory. As a result, the generator can handle large datasets. However, before the generated samples are flushed to the dataset file on disk, they are stored in an `ArrowWriter` buffer. This means the generated samples are written by batch. If your dataset samples consume a lot of memory (images or videos), then make sure to specify a low value for the `DEFAULT_WRITER_BATCH_SIZE` attribute in [`DatasetBuilder`]. We recommend not exceeding a size of 200 MB. + +## Maintaining integrity + +To ensure a dataset is complete, [`load_dataset`] will perform a series of tests on the downloaded files to make sure everything is there. This way, you don't encounter any surprises when your requested dataset doesn't get generated as expected. [`load_dataset`] verifies: + +- The number of splits in the generated `DatasetDict`. +- The number of samples in each split of the generated `DatasetDict`. +- The list of downloaded files. +- The SHA256 checksums of the downloaded files (disabled by default). + +If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files. + +<Tip> + +If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata. + +</Tip> + +In this case, an error is raised to alert you that the dataset has changed. +To ignore the error, you need to specify `verification_mode="no_checks"` in [`load_dataset`].
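+For example, a minimal sketch of bypassing the checks (the dataset name is purely illustrative): + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("rotten_tomatoes", verification_mode="no_checks")  # skips the split and checksum verifications +```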
+Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated. + +## Security + +The dataset repositories on the Hub are scanned for malware; see more information [here](https://huggingface.co/docs/hub/security#malware-scanning). + +Moreover, the datasets without a namespace (originally contributed on our GitHub repository) have all been reviewed by our maintainers. +The code of these datasets is considered **safe**. +It concerns datasets that are not under a namespace, e.g. "squad" or "glue", unlike the other datasets that are named "username/dataset_name" or "org/dataset_name". diff --git a/testbed/huggingface__datasets/docs/source/about_map_batch.mdx b/testbed/huggingface__datasets/docs/source/about_map_batch.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4ebbdf9acaf31f523fef8c91d0a02964b82e9118 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_map_batch.mdx @@ -0,0 +1,40 @@ +# Batch mapping + +Combining the utility of [`Dataset.map`] with batch mode is very powerful. It allows you to speed up processing, and freely control the size of the generated dataset. + +## Need for speed + +The primary objective of batch mapping is to speed up processing. Oftentimes, it is faster to work with batches of data instead of single examples. Naturally, batch mapping lends itself to tokenization. For example, the 🤗 [Tokenizers](https://huggingface.co/docs/tokenizers/python/latest/) library works faster with batches because it parallelizes the tokenization of all the examples in a batch. + +## Input size != output size + +The ability to control the size of the generated dataset can be leveraged for many interesting use cases. In the How-to [map](#map) section, there are examples of using batch mapping to: + +- Split long sentences into shorter chunks. +- Augment a dataset with additional tokens. + +It is helpful to understand how this works, so you can come up with your own ways to use batch mapping. At this point, you may be wondering how you can control the size of the generated dataset. The answer is: **the mapped function does not have to return an output batch of the same size**. + +In other words, your mapped function input can be a batch of size `N` and return a batch of size `M`. The output `M` can be greater than or less than `N`. This means you can concatenate your examples, divide them up, and even add more examples! + +However, remember that all values in the output dictionary must contain the **same number of elements** as the other fields in the output dictionary. Otherwise, it is not possible to define the number of examples in the output returned by the mapped function. The number can vary between successive batches processed by the mapped function. For a single batch though, all values of the output dictionary should have the same length (i.e., the number of elements). + +For example, from a dataset of 1 column and 3 rows, if you use `map` to return a new column with twice as many rows, then you will have an error. +In this case, you end up with one column with 3 rows, and one column with 6 rows.
As you can see, the table will not be valid: + +```py +>>> from datasets import Dataset +>>> dataset = Dataset.from_dict({"a": [0, 1, 2]}) +>>> dataset.map(lambda batch: {"b": batch["a"] * 2}, batched=True) # new column with 6 elements: [0, 1, 2, 0, 1, 2] +'ArrowInvalid: Column 1 named b expected length 3 but got length 6' +``` + +To make it valid, you have to drop one of the columns: + +```py +>>> from datasets import Dataset +>>> dataset = Dataset.from_dict({"a": [0, 1, 2]}) +>>> dataset_with_duplicates = dataset.map(lambda batch: {"b": batch["a"] * 2}, remove_columns=["a"], batched=True) +>>> len(dataset_with_duplicates) +6 +``` diff --git a/testbed/huggingface__datasets/docs/source/about_mapstyle_vs_iterable.mdx b/testbed/huggingface__datasets/docs/source/about_mapstyle_vs_iterable.mdx new file mode 100644 index 0000000000000000000000000000000000000000..eeffbbb9330361e598143d9da383358a0d228e90 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_mapstyle_vs_iterable.mdx @@ -0,0 +1,219 @@ +# Differences between Dataset and IterableDataset + +There are two types of dataset objects, a [`Dataset`] and an [`IterableDataset`]. +Which type of dataset you should use or create depends on the size of the dataset. +In general, an [`IterableDataset`] is ideal for big datasets (think hundreds of GBs!) due to its lazy behavior and speed advantages, while a [`Dataset`] is great for everything else. +This page will compare the differences between a [`Dataset`] and an [`IterableDataset`] to help you pick the right dataset object for you. + +## Downloading and streaming + +When you have a regular [`Dataset`], you can access it using `my_dataset[0]`. This provides random access to the rows. +Such datasets are also called "map-style" datasets. +For example, you can download ImageNet-1k like this and access any row: + +```python +from datasets import load_dataset + +imagenet = load_dataset("imagenet-1k", split="train") # downloads the full dataset +print(imagenet[0]) +``` + +But one caveat is that you must have the entire dataset stored on your disk or in memory, which blocks you from accessing datasets bigger than your disk. +Because it can become inconvenient for big datasets, there exists another type of dataset, the [`IterableDataset`]. +When you have an `IterableDataset`, you can access it using a `for` loop to load the data progressively as you iterate over the dataset. +This way, only a small fraction of examples is loaded in memory, and you don't write anything on disk. + +For example, you can stream the ImageNet-1k dataset without downloading it on disk: + +```python +from datasets import load_dataset + +imagenet = load_dataset("imagenet-1k", split="train", streaming=True) # will start loading the data when iterated over +for example in imagenet: + print(example) + break +``` + +Streaming can read online data without writing any file to disk. +For example, you can stream datasets made out of multiple shards, each of which is hundreds of gigabytes like [C4](https://huggingface.co/datasets/c4), [OSCAR](https://huggingface.co/datasets/oscar) or [LAION-2B](https://huggingface.co/datasets/laion/laion2B-en). +Learn more about how to stream a dataset in the [Dataset Streaming Guide](./stream). + +This is not the only difference though, because the "lazy" behavior of an `IterableDataset` is also present when it comes to dataset creation and processing.
+ +## Creating map-style datasets and iterable datasets + +You can create a [`Dataset`] using lists or dictionaries, and the data is entirely converted to Arrow so you can easily access any row: +```python +from datasets import Dataset + +my_dataset = Dataset.from_dict({"col_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}) +print(my_dataset[0]) +``` + +To create an `IterableDataset` on the other hand, you must provide a "lazy" way to load the data. +In Python, we generally use generator functions. These functions `yield` one example at a time, which means you can't access a row by slicing it like a regular `Dataset`: +```python +from datasets import IterableDataset + +def my_generator(n): + for i in range(n): + yield {"col_1": i} + +my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs={"n": 10}) +for example in my_iterable_dataset: + print(example) + break +``` + +## Loading local files entirely and progressively + +It is possible to convert local or remote data files to an Arrow [`Dataset`] using [`load_dataset`]: +```python +data_files = {"train": ["path/to/data.csv"]} +my_dataset = load_dataset("csv", data_files=data_files, split="train") +print(my_dataset[0]) +``` + +However, this requires a conversion step from CSV to Arrow format, which takes time and disk space if your dataset is big. + +To save disk space and skip the conversion step, you can define an `IterableDataset` by streaming from the local files directly. +This way, the data is read progressively from the local files as you iterate over the dataset: + +```python +data_files = {"train": ["path/to/data.csv"]} +my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True) +for example in my_iterable_dataset: # this reads the CSV file progressively as you iterate over the dataset + print(example) + break +``` + +Many file formats are supported, like CSV, JSONL, and Parquet, as well as image and audio files. +You can find more information in the corresponding guides for loading [tabular](./tabular_load), [text](./nlp_load), [vision](./image_load), and [audio](./audio_load) datasets. + +## Eager data processing and lazy data processing + +When you process a [`Dataset`] object using [`Dataset.map`], the entire dataset is processed immediately and returned. +This is similar to how `pandas` works, for example. + +```python +my_dataset = my_dataset.map(process_fn) # process_fn is applied on all the examples of the dataset +print(my_dataset[0]) +``` + +On the other hand, due to the "lazy" nature of an `IterableDataset`, calling [`IterableDataset.map`] does not apply your `map` function over the full dataset. +Instead, your `map` function is applied on-the-fly. + +Because of that, you can chain multiple processing steps and they will all run at once when you start iterating over the dataset: + +```python +my_iterable_dataset = my_iterable_dataset.map(process_fn_1) +my_iterable_dataset = my_iterable_dataset.filter(filter_fn) +my_iterable_dataset = my_iterable_dataset.map(process_fn_2) + +# process_fn_1, filter_fn and process_fn_2 are applied on-the-fly when iterating over the dataset +for example in my_iterable_dataset: + print(example) + break +``` + +## Exact and fast approximate shuffling + +When you shuffle a [`Dataset`] using [`Dataset.shuffle`], you apply an exact shuffling of the dataset. +It works by taking a list of indices `[0, 1, 2, ... len(my_dataset) - 1]` and shuffling this list.
+Then, accessing `my_dataset[0]` returns the row pointed to by the first element of the shuffled indices mapping: +```python +my_dataset = my_dataset.shuffle(seed=42) +print(my_dataset[0]) +``` + +Since we don't have random access to the rows in the case of an `IterableDataset`, we can't use a shuffled list of indices and access a row at an arbitrary position. +This prevents the use of exact shuffling. +Instead, a fast approximate shuffling is used in [`IterableDataset.shuffle`]. +It uses a shuffle buffer to sample random examples iteratively from the dataset. +Since the dataset is still read iteratively, it provides excellent speed performance: +```python +my_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) +for example in my_iterable_dataset: + print(example) + break +``` + +But using a shuffle buffer is not enough to provide a satisfactory shuffling for machine learning model training. So [`IterableDataset.shuffle`] also shuffles the dataset shards if your dataset is made of multiple files or sources: + +```python +# Stream from the internet +my_iterable_dataset = load_dataset("c4", "en", split="train", streaming=True) +my_iterable_dataset.n_shards # 1024 + +# Stream from local files +data_files = {"train": [f"path/to/data_{i}.csv" for i in range(1024)]} +my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True) +my_iterable_dataset.n_shards # 1024 + +# From a generator function +def my_generator(n, sources): + for source in sources: + for example_id_for_current_source in range(n): + yield {"example_id": f"{source}_{example_id_for_current_source}"} + +gen_kwargs = {"n": 10, "sources": [f"path/to/data_{i}" for i in range(1024)]} +my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs=gen_kwargs) +my_iterable_dataset.n_shards # 1024 +``` + +## Speed differences + +Regular [`Dataset`] objects are based on Arrow, which provides fast random access to the rows. +Thanks to memory mapping and the fact that Arrow is an in-memory format, reading data from disk doesn't require expensive system calls or deserialization. +It provides even faster data loading when iterating using a `for` loop by iterating on contiguous Arrow record batches. + +However, as soon as your [`Dataset`] has an indices mapping (via [`Dataset.shuffle`] for example), the speed can become 10x slower. +This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. +To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. +This may take a lot of time depending on the size of your dataset though: + +```python +my_dataset[0] # fast +my_dataset = my_dataset.shuffle(seed=42) +my_dataset[0] # up to 10x slower +my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data +my_dataset[0] # fast again +``` + + +In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`]. +It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal.
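+To build intuition for what the shuffle buffer does, here is a small self-contained sketch of buffer-based shuffling (an illustration of the idea only, not the library's actual implementation): + +```python +import random + +def buffer_shuffle(iterable, buffer_size, seed=42): + # Keep a buffer of examples: yield a random one, then replace it with the next incoming example + rng = random.Random(seed) + buffer = [] + for item in iterable: + if len(buffer) < buffer_size: + buffer.append(item) + else: + idx = rng.randrange(buffer_size) + yield buffer[idx] + buffer[idx] = item + rng.shuffle(buffer) # flush the remaining buffered examples in random order + yield from buffer + +print(list(buffer_shuffle(range(10), buffer_size=4))) # a permutation of 0-9 +```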
+You can also reshuffle the dataset easily: + +```python +for example in my_iterable_dataset: # fast + pass + +shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100) + +for example in shuffled_iterable_dataset: # as fast as before + pass + +shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=1337, buffer_size=100) # reshuffling using another seed is instantaneous + +for example in shuffled_iterable_dataset: # still as fast as before + pass +``` + +If you're using your dataset for multiple epochs, the effective seed used to shuffle the shards order in the shuffle buffer is `seed + epoch`. +It makes it easy to reshuffle a dataset between epochs: +```python +for epoch in range(n_epochs): + my_iterable_dataset.set_epoch(epoch) + for example in my_iterable_dataset: # fast + reshuffled at each epoch using `effective_seed = seed + epoch` + pass +``` + +## Switch from map-style to iterable + +If you want to benefit from the "lazy" behavior of an [`IterableDataset`] or its speed advantages, you can switch your map-style [`Dataset`] to an [`IterableDataset`]: +```python +my_iterable_dataset = my_dataset.to_iterable_dataset() +``` + +If you want to shuffle your dataset or [use it with a PyTorch DataLoader](./use_with_pytorch#stream-data), we recommend generating a sharded [`IterableDataset`]: +```python +my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024) +my_iterable_dataset.n_shards # 1024 +``` diff --git a/testbed/huggingface__datasets/docs/source/about_metrics.mdx b/testbed/huggingface__datasets/docs/source/about_metrics.mdx new file mode 100644 index 0000000000000000000000000000000000000000..2e5b722f988a8f58342a09e798347e65522d2ac2 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/about_metrics.mdx @@ -0,0 +1,25 @@ +# All about metrics + +<Tip warning={true}> + +Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. + +</Tip> + +🤗 Datasets provides access to a wide range of NLP metrics. You can load metrics associated with benchmark datasets like GLUE or SQuAD, and complex metrics like BLEURT or BERTScore, with a single command: [`load_metric`]. Once you've loaded a metric, you can easily compute and evaluate a model's performance. + +## ELI5: `load_metric` + +Loading a dataset and loading a metric share many similarities. This was an intentional design choice because we wanted to create a simple and unified experience. When you call [`load_metric`], the metric loading script is downloaded and imported from GitHub (if it hasn't already been downloaded before). It contains information about the metric such as its citation, homepage, and description. + +The metric loading script will instantiate and return a [`Metric`] object. This stores the predictions and references, which you need to compute the metric values. The [`Metric`] object is stored as an Apache Arrow table. As a result, the predictions and references are stored directly on disk with memory-mapping. This enables 🤗 Datasets to do a lazy computation of the metric, and makes it easier to gather all the predictions in a distributed setting. + +## Distributed evaluation + +Computing metrics in a distributed environment can be tricky. Metric evaluation is executed in separate Python processes, or nodes, on different subsets of a dataset.
Typically, when a metric score is additive (`f(A∪B) = f(A) + f(B)`), you can use distributed reduce operations to gather the scores for each subset of the dataset. But when a metric is non-additive (`f(A∪B) ≠ f(A) + f(B)`), it's not that simple. For example, you can't take the sum of the [F1](https://huggingface.co/metrics/f1) scores of each data subset as your **final metric**. + +A common way to overcome this issue is to fall back on single-process evaluation. The metrics are evaluated on a single GPU, which is inefficient. + +🤗 Datasets solves this issue by only computing the final metric on the first node. The predictions and references are computed and provided to the metric separately for each node. These are temporarily stored in an Apache Arrow table, avoiding cluttering the GPU or CPU memory. When you are ready to compute the final metric with [`Metric.compute`], the first node is able to access the predictions and references stored on all the other nodes. Once it has gathered all the predictions and references, [`Metric.compute`] will perform the final metric evaluation. + +This solution allows 🤗 Datasets to perform distributed predictions, which is important for evaluation speed in distributed settings. At the same time, you can also use complex non-additive metrics without wasting valuable GPU or CPU memory. \ No newline at end of file diff --git a/testbed/huggingface__datasets/docs/source/access.mdx b/testbed/huggingface__datasets/docs/source/access.mdx new file mode 100644 index 0000000000000000000000000000000000000000..ecdfbbf446b0dbbb4260f43095e795184d299755 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/access.mdx @@ -0,0 +1,152 @@ +# Know your dataset + +There are two types of dataset objects, a regular [`Dataset`] and then an ✨ [`IterableDataset`] ✨. A [`Dataset`] provides fast random access to the rows, and memory-mapping so that loading even large datasets only uses a relatively small amount of device memory. But for really, really big datasets that won't even fit on disk or in memory, an [`IterableDataset`] allows you to access and use the dataset without waiting for it to download completely! + +This tutorial will show you how to load and access a [`Dataset`] and an [`IterableDataset`]. + +## Dataset + +When you load a dataset split, you'll get a [`Dataset`] object. You can do many things with a [`Dataset`] object, which is why it's important to learn how to manipulate and interact with the data stored inside. + +This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset, but feel free to load any dataset you'd like and follow along! + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("rotten_tomatoes", split="train") +``` + +### Indexing + +A [`Dataset`] contains columns of data, and each column can be a different type of data. The *index*, or axis label, is used to access examples from the dataset.
For example, indexing by the row returns a dictionary of an example from the dataset: + +```py +# Get the first row in the dataset +>>> dataset[0] +{'label': 1, + 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} +``` + +Use the `-` operator to start from the end of the dataset: + +```py +# Get the last row in the dataset +>>> dataset[-1] +{'label': 0, + 'text': 'things really get weird , though not particularly scary : the movie is all portent and no content .'} +``` + +Indexing by the column name returns a list of all the values in the column: + +```py +>>> dataset["text"] +['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', + 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', + 'effective but too-tepid biopic', + ..., + 'things really get weird , though not particularly scary : the movie is all portent and no content .'] +``` + +You can combine row and column name indexing to return a specific value at a position: + +```py +>>> dataset[0]["text"] +'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .' +``` + +But it is important to remember that indexing order matters, especially when working with large audio and image datasets. Indexing by the column name returns all the values in the column first, then loads the value at that position. For large datasets, it may be slower to index by the column name first. + +```py +>>> import time + +>>> start_time = time.time() +>>> text = dataset[0]["text"] +>>> end_time = time.time() +>>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") +Elapsed time: 0.0031 seconds + +>>> start_time = time.time() +>>> text = dataset["text"][0] +>>> end_time = time.time() +>>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") +Elapsed time: 0.0094 seconds +``` + +### Slicing + +Slicing returns a slice - or subset - of the dataset, which is useful for viewing several rows at once. To slice a dataset, use the `:` operator to specify a range of positions. + +```py +# Get the first three rows +>>> dataset[:3] +{'label': [1, 1, 1], + 'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', + 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .',
+ 'effective but too-tepid biopic']}
+
+# Get rows between three and six
+>>> dataset[3:6]
+{'label': [1, 1, 1],
+ 'text': ['if you sometimes like to go to the movies to have fun , wasabi is a good place to start .',
+ "emerges as something rare , an issue movie that's so honest and keenly observed that it doesn't feel like one .",
+ 'the film provides some great insight into the neurotic mindset of all comics -- even those who have reached the absolute top of the game .']}
+```
+
+## IterableDataset
+
+An [`IterableDataset`] is loaded when you set the `streaming` parameter to `True` in [`~datasets.load_dataset`]:
+
+```py
+>>> from datasets import load_dataset
+
+>>> iterable_dataset = load_dataset("food101", split="train", streaming=True)
+>>> for example in iterable_dataset:
+...     print(example)
+...     break
+{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512>, 'label': 6}
+```
+
+You can also create an [`IterableDataset`] from an *existing* [`Dataset`], which is faster than streaming mode because the dataset is streamed from local files:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("rotten_tomatoes", split="train")
+>>> iterable_dataset = dataset.to_iterable_dataset()
+```
+
+An [`IterableDataset`] progressively iterates over a dataset one example at a time, so you don't have to wait for the whole dataset to download before you can use it. As you can imagine, this is quite useful for large datasets you want to use immediately!
+
+However, this means an [`IterableDataset`]'s behavior is different from a regular [`Dataset`]. You don't get random access to examples in an [`IterableDataset`]. Instead, you should iterate over its elements, for example, by calling `next(iter())` or with a `for` loop to return the next item from the [`IterableDataset`]:
+
+```py
+>>> next(iter(iterable_dataset))
+{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512>,
+ 'label': 6}
+
+>>> for example in iterable_dataset:
+...     print(example)
+...     break
+{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512>, 'label': 6}
+```
+
+You can return a subset of the dataset with a specific number of examples in it with [`IterableDataset.take`]:
+
+```py
+# Get first three examples
+>>> list(iterable_dataset.take(3))
+[{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512>,
+  'label': 6},
+ {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512>,
+  'label': 6},
+ {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512>,
+  'label': 6}]
+```
+
+But unlike [slicing](access/#slicing), [`IterableDataset.take`] creates a new [`IterableDataset`].
+
+## Next steps
+
+Interested in learning more about the differences between these two types of datasets? Learn more about them in the [Differences between `Dataset` and `IterableDataset`](about_mapstyle_vs_iterable) conceptual guide.
+
+To get more hands-on with these dataset types, check out the [Process](process) guide to learn how to preprocess a [`Dataset`] or the [Stream](stream) guide to learn how to preprocess an [`IterableDataset`].
diff --git a/testbed/huggingface__datasets/docs/source/audio_dataset.mdx b/testbed/huggingface__datasets/docs/source/audio_dataset.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..519f25e23d043a297184a608ead81832e9345046
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/audio_dataset.mdx
@@ -0,0 +1,624 @@
+# Create an audio dataset
+
+You can share a dataset with your team or with anyone in the community by creating a dataset repository on the Hugging Face Hub:
+
+```py
+from datasets import load_dataset
+
+dataset = load_dataset("<username>/my_dataset")
+```
+
+There are several methods for creating and sharing an audio dataset:
+
+* Create an audio dataset from local files in Python with [`Dataset.push_to_hub`].
This is an easy way that requires only a few steps in Python.
+
+* Create an audio dataset repository with the `AudioFolder` builder. This is a no-code solution for quickly creating an audio dataset with several thousand audio files.
+
+* Create an audio dataset by writing a loading script. This method is for advanced users and requires more effort and coding, but you have greater flexibility over how a dataset is defined, downloaded, and generated, which can be useful for more complex or large scale audio datasets.
+
+
+
+You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub.
+
+
+
+## Local files
+
+You can load your own dataset using the paths to your audio files. Use the [`~Dataset.cast_column`] function to take a column of audio file paths, and cast it to the [`Audio`] feature:
+
+```py
+>>> from datasets import Audio, Dataset
+
+>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
+>>> audio_dataset[0]["audio"]
+{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
+         0.        ,  0.        ], dtype=float32),
+ 'path': 'path/to/audio_1',
+ 'sampling_rate': 16000}
+```
+
+Then upload the dataset to the Hugging Face Hub using [`Dataset.push_to_hub`]:
+
+```py
+audio_dataset.push_to_hub("<username>/my_dataset")
+```
+
+This will create a dataset repository containing your audio dataset:
+
+```
+my_dataset/
+├── README.md
+└── data/
+    └── train-00000-of-00001.parquet
+```
+
+## AudioFolder
+
+The `AudioFolder` is a dataset builder designed to quickly load an audio dataset with several thousand audio files without requiring you to write any code.
+Any additional information about your dataset - such as transcription, speaker accent, or speaker intent - is automatically loaded by `AudioFolder` as long as you include this information in a metadata file (`metadata.csv`/`metadata.jsonl`).
+
+
+
+💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `AudioFolder` creates dataset splits based on your dataset repository structure.
+
+
+
+Create a dataset repository on the Hugging Face Hub and upload your dataset directory following the `AudioFolder` structure:
+
+```
+my_dataset/
+├── README.md
+├── metadata.csv
+└── data/
+```
+
+The `data` folder can be any name you want.
+
+
+
+It can be helpful to store your metadata as a `jsonl` file if the data columns contain a more complex format (like a list of floats) to avoid parsing errors or reading complex values as strings.
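+
+For instance, a `metadata.jsonl` file might look like the sketch below; here `speaker_embedding` is a hypothetical list-valued column used only for illustration, and the transcriptions are shortened:
+
+```jsonl
+{"file_name": "data/first_audio_file.mp3", "transcription": "znowu się duch z ciałem zrośnie...", "speaker_embedding": [0.12, 0.48, 0.07]}
+{"file_name": "data/second_audio_file.mp3", "transcription": "już u źwierzyńca podwojów król zasiada...", "speaker_embedding": [0.33, 0.41, 0.18]}
+```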
+
+
+
+The metadata file should include a `file_name` column to link an audio file to its metadata:
+
+```csv
+file_name,transcription
+data/first_audio_file.mp3,znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi
+data/second_audio_file.mp3,już u źwierzyńca podwojów król zasiada przy nim książęta i panowie rada a gdzie wzniosły krążył ganek rycerze obok kochanek król skinął palcem zaczęto igrzysko
+data/third_audio_file.mp3,pewnie kędyś w obłędzie ubite minęły szlaki zaczekajmy dzień jaki poślemy szukać wszędzie dziś jutro pewnie będzie posłali wszędzie sługi czekali dzień i drugi gdy nic nie doczekali z płaczem chcą jechać dali
+```
+
+Then you can store your dataset in a directory structure like this:
+
+```
+metadata.csv
+data/first_audio_file.mp3
+data/second_audio_file.mp3
+data/third_audio_file.mp3
+
+```
+
+Users can now load your dataset and the associated metadata by specifying `audiofolder` in [`load_dataset`] and the dataset directory in `data_dir`:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/data")
+>>> dataset["train"][0]
+{'audio':
+    {'path': '/path/to/extracted/audio/first_audio_file.mp3',
+     'array': array([ 0.00088501,  0.0012207 ,  0.00131226, ..., -0.00045776, -0.00054932, -0.00054932], dtype=float32),
+     'sampling_rate': 16000},
+ 'transcription': 'znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi'
+}
+```
+
+You can also use `audiofolder` to load datasets involving multiple splits. To do so, your dataset directory might have the following structure:
+
+```
+data/train/first_train_audio_file.mp3
+data/train/second_train_audio_file.mp3
+
+data/test/first_test_audio_file.mp3
+data/test/second_test_audio_file.mp3
+
+```
+
+
+
+Note that if the audio files are not located right next to the metadata file, the `file_name` column should be a full relative path to an audio file, not just its filename.
+
+
+
+For audio datasets that don't have any associated metadata, `AudioFolder` automatically infers the class labels of the dataset based on the directory name. This can be useful for audio classification tasks. Your dataset directory might look like:
+
+```
+data/train/electronic/01.mp3
+data/train/punk/01.mp3
+
+data/test/electronic/09.mp3
+data/test/punk/09.mp3
+```
+
+Load the dataset with `AudioFolder`, and it will create a `label` column from the directory name (the genre, in this example):
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/data")
+>>> dataset["train"][0]
+{'audio':
+    {'path': '/path/to/electronic/01.mp3',
+     'array': array([ 3.9714024e-07,  7.3031038e-07,  7.5640685e-07, ...,
+         -1.1963668e-01, -1.1681189e-01, -1.1244172e-01], dtype=float32),
+     'sampling_rate': 44100},
+ 'label': 0  # "electronic"
+}
+>>> dataset["train"][-1]
+{'audio':
+    {'path': '/path/to/punk/01.mp3',
+     'array': array([0.15237972, 0.13222949, 0.10627693, ..., 0.41940814, 0.37578005,
+         0.33717662], dtype=float32),
+     'sampling_rate': 44100},
+ 'label': 1  # "punk"
+}
+```
+
+
+
+If all audio files are contained in a single directory or if they are not on the same level of directory structure, the `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly.
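+
+A minimal sketch of that call, assuming the genre layout above:
+
+```py
+>>> from datasets import load_dataset
+
+>>> # force AudioFolder to create the "label" column from the directory names
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/data", drop_labels=False)
+```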
+ + + + + + +Some audio datasets, like those found in [Kaggle competitions](https://www.kaggle.com/competitions/kaggle-pog-series-s01e02/overview), have separate metadata files for each split. Provided the metadata features are the same for each split, `audiofolder` can be used to load all splits at once. If the metadata features differ across each split, you should load them with separate `load_dataset()` calls. + + + +## Loading script + +Write a dataset loading script to manually create a dataset. +It defines a dataset's splits and configurations, and handles downloading and generating the dataset examples. +The script should have the same name as your dataset folder or repository: + +``` +my_dataset/ +├── README.md +├── my_dataset.py +└── data/ +``` + +The `data` folder can be any name you want, it doesn't have to be `data`. This folder is optional, unless you're hosting your dataset on the Hub. + +This directory structure allows your dataset to be loaded in one line: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("path/to/my_dataset") +``` + +This guide will show you how to create a dataset loading script for audio datasets, which is a bit different from creating a loading script for text datasets. +Audio datasets are commonly stored in `tar.gz` archives which requires a particular approach to support streaming mode. While streaming is not required, we highly encourage implementing streaming support in your audio dataset because: + +1. Users without a lot of disk space can use your dataset without downloading it. Learn more about streaming in the [Stream](./stream) guide! +2. Users can preview a dataset in the dataset viewer. + +Here is an example using TAR archives: + +``` +my_dataset/ +├── README.md +├── my_dataset.py +└── data/ + ├── train.tar.gz + ├── test.tar.gz + └── metadata.csv +``` + +In addition to learning how to create a streamable dataset, you'll also learn how to: + +* Create a dataset builder class. +* Create dataset configurations. +* Add dataset metadata. +* Download and define the dataset splits. +* Generate the dataset. +* Upload the dataset to the Hub. + +The best way to learn is to open up an existing audio dataset loading script, like [Vivos](https://huggingface.co/datasets/vivos/blob/main/vivos.py), and follow along! + + + + This guide shows how to process audio data stored in TAR archives - the most frequent case for audio datasets. Check out [minds14](https://huggingface.co/datasets/PolyAI/minds14/blob/main/minds14.py) dataset for an example of an audio script which uses ZIP archives. + + + + + +To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point! + + + +### Create a dataset builder class + +[`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset: + +* `_info` stores information about your dataset like its description, license, and features. +* `_split_generators` downloads the dataset and defines its splits. +* `_generate_examples` generates the dataset's samples containing the audio data and other features specified in `info` for each split. + +Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. 
Don't worry about filling in each of these methods yet, you'll develop those over the next few sections: + +```py +class VivosDataset(datasets.GeneratorBasedBuilder): + """VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recording speech prepared for + Vietnamese Automatic Speech Recognition task.""" + + def _info(self): + + def _split_generators(self, dl_manager): + + def _generate_examples(self, prompts_path, path_to_clips, audio_files): + +``` + +#### Multiple configurations + +In some cases, a dataset may have more than one configuration. For example, [LibriVox Indonesia](https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia) dataset has several configurations corresponding to different languages. + +To create different configurations, use the [`BuilderConfig`] class to create a subclass of your dataset. The only required parameter is the `name` of the configuration, which must be passed to the configuration's superclass `__init__()`. Otherwise, you can specify any custom parameters you want in your configuration class. + +```py +class LibriVoxIndonesiaConfig(datasets.BuilderConfig): + """BuilderConfig for LibriVoxIndonesia.""" + + def __init__(self, name, version, **kwargs): + self.language = kwargs.pop("language", None) + self.release_date = kwargs.pop("release_date", None) + self.num_clips = kwargs.pop("num_clips", None) + self.num_speakers = kwargs.pop("num_speakers", None) + self.validated_hr = kwargs.pop("validated_hr", None) + self.total_hr = kwargs.pop("total_hr", None) + self.size_bytes = kwargs.pop("size_bytes", None) + self.size_human = size_str(self.size_bytes) + description = ( + f"LibriVox-Indonesia speech to text dataset in {self.language} released on {self.release_date}. " + f"The dataset comprises {self.validated_hr} hours of transcribed speech data" + ) + super(LibriVoxIndonesiaConfig, self).__init__( + name=name, + version=datasets.Version(version), + description=description, + **kwargs, + ) +``` + +Define your configurations in the `BUILDER_CONFIGS` class variable inside [`GeneratorBasedBuilder`]. In this example, the author imports the languages from a separate `release_stats.py` [file](https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia/blob/main/release_stats.py) from their repository, and then loops through each language to create a configuration: + +```py +class LibriVoxIndonesia(datasets.GeneratorBasedBuilder): + DEFAULT_CONFIG_NAME = "all" + + BUILDER_CONFIGS = [ + LibriVoxIndonesiaConfig( + name=lang, + version=STATS["version"], + language=LANGUAGES[lang], + release_date=STATS["date"], + num_clips=lang_stats["clips"], + num_speakers=lang_stats["users"], + total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None, + size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None, + ) + for lang, lang_stats in STATS["locales"].items() + ] +``` + + + +Typically, users need to specify a configuration to load in [`load_dataset`], otherwise a `ValueError` is raised. You can avoid this by setting a default dataset configuration to load in `DEFAULT_CONFIG_NAME`. + + + +Now if users want to load the Balinese (`bal`) configuration, they can use the configuration name: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("indonesian-nlp/librivox-indonesia", "bal", split="train") +``` + +### Add dataset metadata + +Adding information about your dataset helps users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. 
Users can access this information by:
+
+```py
+>>> from datasets import load_dataset_builder
+>>> ds_builder = load_dataset_builder("vivos")
+>>> ds_builder.info
+```
+
+There is a lot of information you can include about your dataset, but some important ones are:
+
+1. `description` provides a concise description of the dataset.
+2. `features` specify the dataset column types. Since you're creating an audio loading script, you'll need to include the [`Audio`] feature and the `sampling_rate` of the dataset.
+3. `homepage` provides a link to the dataset homepage.
+4. `license` specifies the permissions for using a dataset as defined by the license type.
+5. `citation` is a BibTeX citation of the dataset.
+
+
+
+You'll notice a lot of the dataset information is defined earlier in the loading script, which can make it easier to read. There are also other [`~Dataset.Features`] you can input, so be sure to check out the full list and [features guide](./about_dataset_features) for more details.
+
+
+
+```py
+def _info(self):
+    return datasets.DatasetInfo(
+        description=_DESCRIPTION,
+        features=datasets.Features(
+            {
+                "speaker_id": datasets.Value("string"),
+                "path": datasets.Value("string"),
+                "audio": datasets.Audio(sampling_rate=16_000),
+                "sentence": datasets.Value("string"),
+            }
+        ),
+        supervised_keys=None,
+        homepage=_HOMEPAGE,
+        license=_LICENSE,
+        citation=_CITATION,
+    )
+```
+
+### Download and define the dataset splits
+
+Now that you've added some information about your dataset, the next step is to download the dataset and define the splits.
+
+1. Use the [`~DownloadManager.download`] method to download the metadata file at `_PROMPTS_URLS` and the audio TAR archive at `_DATA_URL`. This method returns the path to the local file/archive. In streaming mode, it doesn't download the file(s) and just returns a URL to stream the data from. This method accepts:
+
+   * a relative path to a file inside a Hub dataset repository (for example, in the `data/` folder)
+   * a URL to a file hosted somewhere else
+   * a (nested) list or dictionary of file names or URLs
+
+2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the audio files and sentence prompts in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
+
+   In the `gen_kwargs` parameter, specify the file path to the `prompts_path` and `path_to_clips`. For `audio_files`, you'll need to use [`~DownloadManager.iter_archive`] to iterate over the audio files in the TAR archive. This enables streaming for your dataset. All of these file paths are passed onto the next step where you'll actually generate the dataset.
+
+```py
+def _split_generators(self, dl_manager):
+    """Returns SplitGenerators."""
+    prompts_paths = dl_manager.download(_PROMPTS_URLS)
+    archive = dl_manager.download(_DATA_URL)
+    train_dir = "vivos/train"
+    test_dir = "vivos/test"
+
+    return [
+        datasets.SplitGenerator(
+            name=datasets.Split.TRAIN,
+            gen_kwargs={
+                "prompts_path": prompts_paths["train"],
+                "path_to_clips": train_dir + "/waves",
+                "audio_files": dl_manager.iter_archive(archive),
+            },
+        ),
+        datasets.SplitGenerator(
+            name=datasets.Split.TEST,
+            gen_kwargs={
+                "prompts_path": prompts_paths["test"],
+                "path_to_clips": test_dir + "/waves",
+                "audio_files": dl_manager.iter_archive(archive),
+            },
+        ),
+    ]
+```
+
+
+
+
+This implementation does not extract downloaded archives.
If you want to extract files after download, you need to additionally use [`~DownloadManager.extract`]; see the [(Advanced) Extract TAR archives](#advanced-extract-tar-archives-locally) section.
+
+
+
+
+### Generate the dataset
+
+The last method in the [`GeneratorBasedBuilder`] class actually generates the samples in the dataset. It yields a dataset according to the structure specified in `features` from the `info` method. As you can see, `_generate_examples` accepts the `prompts_path`, `path_to_clips`, and `audio_files` from the previous method as arguments.
+
+Files inside TAR archives are accessed and yielded sequentially. This means you need to have the metadata associated with the audio files in the TAR file in hand first so you can yield it with its corresponding audio file.
+
+```py
+examples = {}
+with open(prompts_path, encoding="utf-8") as f:
+    for row in f:
+        data = row.strip().split(" ", 1)
+        speaker_id = data[0].split("_")[0]
+        audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
+        examples[audio_path] = {
+            "speaker_id": speaker_id,
+            "path": audio_path,
+            "sentence": data[1],
+        }
+```
+
+Finally, iterate over files in `audio_files` and yield them along with their corresponding metadata. [`~DownloadManager.iter_archive`] yields a tuple of (`path`, `f`) where `path` is a **relative** path to a file inside the TAR archive and `f` is the file object itself.
+
+```py
+inside_clips_dir = False
+id_ = 0
+for path, f in audio_files:
+    if path.startswith(path_to_clips):
+        inside_clips_dir = True
+        if path in examples:
+            audio = {"path": path, "bytes": f.read()}
+            yield id_, {**examples[path], "audio": audio}
+            id_ += 1
+    elif inside_clips_dir:
+        break
+```
+
+Put these two steps together, and the whole `_generate_examples` method looks like:
+
+```py
+def _generate_examples(self, prompts_path, path_to_clips, audio_files):
+    """Yields examples as (key, example) tuples."""
+    examples = {}
+    with open(prompts_path, encoding="utf-8") as f:
+        for row in f:
+            data = row.strip().split(" ", 1)
+            speaker_id = data[0].split("_")[0]
+            audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
+            examples[audio_path] = {
+                "speaker_id": speaker_id,
+                "path": audio_path,
+                "sentence": data[1],
+            }
+    inside_clips_dir = False
+    id_ = 0
+    for path, f in audio_files:
+        if path.startswith(path_to_clips):
+            inside_clips_dir = True
+            if path in examples:
+                audio = {"path": path, "bytes": f.read()}
+                yield id_, {**examples[path], "audio": audio}
+                id_ += 1
+        elif inside_clips_dir:
+            break
+```
+
+### Upload the dataset to the Hub
+
+Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share).
+
+Congratulations, you can now load your dataset from the Hub! 🥳
+
+```py
+>>> from datasets import load_dataset
+>>> load_dataset("<username>/my_dataset")
+```
+
+### (Advanced) Extract TAR archives locally
+
+In the example above, downloaded archives are not extracted and therefore examples do not contain information about where they are stored locally.
+To explain how to do the extraction in a way that also supports streaming, we will briefly go through the [LibriVox Indonesia](https://huggingface.co/datasets/indonesian-nlp/librivox-indonesia/blob/main/librivox-indonesia.py) loading script.
+
+#### Download and define the dataset splits
+
+1. Use the [`~DownloadManager.download`] method to download the audio data at `_AUDIO_URL`.
+
+2. To extract the audio TAR archive locally, use the [`~DownloadManager.extract`] method.
You can use this method only in non-streaming mode (when `dl_manager.is_streaming=False`). This returns a local path to the extracted archive directory:
+
+   ```py
+   local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
+   ```
+
+3. Use the [`~DownloadManager.iter_archive`] method to iterate over the archive at `audio_path`, just like in the Vivos example above. [`~DownloadManager.iter_archive`] doesn't provide any information about the full paths of files from the archive, even if it has been extracted. As a result, you need to pass the `local_extracted_archive` path to the next step in `gen_kwargs`, in order to preserve information about where the archive was extracted to. This is required to construct the correct paths to the local files when you generate the examples.
+
+
+
+The reason you need to use a combination of [`~DownloadManager.download`] and [`~DownloadManager.iter_archive`] is because files in TAR archives can't be accessed directly by their paths. Instead, you'll need to iterate over the files within the archive! You can use [`~DownloadManager.download_and_extract`] and [`~DownloadManager.extract`] with TAR archives only in non-streaming mode, otherwise it would throw an error.
+
+
+
+4. Use the [`~DownloadManager.download_and_extract`] method to download the metadata file specified in `_METADATA_URL`. This method returns a path to a local file in non-streaming mode. In streaming mode, it doesn't download the file locally and returns the same URL.
+
+5. Now use the [`SplitGenerator`] to organize the audio files and metadata in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
+
+   In the `gen_kwargs` parameter, specify the file paths to `local_extracted_archive`, `audio_files`, `metadata_path`, and `path_to_clips`. Remember, for `audio_files`, you need to use [`~DownloadManager.iter_archive`] to iterate over the audio files in the TAR archives. This enables streaming for your dataset! All of these file paths are passed onto the next step where the dataset samples are generated.
+
+```py
+def _split_generators(self, dl_manager):
+    """Returns SplitGenerators."""
+    dl_manager.download_config.ignore_url_params = True
+
+    audio_path = dl_manager.download(_AUDIO_URL)
+    local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
+    path_to_clips = "librivox-indonesia"
+
+    return [
+        datasets.SplitGenerator(
+            name=datasets.Split.TRAIN,
+            gen_kwargs={
+                "local_extracted_archive": local_extracted_archive,
+                "audio_files": dl_manager.iter_archive(audio_path),
+                "metadata_path": dl_manager.download_and_extract(_METADATA_URL + "/metadata_train.csv.gz"),
+                "path_to_clips": path_to_clips,
+            },
+        ),
+        datasets.SplitGenerator(
+            name=datasets.Split.TEST,
+            gen_kwargs={
+                "local_extracted_archive": local_extracted_archive,
+                "audio_files": dl_manager.iter_archive(audio_path),
+                "metadata_path": dl_manager.download_and_extract(_METADATA_URL + "/metadata_test.csv.gz"),
+                "path_to_clips": path_to_clips,
+            },
+        ),
+    ]
+```
+
+#### Generate the dataset
+
+Here `_generate_examples` accepts `local_extracted_archive`, `audio_files`, `metadata_path`, and `path_to_clips` from the previous method as arguments.
+
+1. TAR files are accessed and yielded sequentially.
This means you need to have the metadata in `metadata_path` associated with the audio files in the TAR file in hand first, so that you can later yield it with its corresponding audio file:
+
+   ```py
+   with open(metadata_path, "r", encoding="utf-8") as f:
+       reader = csv.DictReader(f)
+       for row in reader:
+           if self.config.name == "all" or self.config.name == row["language"]:
+               row["path"] = os.path.join(path_to_clips, row["path"])
+               # if data is incomplete, fill with empty values
+               for field in data_fields:
+                   if field not in row:
+                       row[field] = ""
+               metadata[row["path"]] = row
+   ```
+
+2. Now you can yield the files in the `audio_files` archive. When you use [`~DownloadManager.iter_archive`], it yields a tuple of (`path`, `f`) where `path` is a **relative path** to a file inside the archive, and `f` is the file object itself. To get the **full path** to the locally extracted file, join the path of the directory (`local_extracted_archive`) where the archive is extracted to and the relative audio file path (`path`):
+
+   ```py
+   for path, f in audio_files:
+       if path in metadata:
+           result = dict(metadata[path])
+           # set the audio feature and the path to the extracted file
+           path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+           result["audio"] = {"path": path, "bytes": f.read()}
+           result["path"] = path
+           yield id_, result
+           id_ += 1
+   ```
+
+Put both of these steps together, and the whole `_generate_examples` method should look like:
+
+```py
+def _generate_examples(
+    self,
+    local_extracted_archive,
+    audio_files,
+    metadata_path,
+    path_to_clips,
+):
+    """Yields examples."""
+    data_fields = list(self._info().features.keys())
+    metadata = {}
+    with open(metadata_path, "r", encoding="utf-8") as f:
+        reader = csv.DictReader(f)
+        for row in reader:
+            if self.config.name == "all" or self.config.name == row["language"]:
+                row["path"] = os.path.join(path_to_clips, row["path"])
+                # if data is incomplete, fill with empty values
+                for field in data_fields:
+                    if field not in row:
+                        row[field] = ""
+                metadata[row["path"]] = row
+    id_ = 0
+    for path, f in audio_files:
+        if path in metadata:
+            result = dict(metadata[path])
+            # set the audio feature and the path to the extracted file
+            path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+            result["audio"] = {"path": path, "bytes": f.read()}
+            result["path"] = path
+            yield id_, result
+            id_ += 1
+```
diff --git a/testbed/huggingface__datasets/docs/source/audio_load.mdx b/testbed/huggingface__datasets/docs/source/audio_load.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c066bc31ce95cb227c0e4c4d869df118d11589e8
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/audio_load.mdx
@@ -0,0 +1,93 @@
+# Load audio data
+
+You can load an audio dataset using the [`Audio`] feature that automatically decodes and resamples the audio files when you access the examples.
+Audio decoding is based on the [`soundfile`](https://github.com/bastibe/python-soundfile) Python package, which uses the [`libsndfile`](https://github.com/libsndfile/libsndfile) C library under the hood.
+
+## Installation
+
+To work with audio datasets, you need to have the `audio` dependencies installed.
+Check out the [installation](./installation#audio) guide to learn how to install it.
+
+
+## Local files
+
+You can load your own dataset using the paths to your audio files.
Use the [`~Dataset.cast_column`] function to take a column of audio file paths, and cast it to the [`Audio`] feature:
+
+```py
+>>> from datasets import Audio, Dataset
+
+>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
+>>> audio_dataset[0]["audio"]
+{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
+         0.        ,  0.        ], dtype=float32),
+ 'path': 'path/to/audio_1',
+ 'sampling_rate': 16000}
+```
+
+## AudioFolder
+
+You can also load a dataset with an `AudioFolder` dataset builder. It does not require writing a custom dataloader, making it useful for quickly creating and loading audio datasets with several thousand audio files.
+
+## AudioFolder with metadata
+
+To link your audio files with metadata information, make sure your dataset has a `metadata.csv` file. Your dataset structure might look like:
+
+```
+folder/train/metadata.csv
+folder/train/first_audio_file.mp3
+folder/train/second_audio_file.mp3
+folder/train/third_audio_file.mp3
+```
+
+Your `metadata.csv` file must have a `file_name` column which links audio files with their metadata. An example `metadata.csv` file might look like:
+
+```text
+file_name,transcription
+first_audio_file.mp3,znowu się duch z ciałem zrośnie w młodocianej wstaniesz wiosnie i możesz skutkiem tych leków umierać wstawać wiek wieków dalej tam były przestrogi jak siekać głowę jak nogi
+second_audio_file.mp3,już u źwierzyńca podwojów król zasiada przy nim książęta i panowie rada a gdzie wzniosły krążył ganek rycerze obok kochanek król skinął palcem zaczęto igrzysko
+third_audio_file.mp3,pewnie kędyś w obłędzie ubite minęły szlaki zaczekajmy dzień jaki poślemy szukać wszędzie dziś jutro pewnie będzie posłali wszędzie sługi czekali dzień i drugi gdy nic nie doczekali z płaczem chcą jechać dali
+```
+
+`AudioFolder` will load audio data and create a `transcription` column containing texts from `metadata.csv`:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
+>>> # OR by specifying the list of files
+>>> dataset = load_dataset("audiofolder", data_files=["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"])
+```
+
+You can load remote datasets from their URLs with the `data_files` parameter:
+
+```py
+>>> dataset = load_dataset("audiofolder", data_files=["https://foo.bar/audio_1", "https://foo.bar/audio_2", ..., "https://foo.bar/audio_n"])
+>>> # for example, pass SpeechCommands archive:
+>>> dataset = load_dataset("audiofolder", data_files="https://s3.amazonaws.com/datasets.huggingface.co/SpeechCommands/v0.01/v0.01_test.tar.gz")
+```
+
+Metadata can also be specified as JSON Lines, in which case use `metadata.jsonl` as the name of the metadata file. This format is helpful in scenarios when one of the columns is complex, e.g. a list of floats, to avoid parsing errors or reading the complex values as strings.
+
+To ignore the information in the metadata file, set `drop_metadata=True` in [`load_dataset`]:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder", drop_metadata=True)
+```
+
+If you don't have a metadata file, `AudioFolder` automatically infers the label name from the directory name.
+If you want to drop automatically created labels, set `drop_labels=True`.
+In this case, your dataset will only contain an audio column:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder_without_metadata", drop_labels=True)
+```
+
+
+
+For more information about creating your own `AudioFolder` dataset, take a look at the [Create an audio dataset](./audio_dataset) guide.
+
+
+
+For a guide on how to load any type of dataset, take a look at the general loading guide.
diff --git a/testbed/huggingface__datasets/docs/source/audio_process.mdx b/testbed/huggingface__datasets/docs/source/audio_process.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..26bb7f3493a53766ba42a90b81ce4150c4cb8b58
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/audio_process.mdx
@@ -0,0 +1,72 @@
+# Process audio data
+
+This guide shows specific methods for processing audio datasets. Learn how to:
+
+- Resample the sampling rate.
+- Use [`~Dataset.map`] with audio datasets.
+
+For a guide on how to process any type of dataset, take a look at the general process guide.
+
+
+## Cast
+
+The [`~Dataset.cast_column`] function is used to cast a column to another feature to be decoded. When you use this function with the [`Audio`] feature, you can resample the sampling rate:
+
+```py
+>>> from datasets import load_dataset, Audio
+
+>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
+>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
+```
+
+Audio files are decoded and resampled on-the-fly, so the next time you access an example, the audio file is resampled to 16kHz:
+
+```py
+>>> dataset[0]["audio"]
+{'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
+         3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+ 'sampling_rate': 16000}
+```
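+
+The same pattern works for any target rate. As a quick sketch (8kHz is an arbitrary choice here, e.g. for telephony-style audio), casting again re-resamples on the next access:
+
+```py
+>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=8_000))
+>>> dataset[0]["audio"]["sampling_rate"]
+8000
+```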
+
+## Map
+
+The [`~Dataset.map`] function helps preprocess your entire dataset at once. Depending on the type of model you're working with, you'll need to either load a [feature extractor](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoFeatureExtractor) or a [processor](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoProcessor).
+
+- For pretrained speech recognition models, load a feature extractor and tokenizer and combine them in a `processor`:
+
+  ```py
+  >>> from transformers import AutoFeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Processor
+
+  >>> model_checkpoint = "facebook/wav2vec2-large-xlsr-53"
+  # after defining a vocab.json file you can instantiate a tokenizer object:
+  >>> tokenizer = Wav2Vec2CTCTokenizer("./vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
+  >>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_checkpoint)
+  >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
+  ```
+
+- For fine-tuned speech recognition models, you only need to load a `processor`:
+
+  ```py
+  >>> from transformers import AutoProcessor
+
+  >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
+  ```
+
+When you use [`~Dataset.map`] with your preprocessing function, include the `audio` column to ensure you're actually resampling the audio data:
+
+```py
+>>> def prepare_dataset(batch):
+...     audio = batch["audio"]
+...     batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
+...     batch["input_length"] = len(batch["input_values"])
+...     with processor.as_target_processor():
+...         batch["labels"] = processor(batch["sentence"]).input_ids
+...     return batch
+>>> dataset = dataset.map(prepare_dataset, remove_columns=dataset.column_names)
+```
diff --git a/testbed/huggingface__datasets/docs/source/beam.mdx b/testbed/huggingface__datasets/docs/source/beam.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..17eb5d1e8c9c57537d7ebf3c592f2188b12b093b
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/beam.mdx
@@ -0,0 +1,46 @@
+# Beam Datasets
+
+Some datasets are too large to be processed on a single machine. Instead, you can process them with [Apache Beam](https://beam.apache.org/), a library for parallel data processing. The processing pipeline is executed on a distributed processing backend such as [Apache Flink](https://flink.apache.org/), [Apache Spark](https://spark.apache.org/), or [Google Cloud Dataflow](https://cloud.google.com/dataflow).
+
+We have already created Beam pipelines for some of the larger datasets like [wikipedia](https://huggingface.co/datasets/wikipedia), and [wiki40b](https://huggingface.co/datasets/wiki40b). You can load these normally with [`load_dataset`]. But if you want to run your own Beam pipeline with Dataflow, here is how:
+
+1. Specify the dataset and configuration you want to process:
+
+```
+DATASET_NAME=your_dataset_name  # ex: wikipedia
+CONFIG_NAME=your_config_name    # ex: 20220301.en
+```
+
+2. Input your Google Cloud Platform information:
+
+```
+PROJECT=your_project
+BUCKET=your_bucket
+REGION=your_region
+```
+
+3. Specify your Python requirements:
+
+```
+echo "datasets" > /tmp/beam_requirements.txt
+echo "apache_beam" >> /tmp/beam_requirements.txt
+```
+
+4.
Run the pipeline: + +``` +datasets-cli run_beam datasets/$DATASET_NAME \ +--name $CONFIG_NAME \ +--save_info \ +--cache_dir gs://$BUCKET/cache/datasets \ +--beam_pipeline_options=\ +"runner=DataflowRunner,project=$PROJECT,job_name=$DATASET_NAME-gen,"\ +"staging_location=gs://$BUCKET/binaries,temp_location=gs://$BUCKET/temp,"\ +"region=$REGION,requirements_file=/tmp/beam_requirements.txt" +``` + + + +When you run your pipeline, you can adjust the parameters to change the runner (Flink or Spark), output location (S3 bucket or HDFS), and the number of workers. + + diff --git a/testbed/huggingface__datasets/docs/source/cache.mdx b/testbed/huggingface__datasets/docs/source/cache.mdx new file mode 100644 index 0000000000000000000000000000000000000000..49bf550d3925ac26f00c773ab29ba0334b89b994 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/cache.mdx @@ -0,0 +1,101 @@ +# Cache management + +When you download a dataset, the processing scripts and data are stored locally on your computer. The cache allows 🤗 Datasets to avoid re-downloading or processing the entire dataset every time you use it. + +This guide will show you how to: + +- Change the cache directory. +- Control how a dataset is loaded from the cache. +- Clean up cache files in the directory. +- Enable or disable caching. + +## Cache directory + +The default cache directory is `~/.cache/huggingface/datasets`. Change the cache location by setting the shell environment variable, `HF_DATASETS_CACHE` to another directory: + +``` +$ export HF_DATASETS_CACHE="/path/to/another/directory" +``` + +When you load a dataset, you also have the option to change where the data is cached. Change the `cache_dir` parameter to the path you want: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset('LOADING_SCRIPT', cache_dir="PATH/TO/MY/CACHE/DIR") +``` + +Similarly, you can change where a metric is cached with the `cache_dir` parameter: + +```py +>>> from datasets import load_metric +>>> metric = load_metric('glue', 'mrpc', cache_dir="MY/CACHE/DIRECTORY") +``` + +## Download mode + +After you download a dataset, control how it is loaded by [`load_dataset`] with the `download_mode` parameter. By default, 🤗 Datasets will reuse a dataset if it exists. But if you need the original dataset without any processing functions applied, re-download the files as shown below: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset('squad', download_mode='force_redownload') +``` + +Refer to [`DownloadMode`] for a full list of download modes. + +## Cache files + +Clean up the cache files in the directory with [`Dataset.cleanup_cache_files`]: + +```py +# Returns the number of removed cache files +>>> dataset.cleanup_cache_files() +2 +``` + +## Enable or disable caching + +If you're using a cached file locally, it will automatically reload the dataset with any previous transforms you applied to the dataset. Disable this behavior by setting the argument `load_from_cache_file=False` in [`Dataset.map`]: + +```py +>>> updated_dataset = small_dataset.map(add_prefix, load_from_cache_file=False) +``` + +In the example above, 🤗 Datasets will execute the function `add_prefix` over the entire dataset again instead of loading the dataset from its previous state. + +Disable caching on a global scale with [`disable_caching`]: + +```py +>>> from datasets import disable_caching +>>> disable_caching() +``` + +When you disable caching, 🤗 Datasets will no longer reload cached files when applying transforms to datasets. 
Any transform you apply on your dataset will need to be reapplied.
+
+
+
+If you want to regenerate a dataset from scratch, try setting the `download_mode` parameter in [`load_dataset`] instead.
+
+
+
+You can also avoid caching your metric entirely, and keep it in CPU memory instead:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('glue', 'mrpc', keep_in_memory=True)
+```
+
+
+
+Keeping the predictions in-memory is not possible in a distributed setting since the CPU memory spaces of the various processes are not shared.
+
+
+
+
+
+## Improve performance
+
+Disabling the cache and copying the dataset in-memory will speed up dataset operations. There are two options for copying the dataset in-memory:
+
+1. Set `datasets.config.IN_MEMORY_MAX_SIZE` to a nonzero value (in bytes) that fits in your RAM.
+
+2. Set the environment variable `HF_DATASETS_IN_MEMORY_MAX_SIZE` to a nonzero value. Note that the first method takes higher precedence.
diff --git a/testbed/huggingface__datasets/docs/source/create_dataset.mdx b/testbed/huggingface__datasets/docs/source/create_dataset.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..29eee57f4fd8bb96ff71ca302ab08565996b1c64
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/create_dataset.mdx
@@ -0,0 +1,112 @@
+# Create a dataset
+
+Sometimes, you may need to create a dataset if you're working with your own data. Creating a dataset with 🤗 Datasets confers all the advantages of the library to your dataset: fast loading and processing, [stream enormous datasets](stream), [memory-mapping](https://huggingface.co/course/chapter5/4?fw=pt#the-magic-of-memory-mapping), and more. You can easily and rapidly create a dataset with 🤗 Datasets low-code approaches, reducing the time it takes to start training a model. In many cases, it is as easy as [dragging and dropping](upload_dataset#upload-with-the-hub-ui) your data files into a dataset repository on the Hub.
+
+In this tutorial, you'll learn how to use 🤗 Datasets low-code methods for creating all types of datasets:
+
+* Folder-based builders for quickly creating an image or audio dataset
+* `from_` methods for creating datasets from local files
+
+## Folder-based builders
+
+There are two folder-based builders, [`ImageFolder`] and [`AudioFolder`]. These are low-code methods for quickly creating an image or speech and audio dataset with several thousand examples. They are great for rapidly prototyping computer vision and speech models before scaling to a larger dataset. Folder-based builders take your data and automatically generate the dataset's features, splits, and labels. Under the hood:
+
+* [`ImageFolder`] uses the [`~datasets.Image`] feature to decode an image file. Many image extension formats are supported, such as jpg and png, along with many others. You can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/imagefolder/imagefolder.py#L39) of supported image extensions.
+* [`AudioFolder`] uses the [`~datasets.Audio`] feature to decode an audio file. Audio extensions such as wav and mp3 are supported, and you can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/audiofolder/audiofolder.py#L39) of supported audio extensions.
+ +The dataset splits are generated from the repository structure, and the label names are automatically inferred from the directory name. + +For example, if your image dataset (it is the same for an audio dataset) is stored like this: + +``` +pokemon/train/grass/bulbasaur.png +pokemon/train/fire/charmander.png +pokemon/train/water/squirtle.png + +pokemon/test/grass/ivysaur.png +pokemon/test/fire/charmeleon.png +pokemon/test/water/wartortle.png +``` + +Then this is how the folder-based builder generates an example: + +
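+
+As a rough sketch of the result (the repr and label ids here are illustrative), loading the layout above yields rows like:
+
+```py
+>>> dataset["train"][0]
+{'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=475x475>,
+ 'label': 1}  # e.g. "grass", inferred from the directory name
+```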
+
+Create the image dataset by specifying `imagefolder` in [`load_dataset`]:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("imagefolder", data_dir="/path/to/pokemon")
+```
+
+An audio dataset is created in the same way, except you specify `audiofolder` in [`load_dataset`] instead:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
+```
+
+Any additional information about your dataset, such as text captions or transcriptions, can be included with a `metadata.csv` file in the folder containing your dataset. The metadata file needs to have a `file_name` column that links the image or audio file to its corresponding metadata:
+
+```
+file_name, text
+bulbasaur.png, There is a plant seed on its back right from the day this Pokémon is born.
+charmander.png, It has a preference for hot things.
+squirtle.png, When it retracts its long neck into its shell, it squirts out water with vigorous force.
+```
+
+To learn more about each of these folder-based builders, check out the ImageFolder or AudioFolder guides.
+
+## From local files
+
+You can also create a dataset from local files by specifying the path to the data files. There are two ways you can create a dataset using the `from_` methods:
+
+  * The [`~Dataset.from_generator`] method is the most memory-efficient way to create a dataset from a [generator](https://wiki.python.org/moin/Generators) due to a generator's iterative behavior. This is especially useful when you're working with a really large dataset that may not fit in memory, since the dataset is generated on disk progressively and then memory-mapped.
+
+    ```py
+    >>> from datasets import Dataset
+    >>> def gen():
+    ...     yield {"pokemon": "bulbasaur", "type": "grass"}
+    ...     yield {"pokemon": "squirtle", "type": "water"}
+    >>> ds = Dataset.from_generator(gen)
+    >>> ds[0]
+    {"pokemon": "bulbasaur", "type": "grass"}
+    ```
+
+    A generator-based [`IterableDataset`] needs to be iterated over with a `for` loop, for example:
+
+    ```py
+    >>> from datasets import IterableDataset
+    >>> ds = IterableDataset.from_generator(gen)
+    >>> for example in ds:
+    ...     print(example)
+    {"pokemon": "bulbasaur", "type": "grass"}
+    {"pokemon": "squirtle", "type": "water"}
+    ```
+
+  * The [`~Dataset.from_dict`] method is a straightforward way to create a dataset from a dictionary:
+
+    ```py
+    >>> from datasets import Dataset
+    >>> ds = Dataset.from_dict({"pokemon": ["bulbasaur", "squirtle"], "type": ["grass", "water"]})
+    >>> ds[0]
+    {"pokemon": "bulbasaur", "type": "grass"}
+    ```
+
+    To create an image or audio dataset, chain the [`~Dataset.cast_column`] method with [`~Dataset.from_dict`] and specify the column and feature type. For example, to create an audio dataset:
+
+    ```py
+    >>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
+    ```
+
+## Next steps
+
+We didn't mention this in the tutorial, but you can also create a dataset with a loading script. A loading script is a more manual and code-intensive method for creating a dataset, but it also gives you the most flexibility and control over how a dataset is generated. It lets you configure additional options such as creating multiple configurations within a dataset, or enabling your dataset to be streamed.
+
+To learn more about how to write loading scripts, take a look at the image loading script, audio loading script, and text loading script guides.
+
+Now that you know how to create a dataset, consider sharing it on the Hub so the community can also benefit from your work! Go on to the next section to learn how to share your dataset.
diff --git a/testbed/huggingface__datasets/docs/source/dataset_card.mdx b/testbed/huggingface__datasets/docs/source/dataset_card.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2925ddc97b5f8e906a61c8580ef754f671803687
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/dataset_card.mdx
@@ -0,0 +1,30 @@
+# Create a dataset card
+
+Each dataset should have a dataset card to promote responsible usage and inform users of any potential biases within the dataset.
+This idea was inspired by the Model Cards proposed by [Mitchell, 2018](https://arxiv.org/abs/1810.03993).
+Dataset cards help users understand a dataset's contents, the context for using the dataset, how it was created, and any other considerations a user should be aware of.
+
+Creating a dataset card is easy and can be done in just a few steps:
+
+1. Go to your dataset repository on the [Hub](https://hf.co/new-dataset) and click on **Create Dataset Card** to create a new `README.md` file in your repository.
+
+2. Use the **Metadata UI** to select the tags that describe your dataset. You can add a license, language, pretty_name, task_categories, size_categories, and any other tags that you think are relevant. These tags help users discover and find your dataset on the Hub.
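+
+The Metadata UI writes this information as a YAML block at the top of `README.md`. A hand-written equivalent might look like the following sketch, where the tag values are illustrative rather than required:
+
+```yaml
+---
+license: mit
+language:
+- en
+pretty_name: My Dataset
+task_categories:
+- text-classification
+size_categories:
+- 1K<n<10K
+---
+```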
+ + + + For a complete, but not required, set of tag options you can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1). This'll have a few more tag options like `multilinguality` and `language_creators` which are useful but not absolutely necessary. + + + +3. Click on the **Import dataset card template** link to automatically create a template with all the relevant fields to complete. Fill out the template sections to the best of your ability. Take a look at the [Dataset Card Creation Guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) for more detailed information about what to include in each section of the card. For fields you are unable to complete, you can write **[More Information Needed]**. + +4. Once you're done, commit the changes to the `README.md` file and you'll see the completed dataset card on your repository. + +YAML also allows you to customize the way your dataset is loaded by [defining splits and/or configurations](./repository_structure#define-your-splits-and-subsets-in-yaml) without the need to write any code. + +Feel free to take a look at the [SNLI](https://huggingface.co/datasets/snli), [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail), and [Allociné](https://huggingface.co/datasets/allocine) dataset cards as examples to help you get started. diff --git a/testbed/huggingface__datasets/docs/source/dataset_script.mdx b/testbed/huggingface__datasets/docs/source/dataset_script.mdx new file mode 100644 index 0000000000000000000000000000000000000000..7cb5ba4f6561baddd9dedc292fd7206caf557aaa --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/dataset_script.mdx @@ -0,0 +1,366 @@ +# Create a dataset loading script + + + + +The dataset script is likely not needed if your dataset is in one of the following formats: CSV, JSON, JSON lines, text or Parquet. +With those formats, you should be able to load your dataset automatically with [`~datasets.load_dataset`], +as long as your dataset repository has a [required structure](./repository_structure). + + + +Write a dataset script to load and share datasets that consist of data files in unsupported formats or require more complex data preparation. +This is a more advanced way to define a dataset than using [YAML metadata in the dataset card](./repository_structure#define-your-splits-in-yaml). +A dataset script is a Python file that defines the different configurations and splits of your dataset, as well as how to download and process the data. + +The script can download data files from any website, or from the same dataset repository. + +A dataset loading script should have the same name as a dataset repository or directory. For example, a repository named `my_dataset` should contain `my_dataset.py` script. This way it can be loaded with: + +``` +my_dataset/ +├── README.md +└── my_dataset.py +``` + +```py +>>> from datasets import load_dataset +>>> load_dataset("path/to/my_dataset") +``` + +The following guide includes instructions for dataset scripts for how to: + +- Add dataset metadata. +- Download data files. +- Generate samples. +- Generate dataset metadata. +- Upload a dataset to the Hub. + +Open the [SQuAD dataset loading script](https://huggingface.co/datasets/squad/blob/main/squad.py) template to follow along on how to share a dataset. 
+ + + +To help you get started, try beginning with the dataset loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py)! + + + +## Add dataset attributes + +The first step is to add some information, or attributes, about your dataset in [`DatasetBuilder._info`]. The most important attributes you should specify are: + +1. `DatasetInfo.description` provides a concise description of your dataset. The description informs the user what's in the dataset, how it was collected, and how it can be used for a NLP task. + +2. `DatasetInfo.features` defines the name and type of each column in your dataset. This will also provide the structure for each example, so it is possible to create nested subfields in a column if you want. Take a look at [`Features`] for a full list of feature types you can use. + +```py +datasets.Features( + { + "id": datasets.Value("string"), + "title": datasets.Value("string"), + "context": datasets.Value("string"), + "question": datasets.Value("string"), + "answers": datasets.Sequence( + { + "text": datasets.Value("string"), + "answer_start": datasets.Value("int32"), + } + ), + } +) +``` + +3. `DatasetInfo.homepage` contains the URL to the dataset homepage so users can find more details about the dataset. + +4. `DatasetInfo.citation` contains a BibTeX citation for the dataset. + +After you've filled out all these fields in the template, it should look like the following example from the SQuAD loading script: + +```py +def _info(self): + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=datasets.Features( + { + "id": datasets.Value("string"), + "title": datasets.Value("string"), + "context": datasets.Value("string"), + "question": datasets.Value("string"), + "answers": datasets.features.Sequence( + {"text": datasets.Value("string"), "answer_start": datasets.Value("int32"),} + ), + } + ), + # No default supervised_keys (as we have to pass both question + # and context as input). + supervised_keys=None, + homepage="https://rajpurkar.github.io/SQuAD-explorer/", + citation=_CITATION, + ) +``` + +### Multiple configurations + +In some cases, your dataset may have multiple configurations. For example, the [SuperGLUE](https://huggingface.co/datasets/super_glue) dataset is a collection of 5 datasets designed to evaluate language understanding tasks. 🤗 Datasets provides [`BuilderConfig`] which allows you to create different configurations for the user to select from. + +Let's study the [SuperGLUE loading script](https://huggingface.co/datasets/super_glue/blob/main/super_glue.py) to see how you can define several configurations. + +1. Create a [`BuilderConfig`] subclass with attributes about your dataset. These attributes can be the features of your dataset, label classes, and a URL to the data files. + +```py +class SuperGlueConfig(datasets.BuilderConfig): + """BuilderConfig for SuperGLUE.""" + + def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs): + """BuilderConfig for SuperGLUE. + + Args: + features: *list[string]*, list of the features that will appear in the + feature dict. Should not include "label". + data_url: *string*, url to download the zip file from. + citation: *string*, citation for the data set. + url: *string*, url for information about the data set. + label_classes: *list[string]*, the list of classes for the label if the + label is present as a string. Non-string labels will be cast to either + 'False' or 'True'. 
+            **kwargs: keyword arguments forwarded to super.
+        """
+        # Version history:
+        # 1.0.2: Fixed non-determinism in ReCoRD.
+        # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
+        #        the full release (v2.0).
+        # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
+        # 0.0.2: Initial version.
+        super().__init__(version=datasets.Version("1.0.2"), **kwargs)
+        self.features = features
+        self.label_classes = label_classes
+        self.data_url = data_url
+        self.citation = citation
+        self.url = url
+```
+
+2. Create instances of your config to specify the values of the attributes of each configuration. This gives you the flexibility to specify the name and description of each configuration. These subclass instances should be listed under `DatasetBuilder.BUILDER_CONFIGS`:
+
+```py
+class SuperGlue(datasets.GeneratorBasedBuilder):
+    """The SuperGLUE benchmark."""
+
+    BUILDER_CONFIG_CLASS = SuperGlueConfig
+
+    BUILDER_CONFIGS = [
+        SuperGlueConfig(
+            name="boolq",
+            description=_BOOLQ_DESCRIPTION,
+            features=["question", "passage"],
+            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
+            citation=_BOOLQ_CITATION,
+            url="https://github.com/google-research-datasets/boolean-questions",
+        ),
+        ...
+        ...
+        SuperGlueConfig(
+            name="axg",
+            description=_AXG_DESCRIPTION,
+            features=["premise", "hypothesis"],
+            label_classes=["entailment", "not_entailment"],
+            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
+            citation=_AXG_CITATION,
+            url="https://github.com/rudinger/winogender-schemas",
+        ),
+```
+
+3. Now, users can load a specific configuration of the dataset with the configuration `name`:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('super_glue', 'boolq')
+```
+
+Additionally, users can instantiate a custom builder configuration by passing the builder configuration arguments to [`load_dataset`]:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('super_glue', data_url="https://custom_url")
+```
+
+### Default configurations
+
+Users must specify a configuration name when they load a dataset with multiple configurations. Otherwise, 🤗 Datasets will raise a `ValueError`, and prompt the user to select a configuration name. You can avoid this by setting a default dataset configuration with the `DEFAULT_CONFIG_NAME` attribute:
+
+```py
+class NewDataset(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("1.1.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+    ]
+
+    DEFAULT_CONFIG_NAME = "first_domain"
+```
+
+
+
+Only use a default configuration when it makes sense. Don't set one just because it may be more convenient for the user to not specify a configuration when they load your dataset. For example, multi-lingual datasets often have a separate configuration for each language. An appropriate default may be an aggregated configuration that loads all the languages of the dataset if the user doesn't request a particular one.
+
+
+
+## Download data files and organize splits
+
+After you've defined the attributes of your dataset, the next step is to download the data files and organize them according to their splits.
+
+1. Create a dictionary of URLs in the loading script that point to the original SQuAD data files:
+
+```py
+_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
+_URLS = {
+    "train": _URL + "train-v1.1.json",
+    "dev": _URL + "dev-v1.1.json",
+}
+```
+
+
+
+If the data files live in the same folder or repository of the dataset script, you can just pass the relative paths to the files instead of URLs.
+
+
+
+2. [`DownloadManager.download_and_extract`] takes this dictionary and downloads the data files. Once the files are downloaded, use [`SplitGenerator`] to organize each split in the dataset. This is a simple class that contains:
+
+   - The `name` of each split. You should use the standard split names: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
+
+   - `gen_kwargs` provides the file paths to the data files to load for each split.
+
+Your `DatasetBuilder._split_generators()` should look like this now:
+
+```py
+def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    urls_to_download = self._URLS
+    downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+    return [
+        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+        datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+    ]
+```
+
+## Generate samples
+
+At this point, you have:
+
+- Added the dataset attributes.
+- Provided instructions for how to download the data files.
+- Organized the splits.
+
+The next step is to actually generate the samples in each split.
+
+1. `DatasetBuilder._generate_examples` takes the file path provided by `gen_kwargs` to read and parse the data files. You need to write a function that loads the data files and extracts the columns.
+
+2. Your function should yield a tuple of an `id_` and an example from the dataset.
+
+```py
+def _generate_examples(self, filepath):
+    """This function returns the examples in the raw (text) form."""
+    logger.info("generating examples from = %s", filepath)
+    with open(filepath) as f:
+        squad = json.load(f)
+        for article in squad["data"]:
+            title = article.get("title", "").strip()
+            for paragraph in article["paragraphs"]:
+                context = paragraph["context"].strip()
+                for qa in paragraph["qas"]:
+                    question = qa["question"].strip()
+                    id_ = qa["id"]
+
+                    answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                    answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                    # Features currently used are "context", "question", and "answers".
+                    # Others are extracted here for the ease of future expansions.
+                    yield id_, {
+                        "title": title,
+                        "context": context,
+                        "question": question,
+                        "id": id_,
+                        "answers": {"answer_start": answer_starts, "text": answers,},
+                    }
+```
+
+## (Optional) Generate dataset metadata
+
+Adding dataset metadata is a great way to include information about your dataset. The metadata is stored in the dataset card `README.md` in YAML. It includes information like the number of examples (used to confirm the dataset was correctly generated), and information about the dataset like its `features`.
+
+Run the following command to generate your dataset metadata in `README.md` and make sure your new dataset loading script works correctly:
+
+```
+datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
+```
+
+If your dataset loading script passed the test, you should now have a `README.md` file in your dataset folder containing a `dataset_info` field with some metadata.
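+
+As a quick sanity check, you can also load the builder locally and inspect the attributes you defined in `_info` (a minimal sketch, assuming your script lives at `path/to/my_dataset`):
+
+```py
+>>> from datasets import load_dataset_builder
+
+>>> builder = load_dataset_builder("path/to/my_dataset")
+>>> builder.info.description
+>>> builder.info.features
+```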
+
+## Upload to the Hub
+
+Once your script is ready, [create a dataset card](dataset_card) and [upload it to the Hub](share).
+
+Congratulations, you can now load your dataset from the Hub! 🥳
+
+```py
+>>> from datasets import load_dataset
+>>> load_dataset("<username>/my_dataset")
+```
+
+## Advanced features
+
+### Sharding
+
+If your dataset is made of many big files, 🤗 Datasets automatically runs your script in parallel to make it super fast!
+It can help if you have hundreds or thousands of TAR archives, or JSONL files like [oscar](https://huggingface.co/datasets/oscar/blob/main/oscar.py) for example.
+
+To make it work, we consider lists of files in `gen_kwargs` to be shards.
+Therefore 🤗 Datasets can automatically spawn several workers to run `_generate_examples` in parallel, and each worker is given a subset of shards to process.
+
+
+```python
+
+class MyShardedDataset(datasets.GeneratorBasedBuilder):
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        downloaded_files = dl_manager.download([f"data/shard_{i}.jsonl" for i in range(1024)])
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
+        ]
+
+    def _generate_examples(self, filepaths):
+        # Each worker can be given a slice of the original `filepaths` list defined in the `gen_kwargs`
+        # so that this code can run in parallel on several shards at the same time
+        for filepath in filepaths:
+            ...
+```
+
+Users can also pass `num_proc=` to `load_dataset()` to choose the number of processes to use as workers.
+
+### ArrowBasedBuilder
+
+For some datasets it can be much faster to yield batches of data rather than examples one by one.
+You can speed up the dataset generation by yielding Arrow tables directly, instead of examples.
+This is especially useful if your data comes from Pandas DataFrames for example, since the conversion from Pandas to Arrow is as simple as:
+
+```python
+import pyarrow as pa
+pa_table = pa.Table.from_pandas(df)
+```
+
+To yield Arrow tables instead of single examples, make your dataset builder inherit from [`ArrowBasedBuilder`] instead of [`GeneratorBasedBuilder`], and use `_generate_tables` instead of `_generate_examples`:
+
+```python
+class MySuperFastDataset(datasets.ArrowBasedBuilder):
+
+    def _generate_tables(self, filepaths):
+        idx = 0
+        for filepath in filepaths:
+            ...
+            yield idx, pa_table
+            idx += 1
+```
+
+Don't forget to keep your script memory efficient, in case users run it on machines with a low amount of RAM.
diff --git a/testbed/huggingface__datasets/docs/source/depth_estimation.mdx b/testbed/huggingface__datasets/docs/source/depth_estimation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..47b186c29dd365328906cd3dda7651ce13aff65d
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/depth_estimation.mdx
@@ -0,0 +1,223 @@
+# Depth estimation
+
+Depth estimation datasets are used to train a model to approximate the relative distance of every pixel in an
+image from the camera, also known as depth. The applications enabled by these datasets primarily lie in areas like visual machine
+perception and perception in robotics. Example applications include mapping streets for self-driving cars. This guide will show you how to apply transformations
+to a depth estimation dataset.
+
+Before you start, make sure you have an up-to-date version of `albumentations` installed:
+
+```bash
+pip install -U albumentations
+```
+
+[Albumentations](https://albumentations.ai/) is a Python library for performing data augmentation
+for computer vision. It supports various computer vision tasks such as image classification, object
+detection, segmentation, and keypoint estimation.
+
+This guide uses the [NYU Depth V2](https://huggingface.co/datasets/sayakpaul/nyu_depth_v2) dataset which is
+comprised of video sequences from various indoor scenes, recorded by RGB and depth cameras. The dataset consists of scenes from 3 cities and provides images along with
+their depth maps as labels.
+
+Load the `train` split of the dataset and take a look at an example:
+
+```py
+>>> from datasets import load_dataset
+
+>>> train_dataset = load_dataset("sayakpaul/nyu_depth_v2", split="train")
+>>> index = 17
+>>> example = train_dataset[index]
+>>> example
+{'image': <PIL.PngImagePlugin.PngImageFile ...>,
+ 'depth_map': <PIL.TiffImagePlugin.TiffImageFile ...>}
+```
+
+The dataset has two fields:
+
+* `image`: a PIL PNG image object with `uint8` data type.
+* `depth_map`: a PIL TIFF image object with `float32` data type which is the depth map of the image.
+
+It is worth mentioning that the JPEG/PNG format can only store `uint8` or `uint16` data. As the depth map is `float32` data, it can't be stored using PNG/JPEG. However, we can save the depth map using the TIFF format, as it supports a wider range of data types, including `float32` data.
+
+Next, check out an image with:
+
+```py
+>>> example["image"]
+```
+ +
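+
+As a quick sanity check (a small sketch reusing the `example` loaded above), you can confirm the data types mentioned earlier by converting the PIL objects to NumPy arrays:
+
+```py
+>>> import numpy as np
+
+>>> np.asarray(example["image"]).dtype
+dtype('uint8')
+>>> np.asarray(example["depth_map"]).dtype
+dtype('float32')
+```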
+
+Before we look at the depth map, we first need to convert its data type to `uint8` using `.convert('RGB')`, as PIL can't display `float32` images. Now take a look at its corresponding depth map:
+
+```py
+>>> example["depth_map"].convert("RGB")
+```
+ +
+
+It's all black! You'll need to add some color to the depth map to visualize it properly. To do that, we can either apply color automatically during display using `plt.imshow()` or create a colored depth map using `plt.cm` and then display it. This example uses the latter, since the colored depth map can be saved to disk later (the utility below is taken from the [FastDepth repository](https://github.com/dwofk/fast-depth/blob/master/utils.py)).
+
+```py
+>>> import numpy as np
+>>> import matplotlib.pyplot as plt
+
+>>> cmap = plt.cm.viridis
+
+>>> def colored_depthmap(depth, d_min=None, d_max=None):
+...     if d_min is None:
+...         d_min = np.min(depth)
+...     if d_max is None:
+...         d_max = np.max(depth)
+...     depth_relative = (depth - d_min) / (d_max - d_min)
+...     return 255 * cmap(depth_relative)[:,:,:3]
+
+>>> def show_depthmap(depth_map):
+...     if not isinstance(depth_map, np.ndarray):
+...         depth_map = np.array(depth_map)
+...     if depth_map.ndim == 3:
+...         depth_map = depth_map.squeeze()
+...     d_min = np.min(depth_map)
+...     d_max = np.max(depth_map)
+...     depth_map = colored_depthmap(depth_map, d_min, d_max)
+...     plt.imshow(depth_map.astype("uint8"))
+...     plt.axis("off")
+...     plt.show()
+
+>>> show_depthmap(example["depth_map"])
+```
+ +
+ +You can also visualize several different images and their corresponding depth maps. + +```py +>>> def merge_into_row(input_image, depth_target): +... if not isinstance(input_image, np.ndarray): +... input_image = np.array(input_image) +... +... d_min = np.min(depth_target) +... d_max = np.max(depth_target) +... depth_target_col = colored_depthmap(depth_target, d_min, d_max) +... img_merge = np.hstack([input_image, depth_target_col]) +... +... return img_merge + +>>> random_indices = np.random.choice(len(train_dataset), 9).tolist() +>>> plt.figure(figsize=(15, 6)) +>>> for i, idx in enumerate(random_indices): +... example = train_dataset[idx] +... ax = plt.subplot(3, 3, i + 1) +... image_viz = merge_into_row( +... example["image"], example["depth_map"] +... ) +... plt.imshow(image_viz.astype("uint8")) +... plt.axis("off") +``` + +
+ +
+ +Now apply some augmentations with `albumentations`. The augmentation transformations include: + +* Random horizontal flipping +* Random cropping +* Random brightness and contrast +* Random gamma correction +* Random hue saturation + +```py +>>> import albumentations as A + +>>> crop_size = (448, 576) +>>> transforms = [ +... A.HorizontalFlip(p=0.5), +... A.RandomCrop(crop_size[0], crop_size[1]), +... A.RandomBrightnessContrast(), +... A.RandomGamma(), +... A.HueSaturationValue() +... ] +``` + +Additionally, define a mapping to better reflect the target key name. + +```py +>>> additional_targets = {"depth": "mask"} +>>> aug = A.Compose(transforms=transforms, additional_targets=additional_targets) +``` + +With `additional_targets` defined, you can pass the target depth maps to the `depth` argument of `aug` instead of `mask`. You'll notice this change +in the `apply_transforms()` function defined below. + +Create a function to apply the transformation to the images as well as their depth maps: + +```py +>>> def apply_transforms(examples): +... transformed_images, transformed_maps = [], [] +... for image, depth_map in zip(examples["image"], examples["depth_map"]): +... image, depth_map = np.array(image), np.array(depth_map) +... transformed = aug(image=image, depth=depth_map) +... transformed_images.append(transformed["image"]) +... transformed_maps.append(transformed["depth"]) +... +... examples["pixel_values"] = transformed_images +... examples["labels"] = transformed_maps +... return examples +``` + +Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space: + +```py +>>> train_dataset.set_transform(apply_transforms) +``` + +You can verify the transformation worked by indexing into the `pixel_values` and `labels` of an example image: + +```py +>>> example = train_dataset[index] + +>>> plt.imshow(example["pixel_values"]) +>>> plt.axis("off") +>>> plt.show() +``` + +
+ +
+ +Visualize the same transformation on the image's corresponding depth map: + +```py +>>> show_depthmap(example["labels"]) +``` + +
+ +
+ +You can also visualize multiple training samples reusing the previous `random_indices`: + +```py +>>> plt.figure(figsize=(15, 6)) + +>>> for i, idx in enumerate(random_indices): +... ax = plt.subplot(3, 3, i + 1) +... example = train_dataset[idx] +... image_viz = merge_into_row( +... example["pixel_values"], example["labels"] +... ) +... plt.imshow(image_viz.astype("uint8")) +... plt.axis("off") +``` + +
+ +
\ No newline at end of file
diff --git a/testbed/huggingface__datasets/docs/source/faiss_es.mdx b/testbed/huggingface__datasets/docs/source/faiss_es.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..120f146b4583401fdf0caa002f973c9a9b4b0625
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/faiss_es.mdx
@@ -0,0 +1,133 @@
+# Search index
+
+[FAISS](https://github.com/facebookresearch/faiss) and [Elasticsearch](https://www.elastic.co/elasticsearch/) enable searching for examples in a dataset. This can be useful when you want to retrieve specific examples from a dataset that are relevant to your NLP task. For example, if you are working on an Open Domain Question Answering task, you may want to only return examples that are relevant to answering your question.
+
+This guide will show you how to build an index for your dataset that will allow you to search it.
+
+## FAISS
+
+FAISS retrieves documents based on the similarity of their vector representations. In this example, you will generate the vector representations with the [DPR](https://huggingface.co/transformers/model_doc/dpr.html) model.
+
+1. Download the DPR model from 🤗 Transformers:
+
+```py
+>>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
+>>> import torch
+>>> torch.set_grad_enabled(False)
+>>> ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
+>>> ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
+```
+
+2. Load your dataset and compute the vector representations:
+
+```py
+>>> from datasets import load_dataset
+>>> ds = load_dataset('crime_and_punish', split='train[:100]')
+>>> ds_with_embeddings = ds.map(lambda example: {'embeddings': ctx_encoder(**ctx_tokenizer(example["line"], return_tensors="pt"))[0][0].numpy()})
+```
+
+3. Create the index with [`Dataset.add_faiss_index`]:
+
+```py
+>>> ds_with_embeddings.add_faiss_index(column='embeddings')
+```
+
+4. Now you can query your dataset with the `embeddings` index. Load the DPR Question Encoder, and search for a question with [`Dataset.get_nearest_examples`]:
+
+```py
+>>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
+>>> q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
+>>> q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
+
+>>> question = "Is it serious ?"
+>>> question_embedding = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy()
+>>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', question_embedding, k=10)
+>>> retrieved_examples["line"][0]
+'_that_ serious? It is not serious at all. It’s simply a fantasy to amuse\r\n'
+```
+
+5. You can access the index with [`Dataset.get_index`] and use it for special operations, e.g. query it using `range_search`:
+
+```py
+>>> faiss_index = ds_with_embeddings.get_index('embeddings').faiss_index
+>>> limits, distances, indices = faiss_index.range_search(x=question_embedding.reshape(1, -1), thresh=0.95)
+```
+
+6. When you are done querying, save the index on disk with [`Dataset.save_faiss_index`]:
+
+```py
+>>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
+```
+
+7. Reload it at a later time with [`Dataset.load_faiss_index`]:
+
+```py
+>>> ds = load_dataset('crime_and_punish', split='train[:100]')
+>>> ds.load_faiss_index('embeddings', 'my_index.faiss')
+```
+
+## Elasticsearch
+
+Unlike FAISS, Elasticsearch retrieves documents based on exact matches.
+
+Start Elasticsearch on your machine, or see the [Elasticsearch installation guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html) if you don't already have it installed.
+
+1. Load the dataset you want to index:
+
+```py
+>>> from datasets import load_dataset
+>>> squad = load_dataset('squad', split='validation')
+```
+
+2. Build the index with [`Dataset.add_elasticsearch_index`]:
+
+```py
+>>> squad.add_elasticsearch_index("context", host="localhost", port="9200")
+```
+
+3. Then you can query the `context` index with [`Dataset.get_nearest_examples`]:
+
+```py
+>>> query = "machine"
+>>> scores, retrieved_examples = squad.get_nearest_examples("context", query, k=10)
+>>> retrieved_examples["title"][0]
+'Computational_complexity_theory'
+```
+
+4. If you want to reuse the index, define the `es_index_name` parameter when you build the index:
+
+```py
+>>> from datasets import load_dataset
+>>> squad = load_dataset('squad', split='validation')
+>>> squad.add_elasticsearch_index("context", host="localhost", port="9200", es_index_name="hf_squad_val_context")
+>>> squad.get_index("context").es_index_name
+hf_squad_val_context
+```
+
+5. Reload it later with the index name when you call [`Dataset.load_elasticsearch_index`]:
+
+```py
+>>> from datasets import load_dataset
+>>> squad = load_dataset('squad', split='validation')
+>>> squad.load_elasticsearch_index("context", host="localhost", port="9200", es_index_name="hf_squad_val_context")
+>>> query = "machine"
+>>> scores, retrieved_examples = squad.get_nearest_examples("context", query, k=10)
+```
+
+For more advanced Elasticsearch usage, you can specify your own configuration with custom settings:
+
+```py
+>>> import elasticsearch as es
+>>> import elasticsearch.helpers
+>>> from elasticsearch import Elasticsearch
+>>> es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) # default client
+>>> es_config = {
+...     "settings": {
+...         "number_of_shards": 1,
+...         "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}},
+...     },
+...     "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
+... } # default config
+>>> es_index_name = "hf_squad_context" # name of the index in Elasticsearch
+>>> squad.add_elasticsearch_index("context", es_client=es_client, es_config=es_config, es_index_name=es_index_name)
+```
diff --git a/testbed/huggingface__datasets/docs/source/filesystems.mdx b/testbed/huggingface__datasets/docs/source/filesystems.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2d0e02c42e3d6fa5f3db64c3a6992825f78a27f8
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/filesystems.mdx
@@ -0,0 +1,230 @@
+# Cloud storage
+
+🤗 Datasets supports access to cloud storage providers through `fsspec` FileSystem implementations.
+You can save and load datasets from any cloud storage in a Pythonic way.
+Take a look at the following table for some examples of supported cloud storage providers:
+
+| Storage provider     | Filesystem implementation                                     |
+|----------------------|---------------------------------------------------------------|
+| Amazon S3            | [s3fs](https://s3fs.readthedocs.io/en/latest/)                |
+| Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/)              |
+| Azure Blob/DataLake  | [adlfs](https://github.com/fsspec/adlfs)                      |
+| Dropbox              | [dropboxdrivefs](https://github.com/MarineChap/dropboxdrivefs)|
+| Google Drive         | [gdrivefs](https://github.com/intake/gdrivefs)                |
+| Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/)              |
+
+This guide will show you how to save and load datasets with any cloud storage.
+Here are examples for S3, Google Cloud Storage, Azure Blob Storage, and Oracle Cloud Object Storage.
+
+## Set up your cloud storage FileSystem
+
+### Amazon S3
+
+1. Install the S3 FileSystem implementation:
+
+```
+>>> pip install s3fs
+```
+
+2. Define your credentials
+
+To use an anonymous connection, use `anon=True`.
+Otherwise, include your `aws_access_key_id` and `aws_secret_access_key` whenever you are interacting with a private S3 bucket.
+
+```py
+>>> storage_options = {"anon": True} # for anonymous connection
+# or use your credentials
+>>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} # for private buckets
+# or use a botocore session
+>>> import aiobotocore.session
+>>> s3_session = aiobotocore.session.AioSession(profile="my_profile_name")
+>>> storage_options = {"session": s3_session}
+```
+
+3. Create your FileSystem instance
+
+```py
+>>> import s3fs
+>>> fs = s3fs.S3FileSystem(**storage_options)
+```
+
+### Google Cloud Storage
+
+1. Install the Google Cloud Storage implementation:
+
+```
+>>> conda install -c conda-forge gcsfs
+# or install with pip
+>>> pip install gcsfs
+```
+
+2. Define your credentials
+
+```py
+>>> storage_options={"token": "anon"} # for anonymous connection
+# or use your default gcloud credentials or credentials from the google metadata service
+>>> storage_options={"project": "my-google-project"}
+# or use your credentials from elsewhere, see the documentation at https://gcsfs.readthedocs.io/
+>>> storage_options={"project": "my-google-project", "token": TOKEN}
+```
+
+3. Create your FileSystem instance
+
+```py
+>>> import gcsfs
+>>> fs = gcsfs.GCSFileSystem(**storage_options)
+```
+
+### Azure Blob Storage
+
+1. Install the Azure Blob Storage implementation:
+
+```
+>>> conda install -c conda-forge adlfs
+# or install with pip
+>>> pip install adlfs
+```
+
+2. Define your credentials
+
+```py
+>>> storage_options = {"anon": True} # for anonymous connection
+# or use your credentials
+>>> storage_options = {"account_name": ACCOUNT_NAME, "account_key": ACCOUNT_KEY} # gen 2 filesystem
+# or use your credentials with the gen 1 filesystem
+>>> storage_options={"tenant_id": TENANT_ID, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET}
+```
+
+3. Create your FileSystem instance
+
+```py
+>>> import adlfs
+>>> fs = adlfs.AzureBlobFileSystem(**storage_options)
+```
+
+### Oracle Cloud Object Storage
+
+1. Install the OCI FileSystem implementation:
+
+```
+>>> pip install ocifs
+```
+
+2. Define your credentials
+
+```py
+>>> storage_options = {"config": "~/.oci/config", "region": "us-ashburn-1"}
+```
+
+3.
Create your FileSystem instance + +```py +>>> import ocifs +>>> fs = ocifs.OCIFileSystem(**storage_options) +``` + +## Load and Save your datasets using your cloud storage FileSystem + +### Download and prepare a dataset into a cloud storage + +You can download and prepare a dataset into your cloud storage by specifying a remote `output_dir` in `download_and_prepare`. +Don't forget to use the previously defined `storage_options` containing your credentials to write into a private cloud storage. + +The `download_and_prepare` method works in two steps: +1. it first downloads the raw data files (if any) in your local cache. You can set your cache directory by passing `cache_dir` to [`load_dataset_builder`] +2. then it generates the dataset in Arrow or Parquet format in your cloud storage by iterating over the raw data files. + +Load a dataset builder from the Hugging Face Hub (see [how to load from the Hugging Face Hub](./loading#hugging-face-hub)): + +```py +>>> output_dir = "s3://my-bucket/imdb" +>>> builder = load_dataset_builder("imdb") +>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") +``` + +Load a dataset builder using a loading script (see [how to load a local loading script](./loading#local-loading-script)): + +```py +>>> output_dir = "s3://my-bucket/imdb" +>>> builder = load_dataset_builder("path/to/local/loading_script/loading_script.py") +>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") +``` + +Use your own data files (see [how to load local and remote files](./loading#local-and-remote-files)): + +```py +>>> data_files = {"train": ["path/to/train.csv"]} +>>> output_dir = "s3://my-bucket/imdb" +>>> builder = load_dataset_builder("csv", data_files=data_files) +>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") +``` + +It is highly recommended to save the files as compressed Parquet files to optimize I/O by specifying `file_format="parquet"`. +Otherwise the dataset is saved as an uncompressed Arrow file. + +You can also specify the size of the shards using `max_shard_size` (default is 500MB): + +```py +>>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet", max_shard_size="1GB") +``` + +#### Dask + +Dask is a parallel computing library and it has a pandas-like API for working with larger than memory Parquet datasets in parallel. +Dask can use multiple threads or processes on a single machine, or a cluster of machines to process data in parallel. +Dask supports local data but also data from a cloud storage. + +Therefore you can load a dataset saved as sharded Parquet files in Dask with + +```py +import dask.dataframe as dd + +df = dd.read_parquet(output_dir, storage_options=storage_options) + +# or if your dataset is split into train/valid/test +df_train = dd.read_parquet(output_dir + f"/{builder.name}-train-*.parquet", storage_options=storage_options) +df_valid = dd.read_parquet(output_dir + f"/{builder.name}-validation-*.parquet", storage_options=storage_options) +df_test = dd.read_parquet(output_dir + f"/{builder.name}-test-*.parquet", storage_options=storage_options) +``` + +You can find more about dask dataframes in their [documentation](https://docs.dask.org/en/stable/dataframe.html). 
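+
+The exported Parquet shards can also be read back with 🤗 Datasets itself. A minimal sketch, assuming the `imdb` example above and the default shard naming pattern (support for remote `data_files` depends on your `fsspec` setup):
+
+```py
+from datasets import load_dataset
+
+# reuse output_dir, builder and storage_options from the previous examples
+data_files = {"train": output_dir + f"/{builder.name}-train-*.parquet"}
+ds = load_dataset("parquet", data_files=data_files, storage_options=storage_options)
+```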
+
+## Saving serialized datasets
+
+After you have processed your dataset, you can save it to your cloud storage with [`Dataset.save_to_disk`]:
+
+```py
+# saves encoded_dataset to amazon s3
+>>> encoded_dataset.save_to_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options)
+# saves encoded_dataset to google cloud storage
+>>> encoded_dataset.save_to_disk("gcs://my-private-datasets/imdb/train", storage_options=storage_options)
+# saves encoded_dataset to microsoft azure blob/datalake
+>>> encoded_dataset.save_to_disk("adl://my-private-datasets/imdb/train", storage_options=storage_options)
+```
+
+
+
+Remember to define your credentials in your [FileSystem instance](#set-up-your-cloud-storage-filesystem) `fs` whenever you are interacting with a private cloud storage.
+
+
+
+## Listing serialized datasets
+
+List files from a cloud storage with your FileSystem instance `fs`, using `fs.ls`:
+
+```py
+>>> fs.ls("my-private-datasets/imdb/train", detail=False)
+["dataset_info.json","dataset.arrow","state.json"]
+```
+
+## Load serialized datasets
+
+When you are ready to use your dataset again, reload it with [`Dataset.load_from_disk`]:
+
+```py
+>>> from datasets import load_from_disk
+# load encoded_dataset from cloud storage
+>>> dataset = load_from_disk("s3://a-public-datasets/imdb/train", storage_options=storage_options)
+>>> print(len(dataset))
+25000
+```
diff --git a/testbed/huggingface__datasets/docs/source/how_to.md b/testbed/huggingface__datasets/docs/source/how_to.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e6cf8f719e8f239f89d270faaf5e94499f56c93
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/how_to.md
@@ -0,0 +1,22 @@
+# Overview
+
+The how-to guides offer a more comprehensive overview of all the tools 🤗 Datasets offers and how to use them. This will help you tackle messier real-world datasets where you may need to manipulate the dataset structure or content to get it ready for training.
+
+The guides assume you are familiar and comfortable with the 🤗 Datasets basics. We recommend newer users check out our [tutorials](tutorial) first.
+
+
+
+Interested in learning more? Take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course!
+
+
+
+The guides are organized into six sections:
+
+- General usage: Functions for general dataset loading and processing. The functions shown in this section are applicable across all dataset modalities.
+- Audio: How to load, process, and share audio datasets.
+- Vision: How to load, process, and share image datasets.
+- Text: How to load, process, and share text datasets.
+- Tabular: How to load, process, and share tabular datasets.
+- Dataset repository: How to share and upload a dataset to the Hub.
+
+If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
diff --git a/testbed/huggingface__datasets/docs/source/how_to_metrics.mdx b/testbed/huggingface__datasets/docs/source/how_to_metrics.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..157214e5559b2c12b9c6213b178d75cb7d287af4
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/how_to_metrics.mdx
@@ -0,0 +1,232 @@
+# Metrics
+
+
+
+Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
+
+
+
+Metrics are important for evaluating a model's predictions. In the tutorial, you learned how to compute a metric over an entire evaluation set. You have also seen how to load a metric.
+
+This guide will show you how to:
+
+- Add predictions and references.
+- Compute metrics using different methods.
+- Write your own metric loading script.
+
+## Add predictions and references
+
+When you want to add model predictions and references to a [`Metric`] instance, you have two options:
+
+- [`Metric.add`] adds a single `prediction` and `reference`.
+
+- [`Metric.add_batch`] adds a batch of `predictions` and `references`.
+
+Use [`Metric.add_batch`] by passing it your model predictions, and the references the model predictions should be evaluated against:
+
+```py
+>>> import datasets
+>>> metric = datasets.load_metric('my_metric')
+>>> for model_inputs, gold_references in evaluation_dataset:
+...     model_predictions = model(model_inputs)
+...     metric.add_batch(predictions=model_predictions, references=gold_references)
+>>> final_score = metric.compute()
+```
+
+
+
+Metrics accepts various input formats (Python lists, NumPy arrays, PyTorch tensors, etc.) and converts them to an appropriate format for storage and computation.
+
+
+
+## Compute scores
+
+The most straightforward way to calculate a metric is to call [`Metric.compute`]. But some metrics have additional arguments that allow you to modify the metric's behavior.
+
+Let's load the [SacreBLEU](https://huggingface.co/metrics/sacrebleu) metric, and compute it with a different smoothing method.
+
+1. Load the SacreBLEU metric:
+
+```py
+>>> import datasets
+>>> metric = datasets.load_metric('sacrebleu')
+```
+
+2. Inspect the different arguments available for computing the metric:
+
+```py
+>>> print(metric.inputs_description)
+Produces BLEU scores along with its sufficient statistics
+from a source against one or more references.
+
+Args:
+    predictions: The system stream (a sequence of segments).
+    references: A list of one or more reference streams (each a sequence of segments).
+    smooth_method: The smoothing method to use. (Default: 'exp').
+    smooth_value: The smoothing value. Only valid for 'floor' and 'add-k'. (Defaults: floor: 0.1, add-k: 1).
+    tokenize: Tokenization method to use for BLEU. If not provided, defaults to 'zh' for Chinese, 'ja-mecab' for Japanese and '13a' (mteval) otherwise.
+    lowercase: Lowercase the data. If True, enables case-insensitivity. (Default: False).
+    force: Insist that your tokenized input is actually detokenized.
+...
+```
+
+3. Compute the metric with the `floor` method, and a different `smooth_value`:
+
+```py
+>>> score = metric.compute(smooth_method="floor", smooth_value=0.2)
+```
+
+
+
+## Custom metric loading script
+
+Write a metric loading script to use your own custom metric (or one that is not on the Hub). Then you can load it as usual with [`load_metric`].
+
+To help you get started, open the [SQuAD metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/squad/squad.py) and follow along.
+
+
+
+Get jump started with our metric loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_metric_script.py)!
+
+
+
+### Add metric attributes
+
+Start by adding some information about your metric in [`Metric._info`]. The most important attributes you should specify are:
+
+1. [`MetricInfo.description`] provides a brief description of your metric.
+
+2. [`MetricInfo.citation`] contains a BibTeX citation for the metric.
+
+3. [`MetricInfo.inputs_description`] describes the expected inputs and outputs. It may also provide an example usage of the metric.
+
+4. [`MetricInfo.features`] defines the name and type of the predictions and references.
+
+After you've filled out all these fields in the template, it should look like the following example from the SQuAD metric script:
+
+```py
+class Squad(datasets.Metric):
+    def _info(self):
+        return datasets.MetricInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
+                    "references": {
+                        "id": datasets.Value("string"),
+                        "answers": datasets.features.Sequence(
+                            {
+                                "text": datasets.Value("string"),
+                                "answer_start": datasets.Value("int32"),
+                            }
+                        ),
+                    },
+                }
+            ),
+            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
+            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
+        )
+```
+
+### Download metric files
+
+If your metric needs to download files, or retrieve local files, you will need to use the [`Metric._download_and_prepare`] method. For this example, let's examine the [BLEURT metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/bleurt/bleurt.py).
+
+1. Provide a dictionary of URLs that point to the metric files:
+
+```py
+CHECKPOINT_URLS = {
+    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
+    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
+    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
+    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
+    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
+    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
+}
+```
+
+
+
+If the files are stored locally, provide a dictionary of path(s) instead of URLs.
+
+
+
+2. [`Metric._download_and_prepare`] will take the URLs and download the metric files specified:
+
+```py
+def _download_and_prepare(self, dl_manager):
+
+    # check that config name specifies a valid BLEURT model
+    if self.config_name == "default":
+        logger.warning(
+            "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
+            "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
+        )
+        self.config_name = "bleurt-base-128"
+    if self.config_name not in CHECKPOINT_URLS.keys():
+        raise KeyError(
+            f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
+        )
+
+    # download the model checkpoint specified by self.config_name and set up the scorer
+    model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[self.config_name])
+    self.scorer = score.BleurtScorer(os.path.join(model_path, self.config_name))
+```
+
+### Compute score
+
+[`Metric._compute`] provides the actual instructions for how to compute a metric given the predictions and references. Now let's take a look at the [GLUE metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/glue/glue.py).
+
+1. Provide the functions for [`Metric._compute`] to calculate your metric:
+
+```py
+def simple_accuracy(preds, labels):
+    return (preds == labels).mean().item()
+
+def acc_and_f1(preds, labels):
+    acc = simple_accuracy(preds, labels)
+    f1 = f1_score(y_true=labels, y_pred=preds).item()
+    return {
+        "accuracy": acc,
+        "f1": f1,
+    }
+
+def pearson_and_spearman(preds, labels):
+    pearson_corr = pearsonr(preds, labels)[0].item()
+    spearman_corr = spearmanr(preds, labels)[0].item()
+    return {
+        "pearson": pearson_corr,
+        "spearmanr": spearman_corr,
+    }
+```
+
+2. Create [`Metric._compute`] with instructions for what metric to calculate for each configuration:
+
+```py
+def _compute(self, predictions, references):
+    if self.config_name == "cola":
+        return {"matthews_correlation": matthews_corrcoef(references, predictions)}
+    elif self.config_name == "stsb":
+        return pearson_and_spearman(predictions, references)
+    elif self.config_name in ["mrpc", "qqp"]:
+        return acc_and_f1(predictions, references)
+    elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
+        return {"accuracy": simple_accuracy(predictions, references)}
+    else:
+        raise KeyError(
+            "You should supply a configuration name selected in "
+            '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
+            '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
+        )
+```
+
+### Test
+
+Once you're finished writing your metric loading script, try to load it locally:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('PATH/TO/MY/SCRIPT.py')
+```
diff --git a/testbed/huggingface__datasets/docs/source/image_classification.mdx b/testbed/huggingface__datasets/docs/source/image_classification.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e1b3e059266c43b311456ddb860649d21c2b83b5
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/image_classification.mdx
@@ -0,0 +1,89 @@
+# Image classification
+
+Image classification datasets are used to train a model to classify an entire image. There are a wide variety of applications enabled by these datasets, such as identifying endangered wildlife species or screening for disease in medical images. This guide will show you how to apply transformations to an image classification dataset.
+
+Before you start, make sure you have up-to-date versions of `albumentations` and `opencv-python` installed:
+
+```bash
+pip install -U albumentations opencv-python
+```
+
+This guide uses the [Beans](https://huggingface.co/datasets/beans) dataset for identifying the type of bean plant disease based on an image of its leaf.
+
+Load the dataset and take a look at an example:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("beans")
+>>> dataset["train"][10]
+{'image': <PIL.JpegImagePlugin.JpegImageFile ...>,
+ 'image_file_path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/angular_leaf_spot/angular_leaf_spot_train.204.jpg',
+ 'labels': 0}
+```
+
+The dataset has three fields:
+
+* `image`: a PIL image object.
+* `image_file_path`: the path to the image file.
+* `labels`: the label or category of the image.
+
+Next, check out an image:
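+
+In a notebook, indexing into the `image` column returns the decoded PIL object, which renders inline (a small usage sketch reusing the example above):
+
+```py
+>>> dataset["train"][10]["image"]
+```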
+ +
+ +Now apply some augmentations with `albumentations`. You'll randomly crop the image, flip it horizontally, and adjust its brightness. + +```py +>>> import cv2 +>>> import albumentations +>>> import numpy as np + +>>> transform = albumentations.Compose([ +... albumentations.RandomCrop(width=256, height=256), +... albumentations.HorizontalFlip(p=0.5), +... albumentations.RandomBrightnessContrast(p=0.2), +... ]) +``` + +Create a function to apply the transformation to the images: + +```py +>>> def transforms(examples): +... examples["pixel_values"] = [ +... transform(image=np.array(image))["image"] for image in examples["image"] +... ] +... +... return examples +``` + +Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space: + +```py +>>> dataset.set_transform(transforms) +``` + +You can verify the transformation worked by indexing into the `pixel_values` of the first example: + +```py +>>> import numpy as np +>>> import matplotlib.pyplot as plt + +>>> img = dataset["train"][0]["pixel_values"] +>>> plt.imshow(img) +``` + +
+ + +
+ + + +Now that you know how to process a dataset for image classification, learn +[how to train an image classification model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) +and use it for inference. + + \ No newline at end of file diff --git a/testbed/huggingface__datasets/docs/source/image_dataset.mdx b/testbed/huggingface__datasets/docs/source/image_dataset.mdx new file mode 100644 index 0000000000000000000000000000000000000000..030406f6a465322622131e0b1f142ee9f88b8c65 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/image_dataset.mdx @@ -0,0 +1,403 @@ +# Create an image dataset + +There are two methods for creating and sharing an image dataset. This guide will show you how to: + +* Create an image dataset with `ImageFolder` and some metadata. This is a no-code solution for quickly creating an image dataset with several thousand images. +* Create an image dataset by writing a loading script. This method is a bit more involved, but you have greater flexibility over how a dataset is defined, downloaded, and generated which can be useful for more complex or large scale image datasets. + + + +You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub. + + + +## ImageFolder + +The `ImageFolder` is a dataset builder designed to quickly load an image dataset with several thousand images without requiring you to write any code. + + + +💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `ImageFolder` creates dataset splits based on your dataset repository structure. + + + +`ImageFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like: + +``` +folder/train/dog/golden_retriever.png +folder/train/dog/german_shepherd.png +folder/train/dog/chihuahua.png + +folder/train/cat/maine_coon.png +folder/train/cat/bengal.png +folder/train/cat/birman.png +``` + +Then users can load your dataset by specifying `imagefolder` in [`load_dataset`] and the directory in `data_dir`: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder") +``` + +You can also use `imagefolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure: + +``` +folder/train/dog/golden_retriever.png +folder/train/cat/maine_coon.png +folder/test/dog/german_shepherd.png +folder/test/cat/bengal.png +``` + + + +If all image files are contained in a single directory or if they are not on the same level of directory structure, `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly. + + + + +If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl`. 
+
+```
+folder/train/metadata.csv
+folder/train/0001.png
+folder/train/0002.png
+folder/train/0003.png
+```
+
+You can also zip your images:
+
+```
+folder/metadata.csv
+folder/train.zip
+folder/test.zip
+folder/valid.zip
+```
+
+Your `metadata.csv` file must have a `file_name` column which links image files with their metadata:
+
+```csv
+file_name,additional_feature
+0001.png,This is a first value of a text feature you added to your images
+0002.png,This is a second value of a text feature you added to your images
+0003.png,This is a third value of a text feature you added to your images
+```
+
+or using `metadata.jsonl`:
+
+```jsonl
+{"file_name": "0001.png", "additional_feature": "This is a first value of a text feature you added to your images"}
+{"file_name": "0002.png", "additional_feature": "This is a second value of a text feature you added to your images"}
+{"file_name": "0003.png", "additional_feature": "This is a third value of a text feature you added to your images"}
+```
+
+
+
+If metadata files are present, the inferred labels based on the directory name are dropped by default. To include those labels, set `drop_labels=False` in `load_dataset`.
+
+
+
+### Image captioning
+
+Image captioning datasets have text describing an image. An example `metadata.csv` may look like:
+
+```csv
+file_name,text
+0001.png,This is a golden retriever playing with a ball
+0002.png,A german shepherd
+0003.png,One chihuahua
+```
+
+Load the dataset with `ImageFolder`, and it will create a `text` column for the image captions:
+
+```py
+>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
+>>> dataset[0]["text"]
+"This is a golden retriever playing with a ball"
+```
+
+### Object detection
+
+Object detection datasets have bounding boxes and categories identifying objects in an image. An example `metadata.jsonl` may look like:
+
+```jsonl
+{"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}}
+{"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "categories": [1]}}
+{"file_name": "0003.png", "objects": {"bbox": [[160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], "categories": [2, 2]}}
+```
+
+Load the dataset with `ImageFolder`, and it will create an `objects` column with the bounding boxes and the categories:
+
+```py
+>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
+>>> dataset[0]["objects"]
+{"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}
+```
+
+### Upload dataset to the Hub
+
+Once you've created a dataset, you can share it on the Hub with the [`~datasets.DatasetDict.push_to_hub`] method. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details).
+
+Upload your dataset with [`~datasets.DatasetDict.push_to_hub`]:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
+>>> dataset.push_to_hub("stevhliu/my-image-captioning-dataset")
+```
+
+## Loading script
+
+Write a dataset loading script to share a dataset. It defines a dataset's splits and configurations, and handles downloading and generating a dataset. The script is located in the same folder or repository as the dataset and should have the same name.
+ +``` +my_dataset/ +├── README.md +├── my_dataset.py +└── data/ # optional, may contain your images or TAR archives +``` + +This structure allows your dataset to be loaded in one line: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("path/to/my_dataset") +``` + +This guide will show you how to create a dataset loading script for image datasets, which is a bit different from creating a loading script for text datasets. You'll learn how to: + +* Create a dataset builder class. +* Create dataset configurations. +* Add dataset metadata. +* Download and define the dataset splits. +* Generate the dataset. +* Generate the dataset metadata (optional). +* Upload the dataset to the Hub. + +The best way to learn is to open up an existing image dataset loading script, like [Food-101](https://huggingface.co/datasets/food101/blob/main/food101.py), and follow along! + + + +To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point! + + + +### Create a dataset builder class + +[`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset: + +* `info` stores information about your dataset like its description, license, and features. +* `split_generators` downloads the dataset and defines its splits. +* `generate_examples` generates the images and labels for each split. + +Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. Don't worry about filling in each of these methods yet, you'll develop those over the next few sections: + +```py +class Food101(datasets.GeneratorBasedBuilder): + """Food-101 Images dataset""" + + def _info(self): + + def _split_generators(self, dl_manager): + + def _generate_examples(self, images, metadata_path): +``` + +#### Multiple configurations + +In some cases, a dataset may have more than one configuration. For example, if you check out the [Imagenette dataset](https://huggingface.co/datasets/frgfm/imagenette), you'll notice there are three subsets. + +To create different configurations, use the [`BuilderConfig`] class to create a subclass for your dataset. Provide the links to download the images and labels in `data_url` and `metadata_urls`: + +```py +class Food101Config(datasets.BuilderConfig): + """Builder Config for Food-101""" + + def __init__(self, data_url, metadata_urls, **kwargs): + """BuilderConfig for Food-101. + Args: + data_url: `string`, url to download the zip file from. + metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs + **kwargs: keyword arguments forwarded to super. + """ + super(Food101Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs) + self.data_url = data_url + self.metadata_urls = metadata_urls +``` + +Now you can define your subsets at the top of [`GeneratorBasedBuilder`]. Imagine you want to create two subsets in the Food-101 dataset based on whether it is a breakfast or dinner food. + +1. Define your subsets with `Food101Config` in a list in `BUILDER_CONFIGS`. +2. For each configuration, provide a name, description, and where to download the images and labels from. 
+
+```py
+class Food101(datasets.GeneratorBasedBuilder):
+    """Food-101 Images dataset"""
+
+    BUILDER_CONFIGS = [
+        Food101Config(
+            name="breakfast",
+            description="Food types commonly eaten during breakfast.",
+            data_url="https://link-to-breakfast-foods.zip",
+            metadata_urls={
+                "train": "https://link-to-breakfast-foods-train.txt",
+                "validation": "https://link-to-breakfast-foods-validation.txt"
+            },
+        ),
+        Food101Config(
+            name="dinner",
+            description="Food types commonly eaten during dinner.",
+            data_url="https://link-to-dinner-foods.zip",
+            metadata_urls={
+                "train": "https://link-to-dinner-foods-train.txt",
+                "validation": "https://link-to-dinner-foods-validation.txt"
+            },
+        ),
+    ]
+```
+
+Now if users want to load the `breakfast` configuration, they can use the configuration name:
+
+```py
+>>> from datasets import load_dataset
+>>> ds = load_dataset("food101", "breakfast", split="train")
+```
+
+### Add dataset metadata
+
+Adding information about your dataset is useful for users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. Users can access this information by:
+
+```py
+>>> from datasets import load_dataset_builder
+>>> ds_builder = load_dataset_builder("food101")
+>>> ds_builder.info
+```
+
+There is a lot of information you can specify about your dataset, but some important ones to include are:
+
+1. `description` provides a concise description of the dataset.
+2. `features` specify the dataset column types. Since you're creating an image loading script, you'll need to include the [`Image`] feature.
+3. `supervised_keys` specify the input feature and label.
+4. `homepage` provides a link to the dataset homepage.
+5. `citation` is a BibTeX citation of the dataset.
+6. `license` states the dataset's license.
+
+
+
+You'll notice a lot of the dataset information is defined earlier in the loading script, which makes it easier to read. There are also other [`~datasets.Features`] you can input, so be sure to check out the full list for more details.
+
+
+
+```py
+def _info(self):
+    return datasets.DatasetInfo(
+        description=_DESCRIPTION,
+        features=datasets.Features(
+            {
+                "image": datasets.Image(),
+                "label": datasets.ClassLabel(names=_NAMES),
+            }
+        ),
+        supervised_keys=("image", "label"),
+        homepage=_HOMEPAGE,
+        citation=_CITATION,
+        license=_LICENSE,
+        task_templates=[ImageClassification(image_column="image", label_column="label")],
+    )
+```
+
+### Download and define the dataset splits
+
+Now that you've added some information about your dataset, the next step is to download the dataset and generate the splits.
+
+1. Use the [`DownloadManager.download`] method to download the dataset and any other metadata you'd like to associate with it. This method accepts:
+
+   * the name of a file inside a Hub dataset repository (in other words, the `data/` folder)
+   * a URL to a file hosted somewhere else
+   * a list or dictionary of file names or URLs
+
+   In the Food-101 loading script, you'll notice again the URLs are defined earlier in the script.
+
+2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the images and labels in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.
+
+   In the `gen_kwargs` parameter, specify the file paths to the `images` to iterate over and load. If necessary, you can use [`DownloadManager.iter_archive`] to iterate over images in TAR archives. You can also specify the associated labels in the `metadata_path`.
The `images` and `metadata_path` are passed on to the next step, where you'll actually generate the dataset.
+
+
+To stream a TAR archive file, you need to use [`DownloadManager.iter_archive`]! The [`DownloadManager.download_and_extract`] function does not support TAR archives in streaming mode.
+
+
+```py
+def _split_generators(self, dl_manager):
+    archive_path = dl_manager.download(_BASE_URL)
+    split_metadata_paths = dl_manager.download(_METADATA_URLS)
+    return [
+        datasets.SplitGenerator(
+            name=datasets.Split.TRAIN,
+            gen_kwargs={
+                "images": dl_manager.iter_archive(archive_path),
+                "metadata_path": split_metadata_paths["train"],
+            },
+        ),
+        datasets.SplitGenerator(
+            name=datasets.Split.VALIDATION,
+            gen_kwargs={
+                "images": dl_manager.iter_archive(archive_path),
+                "metadata_path": split_metadata_paths["test"],
+            },
+        ),
+    ]
+```
+
+### Generate the dataset
+
+The last method in the [`GeneratorBasedBuilder`] class actually generates the images and labels in the dataset. It yields a dataset according to the structure specified in `features` from the `info` method. As you can see, `generate_examples` accepts the `images` and `metadata_path` from the previous method as arguments.
+
+
+To stream a TAR archive file, the `metadata_path` needs to be opened and read first. TAR files are accessed and yielded sequentially. This means you need to have the metadata information in hand first so you can yield it with its corresponding image.
+
+
+Now you can write a function for opening and loading examples from the dataset:
+
+```py
+def _generate_examples(self, images, metadata_path):
+    """Generate images and labels for splits."""
+    with open(metadata_path, encoding="utf-8") as f:
+        files_to_keep = set(f.read().split("\n"))
+    for file_path, file_obj in images:
+        if file_path.startswith(_IMAGES_DIR):
+            if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
+                label = file_path.split("/")[2]
+                yield file_path, {
+                    "image": {"path": file_path, "bytes": file_obj.read()},
+                    "label": label,
+                }
+```
+
+### Generate the dataset metadata (optional)
+
+The dataset metadata can be generated and stored in the dataset card (`README.md` file).
+
+Run the following command to generate your dataset metadata in `README.md` and make sure your new loading script works correctly:
+
+```bash
+datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
+```
+
+If your loading script passed the test, you should now have the `dataset_info` YAML fields in the header of the `README.md` file in your dataset folder.
+
+### Upload the dataset to the Hub
+
+Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share).
+
+Congratulations, you can now load your dataset from the Hub! 🥳
+
+```py
+>>> from datasets import load_dataset
+>>> load_dataset("<username>/my_dataset")
+```
diff --git a/testbed/huggingface__datasets/docs/source/image_load.mdx b/testbed/huggingface__datasets/docs/source/image_load.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2f09f2277841e8f3851a8f9377962100b0c3d494
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/image_load.mdx
@@ -0,0 +1,95 @@
+# Load image data
+
+Image datasets are loaded from the `image` column, which contains a PIL object.
+
+
+To work with image datasets, you need to have the `vision` dependency installed. Check out the [installation](./installation#vision) guide to learn how to install it.
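+
+For reference, the `vision` extra can be installed with pip - this is the same command listed in the installation guide:
+
+```bash
+pip install datasets[vision]
+```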
+
+
+When you load an image dataset and call the `image` column, the [`Image`] feature automatically decodes the PIL object into an image:
+
+```py
+>>> from datasets import load_dataset, Image
+
+>>> dataset = load_dataset("beans", split="train")
+>>> dataset[0]["image"]
+```
+
+
+Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding and resampling all the image objects in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset.
+
+
+For a guide on how to load any type of dataset, take a look at the general loading guide.
+
+## Local files
+
+You can load a dataset from the image path. Use the [`~Dataset.cast_column`] function to accept a column of image file paths, and decode it into a PIL image with the [`Image`] feature:
+```py
+>>> from datasets import Dataset, Image
+
+>>> dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2", ..., "path/to/image_n"]}).cast_column("image", Image())
+>>> dataset[0]["image"]
+<PIL.PngImagePlugin.PngImageFile image mode=RGBA size=... at 0x...>
+```
+
+If you only want to load the underlying path to the image dataset without decoding the image object, set `decode=False` in the [`Image`] feature:
+
+```py
+>>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False))
+>>> dataset[0]["image"]
+{'bytes': None,
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/bean_rust/bean_rust_train.29.jpg'}
+```
+
+## ImageFolder
+
+You can also load a dataset with an `ImageFolder` dataset builder which does not require writing a custom dataloader. This makes `ImageFolder` ideal for quickly creating and loading image datasets with several thousand images for different vision tasks. Your image dataset structure should look like this:
+
+```
+folder/train/dog/golden_retriever.png
+folder/train/dog/german_shepherd.png
+folder/train/dog/chihuahua.png
+
+folder/train/cat/maine_coon.png
+folder/train/cat/bengal.png
+folder/train/cat/birman.png
+```
+
+Load your dataset by specifying `imagefolder` and the directory of your dataset in `data_dir`:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder")
+>>> dataset["train"][0]
+{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGB size=... at 0x...>, "label": 0}
+
+>>> dataset["train"][-1]
+{"image": <PIL.PngImagePlugin.PngImageFile image mode=RGB size=... at 0x...>, "label": 1}
+```
+
+Load remote datasets from their URLs with the `data_files` parameter:
+
+```py
+>>> dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip", split="train")
+```
+
+Some datasets have a metadata file (`metadata.csv`/`metadata.jsonl`) associated with them, containing other information about the data like bounding boxes, text captions, and labels. The metadata is automatically loaded when you call [`load_dataset`] and specify `imagefolder` (see the sketch of a metadata file at the end of this page).
+
+To ignore the information in the metadata file, set `drop_labels=False` in [`load_dataset`], and allow `ImageFolder` to automatically infer the label name from the directory name:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False)
+```
+
+
+For more information about creating your own `ImageFolder` dataset, take a look at the [Create an image dataset](./image_dataset) guide.
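+
+As referenced above, a minimal `metadata.jsonl` contains one JSON object per line. The `file_name` column is required and links each row to an image; the `text` caption field here is an illustrative assumption, not a fixed schema:
+
+```jsonl
+{"file_name": "0001.png", "text": "a golden retriever sitting on a lawn"}
+{"file_name": "0002.png", "text": "a maine coon cat on a windowsill"}
+```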
+
+
diff --git a/testbed/huggingface__datasets/docs/source/image_process.mdx b/testbed/huggingface__datasets/docs/source/image_process.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d07ee37fb2fce4a7c2b561a40510757a7169361c
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/image_process.mdx
@@ -0,0 +1,75 @@
+# Process image data
+
+This guide shows specific methods for processing image datasets. Learn how to:
+
+- Use [`~Dataset.map`] with image datasets.
+- Apply data augmentations to a dataset with [`~Dataset.set_transform`].
+
+For a guide on how to process any type of dataset, take a look at the general process guide.
+
+## Map
+
+The [`~Dataset.map`] function can apply transforms over an entire dataset.
+
+For example, create a basic [`Resize`](https://pytorch.org/vision/stable/generated/torchvision.transforms.Resize.html) function:
+
+```py
+>>> def transforms(examples):
+...     examples["pixel_values"] = [image.convert("RGB").resize((100,100)) for image in examples["image"]]
+...     return examples
+```
+
+Now use the [`~Dataset.map`] function to resize the entire dataset, and set `batched=True` to speed up the process by accepting batches of examples. The transform returns `pixel_values` as a cacheable `PIL.Image` object:
+
+```py
+>>> dataset = dataset.map(transforms, remove_columns=["image"], batched=True)
+>>> dataset[0]
+{'label': 6,
+ 'pixel_values': <PIL.Image.Image image mode=RGB size=100x100 at 0x...>}
+```
+
+The cache file saves time because you don't have to execute the same transform twice. The [`~Dataset.map`] function is best for operations you only run once per training - like resizing an image - instead of using it for operations executed for each epoch, like data augmentations.
+
+[`~Dataset.map`] takes up some memory, but you can reduce its memory requirements with the following parameters:
+
+- [`batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.batch_size) determines the number of examples that are processed in one call to the transform function.
+- [`writer_batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.writer_batch_size) determines the number of processed examples that are kept in memory before they are stored away.
+
+Both parameter values default to 1000, which can be expensive if you are storing images. Lower these values to use less memory when you use [`~Dataset.map`].
+
+## Apply transforms
+
+🤗 Datasets applies data augmentations from any library or package to your dataset. Transforms can be applied on-the-fly on batches of data with [`~Dataset.set_transform`], which consumes less disk space.
+
+
+The following example uses [torchvision](https://pytorch.org/vision/stable/index.html), but feel free to use other data augmentation libraries like [Albumentations](https://albumentations.ai/docs/), [Kornia](https://kornia.readthedocs.io/en/latest/), and [imgaug](https://imgaug.readthedocs.io/en/latest/).
+
+
+For example, if you'd like to change the color properties of an image randomly:
+
+```py
+>>> from torchvision.transforms import Compose, ColorJitter, ToTensor
+
+>>> jitter = Compose(
+...     [
+...         ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.5),
+...         ToTensor(),
+...     ]
+... )
+```
+
+Create a function to apply the `ColorJitter` transform:
+
+```py
+>>> def transforms(examples):
+...     examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]]
+...     return examples
+```
+
+Apply the transform with the [`~Dataset.set_transform`] function:
+
+```py
+>>> dataset.set_transform(transforms)
+```
\ No newline at end of file
diff --git a/testbed/huggingface__datasets/docs/source/index.mdx b/testbed/huggingface__datasets/docs/source/index.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a0273e2e8e2d556820201d8c88b6187cc74f5358
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/index.mdx
@@ -0,0 +1,30 @@
+# Datasets
+
+
+🤗 Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks.
+
+Load a dataset in a single line of code, and use our powerful data processing methods to quickly get your dataset ready for training in a deep learning model. Backed by the Apache Arrow format, process large datasets with zero-copy reads without any memory constraints for optimal speed and efficiency. We also feature a deep integration with the [Hugging Face Hub](https://huggingface.co/datasets), allowing you to easily load and share a dataset with the wider machine learning community.
+
+Find your dataset today on the [Hugging Face Hub](https://huggingface.co/datasets), and take an in-depth look inside of it with the live viewer.
+
+
diff --git a/testbed/huggingface__datasets/docs/source/installation.md b/testbed/huggingface__datasets/docs/source/installation.md
new file mode 100644
index 0000000000000000000000000000000000000000..06f7b1c32e34e6a67ca676a000071bf3e8c61411
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/installation.md
@@ -0,0 +1,110 @@
+# Installation
+
+Before you start, you'll need to set up your environment and install the appropriate packages. 🤗 Datasets is tested on **Python 3.7+**.
+
+
+If you want to use 🤗 Datasets with TensorFlow or PyTorch, you'll need to install them separately. Refer to the [TensorFlow installation page](https://www.tensorflow.org/install/pip#tensorflow-2-packages-are-available) or the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) for the specific install command for your framework.
+
+
+## Virtual environment
+
+You should install 🤗 Datasets in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.
+
+1. Create and navigate to your project directory:
+
+   ```bash
+   mkdir ~/my-project
+   cd ~/my-project
+   ```
+
+2. Start a virtual environment inside your directory:
+
+   ```bash
+   python -m venv .env
+   ```
+
+3. Activate and deactivate the virtual environment with the following commands:
+
+   ```bash
+   # Activate the virtual environment
+   source .env/bin/activate
+
+   # Deactivate the virtual environment (`deactivate` is a shell function defined on activation)
+   deactivate
+   ```
+
+Once you've created your virtual environment, you can install 🤗 Datasets in it.
+
+## pip
+
+The most straightforward way to install 🤗 Datasets is with pip:
+
+```bash
+pip install datasets
+```
+
+Run the following command to check if 🤗 Datasets has been properly installed:
+
+```bash
+python -c "from datasets import load_dataset; print(load_dataset('squad', split='train')[0])"
+```
+
+This command downloads version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer/), loads the training split, and prints the first training example. You should see:
+
+```python
+{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']}, 'context': 'Architecturally, the school has a Catholic character.
Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.', 'id': '5733be284776f41900661182', 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?', 'title': 'University_of_Notre_Dame'} +``` + +## Audio + +To work with audio datasets, you need to install the [`Audio`] feature as an extra dependency: + +```bash +pip install datasets[audio] +``` + + + +To decode mp3 files, you need to have at least version 1.1.0 of the `libsndfile` system library. Usually, it's bundled with the python [`soundfile`](https://github.com/bastibe/python-soundfile) package, which is installed as an extra audio dependency for 🤗 Datasets. +For Linux, the required version of `libsndfile` is bundled with `soundfile` starting from version 0.12.0. You can run the following command to determine which version of `libsndfile` is being used by `soundfile`: + +```bash +python -c "import soundfile; print(soundfile.__libsndfile_version__)" +``` + + + + +## Vision + +To work with image datasets, you need to install the [`Image`] feature as an extra dependency: + +```bash +pip install datasets[vision] +``` + +## source + +Building 🤗 Datasets from source lets you make changes to the code base. To install from the source, clone the repository and install with the following commands: + +```bash +git clone https://github.com/huggingface/datasets.git +cd datasets +pip install -e . +``` + +Again, you can check if 🤗 Datasets was properly installed with the following command: + +```bash +python -c "from datasets import load_dataset; print(load_dataset('squad', split='train')[0])" +``` + +## conda + +🤗 Datasets can also be installed from conda, a package management system: + +```bash +conda install -c huggingface -c conda-forge datasets +``` diff --git a/testbed/huggingface__datasets/docs/source/load_hub.mdx b/testbed/huggingface__datasets/docs/source/load_hub.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b36662fb4da755807b3c4fbbeec284571969aa81 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/load_hub.mdx @@ -0,0 +1,101 @@ +# Load a dataset from the Hub + +Finding high-quality datasets that are reproducible and accessible can be difficult. One of 🤗 Datasets main goals is to provide a simple way to load a dataset of any format or type. The easiest way to get started is to discover an existing dataset on the [Hugging Face Hub](https://huggingface.co/datasets) - a community-driven collection of datasets for tasks in NLP, computer vision, and audio - and use 🤗 Datasets to download and generate the dataset. + +This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) and [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) datasets, but feel free to load any dataset you want and follow along. Head over to the Hub now and find a dataset for your task! 
+ +## Load a dataset + +Before you take the time to download a dataset, it's often helpful to quickly get some general information about a dataset. A dataset's information is stored inside [`DatasetInfo`] and can include information such as the dataset description, features, and dataset size. + +Use the [`load_dataset_builder`] function to load a dataset builder and inspect a dataset's attributes without committing to downloading it: + +```py +>>> from datasets import load_dataset_builder +>>> ds_builder = load_dataset_builder("rotten_tomatoes") + +# Inspect dataset description +>>> ds_builder.info.description +Movie Review Dataset. This is a dataset of containing 5,331 positive and 5,331 negative processed sentences from Rotten Tomatoes movie reviews. This data was first used in Bo Pang and Lillian Lee, ``Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales.'', Proceedings of the ACL, 2005. + +# Inspect dataset features +>>> ds_builder.info.features +{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), + 'text': Value(dtype='string', id=None)} +``` + +If you're happy with the dataset, then load it with [`load_dataset`]: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("rotten_tomatoes", split="train") +``` + +## Splits + +A split is a specific subset of a dataset like `train` and `test`. List a dataset's split names with the [`get_dataset_split_names`] function: + +```py +>>> from datasets import get_dataset_split_names + +>>> get_dataset_split_names("rotten_tomatoes") +['train', 'validation', 'test'] +``` + +Then you can load a specific split with the `split` parameter. Loading a dataset `split` returns a [`Dataset`] object: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("rotten_tomatoes", split="train") +>>> dataset +Dataset({ + features: ['text', 'label'], + num_rows: 8530 +}) +``` + +If you don't specify a `split`, 🤗 Datasets returns a [`DatasetDict`] object instead: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("rotten_tomatoes") +DatasetDict({ + train: Dataset({ + features: ['text', 'label'], + num_rows: 8530 + }) + validation: Dataset({ + features: ['text', 'label'], + num_rows: 1066 + }) + test: Dataset({ + features: ['text', 'label'], + num_rows: 1066 + }) +}) +``` + +## Configurations + +Some datasets contain several sub-datasets. For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has several sub-datasets, each one containing audio data in a different language. These sub-datasets are known as *configurations*, and you must explicitly select one when loading the dataset. If you don't provide a configuration name, 🤗 Datasets will raise a `ValueError` and remind you to choose a configuration. 
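+
+For example, trying to load MInDS-14 without a configuration reproduces this. The snippet below is a sketch of the behavior - the exact error text can vary across library versions:
+
+```py
+>>> from datasets import load_dataset
+
+# no configuration name given, so 🤗 Datasets cannot pick one for you
+>>> dataset = load_dataset("PolyAI/minds14")
+ValueError: Config name is missing.
+Please pick one among the available configs: [...]
+```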
+
+Use the [`get_dataset_config_names`] function to retrieve a list of all the possible configurations available to your dataset:
+
+```py
+>>> from datasets import get_dataset_config_names
+
+>>> configs = get_dataset_config_names("PolyAI/minds14")
+>>> print(configs)
+['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN', 'all']
+```
+
+Then load the configuration you want:
+
+```py
+>>> from datasets import load_dataset
+
+>>> mindsFR = load_dataset("PolyAI/minds14", "fr-FR", split="train")
+```
diff --git a/testbed/huggingface__datasets/docs/source/loading.mdx b/testbed/huggingface__datasets/docs/source/loading.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7ca48559dc67f92f02e0cf273e3d028e26c1254d
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/loading.mdx
@@ -0,0 +1,510 @@
+# Load
+
+Your data can be stored in various places: on your local machine's disk, in a GitHub repository, or in in-memory data structures like Python dictionaries and Pandas DataFrames. Wherever a dataset is stored, 🤗 Datasets can help you load it.
+
+This guide will show you how to load a dataset from:
+
+- The Hub without a dataset loading script
+- Local loading script
+- Local files
+- In-memory data
+- Offline
+- A specific slice of a split
+
+For more details specific to loading other dataset modalities, take a look at the load audio dataset guide, the load image dataset guide, or the load text dataset guide.
+
+
+## Hugging Face Hub
+
+Datasets are loaded from a dataset loading script that downloads and generates the dataset. However, you can also load a dataset from any dataset repository on the Hub without a loading script! Begin by [creating a dataset repository](share#create-the-repository) and uploading your data files. Now you can use the [`load_dataset`] function to load the dataset.
+
+For example, try loading the files from this [demo repository](https://huggingface.co/datasets/lhoestq/demo1) by providing the repository namespace and dataset name. This dataset repository contains CSV files, and the code below loads the dataset from the CSV files:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("lhoestq/demo1")
+```
+
+Some datasets may have more than one version based on Git tags, branches, or commits. Use the `revision` parameter to specify the dataset version you want to load:
+
+```py
+>>> dataset = load_dataset(
+...   "lhoestq/custom_squad",
+...   revision="main"  # tag name, or branch name, or commit hash
+... )
+```
+
+
+Refer to the [Upload a dataset to the Hub](./upload_dataset) tutorial for more details on how to create a dataset repository on the Hub, and how to upload your data files.
+
+
+A dataset without a loading script by default loads all the data into the `train` split. Use the `data_files` parameter to map data files to splits like `train`, `validation` and `test`:
+
+```py
+>>> data_files = {"train": "train.csv", "test": "test.csv"}
+>>> dataset = load_dataset("namespace/your_dataset_name", data_files=data_files)
+```
+
+
+If you don't specify which data files to use, [`load_dataset`] will return all the data files. This can take a long time if you load a large dataset like C4, which is approximately 13TB of data.
+
+
+You can also load a specific subset of the files with the `data_files` or `data_dir` parameter.
These parameters can accept a relative path which resolves to the base path corresponding to where the dataset is loaded from. + +```py +>>> from datasets import load_dataset + +# load files that match the grep pattern +>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz") + +# load dataset from the en directory on the Hub +>>> c4_subset = load_dataset("allenai/c4", data_dir="en") +``` + +The `split` parameter can also map a data file to a specific split: + +```py +>>> data_files = {"validation": "en/c4-validation.*.json.gz"} +>>> c4_validation = load_dataset("allenai/c4", data_files=data_files, split="validation") +``` + +## Local loading script + +You may have a 🤗 Datasets loading script locally on your computer. In this case, load the dataset by passing one of the following paths to [`load_dataset`]: + +- The local path to the loading script file. +- The local path to the directory containing the loading script file (only if the script file has the same name as the directory). + +```py +>>> dataset = load_dataset("path/to/local/loading_script/loading_script.py", split="train") +>>> dataset = load_dataset("path/to/local/loading_script", split="train") # equivalent because the file has the same name as the directory +``` + +### Edit loading script + +You can also edit a loading script from the Hub to add your own modifications. Download the dataset repository locally so any data files referenced by a relative path in the loading script can be loaded: + +```bash +git clone https://huggingface.co/datasets/eli5 +``` + +Make your edits to the loading script and then load it by passing its local path to [`~datasets.load_dataset`]: + +```py +>>> from datasets import load_dataset +>>> eli5 = load_dataset("path/to/local/eli5") +``` + +## Local and remote files + +Datasets can be loaded from local files stored on your computer and from remote files. The datasets are most likely stored as a `csv`, `json`, `txt` or `parquet` file. The [`load_dataset`] function can load each of these file types. + +### CSV + +🤗 Datasets can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list): + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("csv", data_files="my_file.csv") +``` + + + +For more details, check out the [how to load tabular datasets from CSV files](tabular_load#csv-files) guide. + + + +### JSON + +JSON files are loaded directly with [`load_dataset`] as shown below: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("json", data_files="my_file.json") +``` + +JSON files have diverse formats, but we think the most efficient format is to have multiple JSON objects; each line represents an individual row of data. 
For example:
+
+```json
+{"a": 1, "b": 2.0, "c": "foo", "d": false}
+{"a": 4, "b": -5.5, "c": null, "d": true}
+```
+
+Another JSON format you may encounter is a nested field, in which case you'll need to specify the `field` argument as shown in the following:
+
+```json
+{"version": "0.1.0",
+ "data": [{"a": 1, "b": 2.0, "c": "foo", "d": false},
+          {"a": 4, "b": -5.5, "c": null, "d": true}]
+}
+```
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("json", data_files="my_file.json", field="data")
+```
+
+To load remote JSON files via HTTP, pass the URLs instead:
+
+```py
+>>> base_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
+>>> dataset = load_dataset("json", data_files={"train": base_url + "train-v1.1.json", "validation": base_url + "dev-v1.1.json"}, field="data")
+```
+
+While these are the most common JSON formats, you'll see other datasets that are formatted differently. 🤗 Datasets recognizes these other formats and will fall back to the Python JSON loading methods to handle them.
+
+### Parquet
+
+Parquet files are stored in a columnar format, unlike row-based files like a CSV. Large datasets may be stored in a Parquet file because it is more efficient and faster at returning your query.
+
+To load a Parquet file:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("parquet", data_files={'train': 'train.parquet', 'test': 'test.parquet'})
+```
+
+To load remote Parquet files via HTTP, pass the URLs instead:
+
+```py
+>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
+>>> data_files = {"train": base_url + "wikipedia-train.parquet"}
+>>> wiki = load_dataset("parquet", data_files=data_files, split="train")
+```
+
+### Arrow
+
+Arrow files are stored in an in-memory columnar format, unlike row-based formats like CSV and compressed columnar formats like Parquet.
+
+To load an Arrow file:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("arrow", data_files={'train': 'train.arrow', 'test': 'test.arrow'})
+```
+
+To load remote Arrow files via HTTP, pass the URLs instead:
+
+```py
+>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
+>>> data_files = {"train": base_url + "wikipedia-train.arrow"}
+>>> wiki = load_dataset("arrow", data_files=data_files, split="train")
+```
+
+Arrow is the file format used by 🤗 Datasets under the hood, therefore you can load a local Arrow file using [`Dataset.from_file`] directly:
+
+```py
+>>> from datasets import Dataset
+>>> dataset = Dataset.from_file("data.arrow")
+```
+
+Unlike [`load_dataset`], [`Dataset.from_file`] memory maps the Arrow file without preparing the dataset in the cache, saving you disk space.
+The cache directory to store intermediate processing results will be the Arrow file directory in that case.
+
+For now only the Arrow streaming format is supported. The Arrow IPC file format (also known as Feather V2) is not supported.
+
+### SQL
+
+Read database contents with [`~datasets.Dataset.from_sql`] by specifying the URI to connect to your database.
You can load data from either a full table or a SQL query:
+
+```py
+>>> from datasets import Dataset
+# load entire table
+>>> dataset = Dataset.from_sql("data_table_name", con="sqlite:///sqlite_file.db")
+# load from query
+>>> dataset = Dataset.from_sql("SELECT text FROM table WHERE length(text) > 100 LIMIT 10", con="sqlite:///sqlite_file.db")
+```
+
+
+For more details, check out the [how to load tabular datasets from SQL databases](tabular_load#databases) guide.
+
+
+## Multiprocessing
+
+When a dataset is made of several files (that we call "shards"), it is possible to significantly speed up the dataset downloading and preparation step.
+
+You can choose how many processes you'd like to use to prepare a dataset in parallel using `num_proc`.
+In this case, each process is given a subset of shards to prepare:
+
+```python
+from datasets import load_dataset
+
+oscar_afrikaans = load_dataset("oscar-corpus/OSCAR-2201", "af", num_proc=8)
+imagenet = load_dataset("imagenet-1k", num_proc=8)
+ml_librispeech_spanish = load_dataset("facebook/multilingual_librispeech", "spanish", num_proc=8)
+```
+
+## In-memory data
+
+🤗 Datasets also allows you to create a [`Dataset`] directly from in-memory data structures like Python dictionaries and Pandas DataFrames.
+
+### Python dictionary
+
+Load Python dictionaries with [`~Dataset.from_dict`]:
+
+```py
+>>> from datasets import Dataset
+>>> my_dict = {"a": [1, 2, 3]}
+>>> dataset = Dataset.from_dict(my_dict)
+```
+
+### Python list of dictionaries
+
+Load a list of Python dictionaries with [`~Dataset.from_list`]:
+
+```py
+>>> from datasets import Dataset
+>>> my_list = [{"a": 1}, {"a": 2}, {"a": 3}]
+>>> dataset = Dataset.from_list(my_list)
+```
+
+### Python generator
+
+Create a dataset from a Python generator with [`~Dataset.from_generator`]:
+
+```py
+>>> from datasets import Dataset
+>>> def my_gen():
+...     for i in range(1, 4):
+...         yield {"a": i}
+...
+>>> dataset = Dataset.from_generator(my_gen)
+```
+
+This approach supports loading data larger than available memory.
+
+You can also define a sharded dataset by passing lists to `gen_kwargs`:
+
+```py
+>>> from datasets import IterableDataset
+>>> def gen(shards):
+...     for shard in shards:
+...         with open(shard) as f:
+...             for line in f:
+...                 yield {"line": line}
+...
+>>> shards = [f"data{i}.txt" for i in range(32)]
+>>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
+>>> ds = ds.shuffle(seed=42, buffer_size=10_000)  # shuffles the shards order + uses a shuffle buffer
+>>> from torch.utils.data import DataLoader
+>>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4)  # give each worker a subset of 32/4=8 shards
+```
+
+### Pandas DataFrame
+
+Load Pandas DataFrames with [`~Dataset.from_pandas`]:
+
+```py
+>>> from datasets import Dataset
+>>> import pandas as pd
+>>> df = pd.DataFrame({"a": [1, 2, 3]})
+>>> dataset = Dataset.from_pandas(df)
+```
+
+
+For more details, check out the [how to load tabular datasets from Pandas DataFrames](tabular_load#pandas-dataframes) guide.
+
+
+## Offline
+
+Even if you don't have an internet connection, it is still possible to load a dataset. As long as you've downloaded a dataset from the Hub repository before, it should be cached. This means you can reload the dataset from the cache and use it offline.
+
+If you know you won't have internet access, you can run 🤗 Datasets in full offline mode. This saves time because instead of waiting for the Dataset builder download to time out, 🤗 Datasets will look directly in the cache. Enable full offline mode by setting the environment variable `HF_DATASETS_OFFLINE` to `1`, as shown below.
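+
+In a shell, a one-line export is enough - `HF_DATASETS_OFFLINE` is the variable documented here; the POSIX `export` syntax is the only assumption:
+
+```bash
+# make 🤗 Datasets resolve everything from the local cache
+export HF_DATASETS_OFFLINE=1
+```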
+
+## Slice splits
+
+You can also choose only to load specific slices of a split. There are two options for slicing a split: using strings or the [`ReadInstruction`] API. Strings are more compact and readable for simple cases, while [`ReadInstruction`] is easier to use with variable slicing parameters.
+
+Concatenate a `train` and `test` split by:
+
+```py
+>>> train_test_ds = datasets.load_dataset("bookcorpus", split="train+test")
+===STRINGAPI-READINSTRUCTION-SPLIT===
+>>> ri = datasets.ReadInstruction("train") + datasets.ReadInstruction("test")
+>>> train_test_ds = datasets.load_dataset("bookcorpus", split=ri)
+```
+
+Select specific rows of the `train` split:
+
+```py
+>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split="train[10:20]")
+===STRINGAPI-READINSTRUCTION-SPLIT===
+>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=10, to=20, unit="abs"))
+```
+
+Or select a percentage of a split with:
+
+```py
+>>> train_10pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]")
+===STRINGAPI-READINSTRUCTION-SPLIT===
+>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", to=10, unit="%"))
+```
+
+Select a combination of percentages from each split:
+
+```py
+>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]+train[-80%:]")
+===STRINGAPI-READINSTRUCTION-SPLIT===
+>>> ri = (datasets.ReadInstruction("train", to=10, unit="%") + datasets.ReadInstruction("train", from_=-80, unit="%"))
+>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split=ri)
+```
+
+Finally, you can even create cross-validated splits. The example below creates 10-fold cross-validated splits. Each validation dataset is a 10% chunk, and the training dataset makes up the remaining complementary 90% chunk:
+
+```py
+>>> val_ds = datasets.load_dataset("bookcorpus", split=[f"train[{k}%:{k+10}%]" for k in range(0, 100, 10)])
+>>> train_ds = datasets.load_dataset("bookcorpus", split=[f"train[:{k}%]+train[{k+10}%:]" for k in range(0, 100, 10)])
+===STRINGAPI-READINSTRUCTION-SPLIT===
+>>> val_ds = datasets.load_dataset("bookcorpus", split=[datasets.ReadInstruction("train", from_=k, to=k+10, unit="%") for k in range(0, 100, 10)])
+>>> train_ds = datasets.load_dataset("bookcorpus", split=[(datasets.ReadInstruction("train", to=k, unit="%") + datasets.ReadInstruction("train", from_=k+10, unit="%")) for k in range(0, 100, 10)])
+```
+
+### Percent slicing and rounding
+
+The default behavior is to round the boundaries to the nearest integer for datasets where the requested slice boundaries do not divide evenly by 100. As shown below, some slices may contain more examples than others. For instance, if the following train split includes 999 records, then:
+
+```py
+# 19 records, from 500 (included) to 519 (excluded).
+>>> train_50_52_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%]")
+# 20 records, from 519 (included) to 539 (excluded).
+>>> train_52_54_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%]")
+```
+
+If you want equal-sized splits, use `pct1_dropremainder` rounding instead. This treats the specified percentage boundaries as multiples of 1%.
+
+```py
+# 18 records, from 450 (included) to 468 (excluded).
+>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=50, to=52, unit="%", rounding="pct1_dropremainder"))
+# 18 records, from 468 (included) to 486 (excluded).
+>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=52, to=54, unit="%", rounding="pct1_dropremainder"))
+# Or equivalently:
+>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%](pct1_dropremainder)")
+>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%](pct1_dropremainder)")
+```
+
+
+`pct1_dropremainder` rounding may truncate the last examples in a dataset if the number of examples in your dataset doesn't divide evenly by 100.
+
+
+## Troubleshooting
+
+Sometimes, you may get unexpected results when you load a dataset. Two of the most common issues you may encounter are manually downloading a dataset and specifying features of a dataset.
+
+### Manual download
+
+Certain datasets require you to manually download the dataset files due to licensing incompatibility or if the files are hidden behind a login page. This causes [`load_dataset`] to throw an `AssertionError`. But 🤗 Datasets provides detailed instructions for downloading the missing files. After you've downloaded the files, use the `data_dir` argument to specify the path to the files you just downloaded.
+
+For example, if you try to download a configuration from the [MATINF](https://huggingface.co/datasets/matinf) dataset:
+
+```py
+>>> dataset = load_dataset("matinf", "summarization")
+Downloading and preparing dataset matinf/summarization (download: Unknown size, generated: 246.89 MiB, post-processed: Unknown size, total: 246.89 MiB) to /root/.cache/huggingface/datasets/matinf/summarization/1.0.0/82eee5e71c3ceaf20d909bca36ff237452b4e4ab195d3be7ee1c78b53e6f540e...
+AssertionError: The dataset matinf with config summarization requires manual data.
+Please follow the manual download instructions: To use MATINF you have to download it manually. Please fill this google form (https://forms.gle/nkH4LVE4iNQeDzsc9). You will receive a download link and a password once you complete the form. Please extract all files in one folder and load the dataset with: *datasets.load_dataset('matinf', data_dir='path/to/folder/folder_name')*.
+Manual data can be loaded with `datasets.load_dataset('matinf', data_dir='<path/to/folder>')`
+```
+
+If you've already downloaded a dataset from the *Hub with a loading script* to your computer, then you need to pass an absolute path to the `data_dir` or `data_files` parameter to load that dataset. Otherwise, if you pass a relative path, [`load_dataset`] will load the directory from the repository on the Hub instead of the local directory.
+
+### Specify features
+
+When you create a dataset from local files, the [`Features`] are automatically inferred by [Apache Arrow](https://arrow.apache.org/docs/). However, the dataset's features may not always align with your expectations, or you may want to define the features yourself. The following example shows how you can add custom labels with the [`ClassLabel`] feature.
Start by defining your own labels with the [`Features`] class:
+
+```py
+>>> from datasets import Features, Value, ClassLabel
+>>> class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"]
+>>> emotion_features = Features({'text': Value('string'), 'label': ClassLabel(names=class_names)})
+```
+
+Next, specify the `features` parameter in [`load_dataset`] with the features you just created:
+
+```py
+>>> dataset = load_dataset('csv', data_files=file_dict, delimiter=';', column_names=['text', 'label'], features=emotion_features)
+```
+
+Now when you look at your dataset features, you can see it uses the custom labels you defined:
+
+```py
+>>> dataset['train'].features
+{'text': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=6, names=['sadness', 'joy', 'love', 'anger', 'fear', 'surprise'], names_file=None, id=None)}
+```
+
+## Metrics
+
+
+Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
+
+
+When the metric you want to use is not supported by 🤗 Datasets, you can write and use your own metric script. Load your metric by providing the path to your local metric loading script:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('PATH/TO/MY/METRIC/SCRIPT')
+
+>>> # Example of typical usage
+>>> for batch in dataset:
+...     inputs, references = batch
+...     predictions = model(inputs)
+...     metric.add_batch(predictions=predictions, references=references)
+>>> score = metric.compute()
+```
+
+
+See the [Metrics](./how_to_metrics#custom-metric-loading-script) guide for more details on how to write your own metric loading script.
+
+
+### Load configurations
+
+It is possible for a metric to have different configurations. The configurations are stored in the `config_name` attribute of [`MetricInfo`]. When you load a metric, provide the configuration name as shown in the following:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('bleurt', name='bleurt-base-128')
+>>> metric = load_metric('bleurt', name='bleurt-base-512')
+```
+
+### Distributed setup
+
+When working in a distributed or parallel processing environment, loading and computing a metric can be tricky because these processes are executed in parallel on separate subsets of the data. 🤗 Datasets supports distributed usage with a few additional arguments when you load a metric.
+
+For example, imagine you are training and evaluating on eight parallel processes. Here's how you would load a metric in this distributed setting:
+
+1. Define the total number of processes with the `num_process` argument.
+
+2. Set the process `rank` as an integer between zero and `num_process - 1`.
+
+3. Load your metric with [`load_metric`] with these arguments:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=rank)
+```
+
+
+Once you've loaded a metric for distributed usage, you can compute the metric as usual. Behind the scenes, [`Metric.compute`] gathers all the predictions and references from the nodes, and computes the final metric.
+
+
+In some instances, you may be simultaneously running multiple independent distributed evaluations on the same server and files.
To avoid any conflicts, it is important to provide an `experiment_id` to distinguish the separate evaluations:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('glue', 'mrpc', num_process=num_process, process_id=process_id, experiment_id="My_experiment_10")
+```
diff --git a/testbed/huggingface__datasets/docs/source/metrics.mdx b/testbed/huggingface__datasets/docs/source/metrics.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3342fa847c47cc9e7e59e76b564c91e27d341bc5
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/metrics.mdx
@@ -0,0 +1,85 @@
+# Evaluate predictions
+
+
+Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
+
+
+🤗 Datasets provides various common and NLP-specific [metrics](https://huggingface.co/metrics) for you to measure your model's performance. In this section of the tutorials, you will load a metric and use it to evaluate your model's predictions.
+
+You can see what metrics are available with [`list_metrics`]:
+
+```py
+>>> from datasets import list_metrics
+>>> metrics_list = list_metrics()
+>>> len(metrics_list)
+28
+>>> print(metrics_list)
+['accuracy', 'bertscore', 'bleu', 'bleurt', 'cer', 'comet', 'coval', 'cuad', 'f1', 'gleu', 'glue', 'indic_glue', 'matthews_correlation', 'meteor', 'pearsonr', 'precision', 'recall', 'rouge', 'sacrebleu', 'sari', 'seqeval', 'spearmanr', 'squad', 'squad_v2', 'super_glue', 'wer', 'wiki_split', 'xnli']
+```
+
+## Load metric
+
+It is very easy to load a metric with 🤗 Datasets. In fact, you will notice that it is very similar to loading a dataset! Load a metric from the Hub with [`load_metric`]:
+
+```py
+>>> from datasets import load_metric
+>>> metric = load_metric('glue', 'mrpc')
+```
+
+This will load the metric associated with the MRPC dataset from the GLUE benchmark.
+
+## Select a configuration
+
+If you are using a benchmark dataset, you need to select a metric that is associated with the configuration you are using. Select a metric configuration by providing the configuration name:
+
+```py
+>>> metric = load_metric('glue', 'mrpc')
+```
+
+## Metrics object
+
+Before you begin using a [`Metric`] object, you should get to know it a little better. As with a dataset, you can return some basic information about a metric. For example, access the `inputs_description` parameter in [`datasets.MetricInfo`] to get more information about a metric's expected input format and some usage examples:
+
+```py
+>>> print(metric.inputs_description)
+Compute GLUE evaluation metric associated to each GLUE dataset.
+Args:
+    predictions: list of predictions to score.
+        Each translation should be tokenized into a list of tokens.
+    references: list of lists of references for each translation.
+        Each reference should be tokenized into a list of tokens.
+Returns: depending on the GLUE subset, one or several of: + "accuracy": Accuracy + "f1": F1 score + "pearson": Pearson Correlation + "spearmanr": Spearman Correlation + "matthews_correlation": Matthew Correlation +Examples: + >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"] + >>> references = [0, 1] + >>> predictions = [0, 1] + >>> results = glue_metric.compute(predictions=predictions, references=references) + >>> print(results) + {'accuracy': 1.0} + ... + >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp' + >>> references = [0, 1] + >>> predictions = [0, 1] + >>> results = glue_metric.compute(predictions=predictions, references=references) + >>> print(results) + {'accuracy': 1.0, 'f1': 1.0} + ... +``` + +Notice for the MRPC configuration, the metric expects the input format to be zero or one. For a complete list of attributes you can return with your metric, take a look at [`MetricInfo`]. + +## Compute metric + +Once you have loaded a metric, you are ready to use it to evaluate a models predictions. Provide the model predictions and references to [`~datasets.Metric.compute`]: + +```py +>>> model_predictions = model(model_inputs) +>>> final_score = metric.compute(predictions=model_predictions, references=gold_references) +``` diff --git a/testbed/huggingface__datasets/docs/source/nlp_load.mdx b/testbed/huggingface__datasets/docs/source/nlp_load.mdx new file mode 100644 index 0000000000000000000000000000000000000000..5cfe5d31e99e67993c7dcffeeabcb7a893b51eee --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/nlp_load.mdx @@ -0,0 +1,36 @@ +# Load text data + +This guide shows you how to load text datasets. To learn how to load any type of dataset, take a look at the general loading guide. + +Text files are one of the most common file types for storing a dataset. By default, 🤗 Datasets samples a text file line by line to build the dataset. + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("text", data_files={"train": ["my_text_1.txt", "my_text_2.txt"], "test": "my_test_file.txt"}) + +# Load from a directory +>>> dataset = load_dataset("text", data_dir="path/to/text/dataset") +``` + +To sample a text file by paragraph or even an entire document, use the `sample_by` parameter: + +```py +# Sample by paragraph +>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="paragraph") + +# Sample by document +>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="document") +``` + +You can also use grep patterns to load specific files: + +```py +>>> from datasets import load_dataset +>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz") +``` + +To load remote text files via HTTP, pass the URLs instead: + +```py +>>> dataset = load_dataset("text", data_files="https://huggingface.co/datasets/lhoestq/test/resolve/main/some_text.txt") +``` \ No newline at end of file diff --git a/testbed/huggingface__datasets/docs/source/nlp_process.mdx b/testbed/huggingface__datasets/docs/source/nlp_process.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bfcc0bd16ba37754cedcd4528e64e9ccc52ec887 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/nlp_process.mdx @@ -0,0 +1,63 @@ +# Process text data + +This guide shows specific methods for processing text datasets. 
Learn how to: + +- Tokenize a dataset with [`~Dataset.map`]. +- Align dataset labels with label ids for NLI datasets. + +For a guide on how to process any type of dataset, take a look at the general process guide. + +## Map + +The [`~Dataset.map`] function supports processing batches of examples at once which speeds up tokenization. + +Load a tokenizer from 🤗 [Transformers](https://huggingface.co/transformers/): + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") +``` + +Set the `batched` parameter to `True` in the [`~Dataset.map`] function to apply the tokenizer to batches of examples: + +```py +>>> dataset = dataset.map(lambda examples: tokenizer(examples["text"]), batched=True) +>>> dataset[0] +{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', + 'label': 1, + 'input_ids': [101, 1996, 2600, 2003, 16036, 2000, 2022, 1996, 7398, 2301, 1005, 1055, 2047, 1000, 16608, 1000, 1998, 2008, 2002, 1005, 1055, 2183, 2000, 2191, 1037, 17624, 2130, 3618, 2084, 7779, 29058, 8625, 13327, 1010, 3744, 1011, 18856, 19513, 3158, 5477, 4168, 2030, 7112, 16562, 2140, 1012, 102], + 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} +``` + +The [`~Dataset.map`] function converts the returned values to a PyArrow-supported format. But explicitly returning the tensors as NumPy arrays is faster because it is a natively supported PyArrow format. Set `return_tensors="np"` when you tokenize your text: + +```py +>>> dataset = dataset.map(lambda examples: tokenizer(examples["text"], return_tensors="np"), batched=True) +``` + +## Align + +The [`~Dataset.align_labels_with_mapping`] function aligns a dataset label id with the label name. Not all 🤗 Transformers models follow the prescribed label mapping of the original dataset, especially for NLI datasets. For example, the [MNLI](https://huggingface.co/datasets/glue) dataset uses the following label mapping: + +```py +>>> label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} +``` + +To align the dataset label mapping with the mapping used by a model, create a dictionary of the label name and id to align on: + +```py +>>> label2id = {"contradiction": 0, "neutral": 1, "entailment": 2} +``` + +Pass the dictionary of the label mappings to the [`~Dataset.align_labels_with_mapping`] function, and the column to align on: + +```py +>>> from datasets import load_dataset + +>>> mnli = load_dataset("glue", "mnli", split="train") +>>> mnli_aligned = mnli.align_labels_with_mapping(label2id, "label") +``` + +You can also use this function to assign a custom mapping of labels to ids. 
\ No newline at end of file
diff --git a/testbed/huggingface__datasets/docs/source/object_detection.mdx b/testbed/huggingface__datasets/docs/source/object_detection.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..083803523d062841308280c7c82f9470bee892be
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/object_detection.mdx
@@ -0,0 +1,164 @@
+# Object detection
+
+Object detection models identify something in an image, and object detection datasets are used for applications such as autonomous driving and detecting natural hazards like wildfire. This guide will show you how to apply transformations to an object detection dataset following the [tutorial](https://albumentations.ai/docs/examples/example_bboxes/) from [Albumentations](https://albumentations.ai/docs/).
+
+To run these examples, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
+
+```bash
+pip install -U albumentations opencv-python
+```
+
+In this example, you'll use the [`cppe-5`](https://huggingface.co/datasets/cppe-5) dataset for identifying medical personal protective equipment (PPE) in the context of the COVID-19 pandemic.
+
+Load the dataset and take a look at an example:
+
+```py
+>>> from datasets import load_dataset
+
+>>> ds = load_dataset("cppe-5")
+>>> example = ds['train'][0]
+>>> example
+{'height': 663,
+ 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x...>,
+ 'image_id': 15,
+ 'objects': {'area': [3796, 1596, 152768, 81002],
+  'bbox': [[302.0, 109.0, 73.0, 52.0],
+   [810.0, 100.0, 57.0, 28.0],
+   [160.0, 31.0, 248.0, 616.0],
+   [741.0, 68.0, 202.0, 401.0]],
+  'category': [4, 4, 0, 0],
+  'id': [114, 115, 116, 117]},
+ 'width': 943}
+```
+
+The dataset has the following fields:
+
+- `image`: PIL.Image.Image object containing the image.
+- `image_id`: The image ID.
+- `height`: The image height.
+- `width`: The image width.
+- `objects`: A dictionary containing bounding box metadata for the objects in the image:
+  - `id`: The annotation id.
+  - `area`: The area of the bounding box.
+  - `bbox`: The object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format).
+  - `category`: The object's category, with possible values including `Coverall (0)`, `Face_Shield (1)`, `Gloves (2)`, `Goggles (3)` and `Mask (4)`.
+
+You can visualize the `bboxes` on the image using some internal torch utilities. To do that, you will need to reference the [`~datasets.ClassLabel`] feature associated with the category IDs so you can look up the string labels:
+
+```py
+>>> import torch
+>>> from torchvision.ops import box_convert
+>>> from torchvision.utils import draw_bounding_boxes
+>>> from torchvision.transforms.functional import pil_to_tensor, to_pil_image
+
+>>> categories = ds['train'].features['objects'].feature['category']
+
+>>> boxes_xywh = torch.tensor(example['objects']['bbox'])
+>>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy')
+>>> labels = [categories.int2str(x) for x in example['objects']['category']]
+>>> to_pil_image(
+...     draw_bounding_boxes(
+...         pil_to_tensor(example['image']),
+...         boxes_xyxy,
+...         colors="red",
+...         labels=labels,
+...     )
+... )
+```
+
+
+ +
+ + +With `albumentations`, you can apply transforms that will affect the image while also updating the `bboxes` accordingly. In this case, the image is resized to (480, 480), flipped horizontally, and brightened. + +```py +>>> import albumentations +>>> import numpy as np + +>>> transform = albumentations.Compose([ +... albumentations.Resize(480, 480), +... albumentations.HorizontalFlip(p=1.0), +... albumentations.RandomBrightnessContrast(p=1.0), +... ], bbox_params=albumentations.BboxParams(format='coco', label_fields=['category'])) + +>>> image = np.array(example['image']) +>>> out = transform( +... image=image, +... bboxes=example['objects']['bbox'], +... category=example['objects']['category'], +... ) +``` + +Now when you visualize the result, the image should be flipped, but the `bboxes` should still be in the right places. + +```py +>>> image = torch.tensor(out['image']).permute(2, 0, 1) +>>> boxes_xywh = torch.stack([torch.tensor(x) for x in out['bboxes']]) +>>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') +>>> labels = [categories.int2str(x) for x in out['category']] +>>> to_pil_image( +... draw_bounding_boxes( +... image, +... boxes_xyxy, +... colors='red', +... labels=labels +... ) +... ) +``` + +
+ +
+ +Create a function to apply the transform to a batch of examples: + +```py +>>> def transforms(examples): +... images, bboxes, categories = [], [], [] +... for image, objects in zip(examples['image'], examples['objects']): +... image = np.array(image.convert("RGB")) +... out = transform( +... image=image, +... bboxes=objects['bbox'], +... category=objects['category'] +... ) +... images.append(torch.tensor(out['image']).permute(2, 0, 1)) +... bboxes.append(torch.tensor(out['bboxes'])) +... categories.append(out['category']) +... return {'image': images, 'bbox': bboxes, 'category': categories} +``` + +Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly which consumes less disk space. The randomness of data augmentation may return a different image if you access the same example twice. It is especially useful when training a model for several epochs. + +```py +>>> ds['train'].set_transform(transforms) +``` + +You can verify the transform works by visualizing the 10th example: + +```py +>>> example = ds['train'][10] +>>> to_pil_image( +... draw_bounding_boxes( +... example['image'], +... box_convert(example['bbox'], 'xywh', 'xyxy'), +... colors='red', +... labels=[categories.int2str(x) for x in example['category']] +... ) +... ) +``` + +
+ +
+ + + +Now that you know how to process a dataset for object detection, learn +[how to train an object detection model](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/YOLOS/Fine_tuning_YOLOS_for_object_detection_on_custom_dataset_(balloon).ipynb) +and use it for inference. + + diff --git a/testbed/huggingface__datasets/docs/source/package_reference/builder_classes.mdx b/testbed/huggingface__datasets/docs/source/package_reference/builder_classes.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4590ebe954b6b224c6b883142387ced5e19863b1 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/package_reference/builder_classes.mdx @@ -0,0 +1,45 @@ +# Builder classes + +## Builders + +🤗 Datasets relies on two main classes during the dataset building process: [`DatasetBuilder`] and [`BuilderConfig`]. + +[[autodoc]] datasets.DatasetBuilder + +[[autodoc]] datasets.GeneratorBasedBuilder + +[[autodoc]] datasets.BeamBasedBuilder + +[[autodoc]] datasets.ArrowBasedBuilder + +[[autodoc]] datasets.BuilderConfig + +## Download + +[[autodoc]] datasets.DownloadManager + +[[autodoc]] datasets.StreamingDownloadManager + +[[autodoc]] datasets.DownloadConfig + +[[autodoc]] datasets.DownloadMode + +## Verification + +[[autodoc]] datasets.VerificationMode + +## Splits + +[[autodoc]] datasets.SplitGenerator + +[[autodoc]] datasets.Split + +[[autodoc]] datasets.NamedSplit + +[[autodoc]] datasets.NamedSplitAll + +[[autodoc]] datasets.ReadInstruction + +## Version + +[[autodoc]] datasets.utils.Version diff --git a/testbed/huggingface__datasets/docs/source/package_reference/loading_methods.mdx b/testbed/huggingface__datasets/docs/source/package_reference/loading_methods.mdx new file mode 100644 index 0000000000000000000000000000000000000000..d91714cb41c58eabb69a348f80e475aeeba22516 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/package_reference/loading_methods.mdx @@ -0,0 +1,98 @@ +# Loading methods + +Methods for listing and loading datasets and metrics: + +## Datasets + +[[autodoc]] datasets.list_datasets + +[[autodoc]] datasets.load_dataset + +[[autodoc]] datasets.load_from_disk + +[[autodoc]] datasets.load_dataset_builder + +[[autodoc]] datasets.get_dataset_config_names + +[[autodoc]] datasets.get_dataset_infos + +[[autodoc]] datasets.get_dataset_split_names + +[[autodoc]] datasets.inspect_dataset + +## Metrics + + + +Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. + + + +[[autodoc]] datasets.list_metrics + +[[autodoc]] datasets.load_metric + +[[autodoc]] datasets.inspect_metric + +## From files + +Configurations used to load data files. +They are used when loading local files or a dataset repository: + +- local files: `load_dataset("parquet", data_dir="path/to/data/dir")` +- dataset repository: `load_dataset("allenai/c4")` + +You can pass arguments to `load_dataset` to configure data loading. 
+For example you can specify the `sep` parameter to define the [`~datasets.packaged_modules.csv.CsvConfig`] that is used to load the data: + +```python +load_dataset("csv", data_dir="path/to/data/dir", sep="\t") +``` + +### Text + +[[autodoc]] datasets.packaged_modules.text.TextConfig + +[[autodoc]] datasets.packaged_modules.text.Text + +### CSV + +[[autodoc]] datasets.packaged_modules.csv.CsvConfig + +[[autodoc]] datasets.packaged_modules.csv.Csv + +### JSON + +[[autodoc]] datasets.packaged_modules.json.JsonConfig + +[[autodoc]] datasets.packaged_modules.json.Json + +### Parquet + +[[autodoc]] datasets.packaged_modules.parquet.ParquetConfig + +[[autodoc]] datasets.packaged_modules.parquet.Parquet + +### Arrow + +[[autodoc]] datasets.packaged_modules.arrow.ArrowConfig + +[[autodoc]] datasets.packaged_modules.arrow.Arrow + +### SQL + +[[autodoc]] datasets.packaged_modules.sql.SqlConfig + +[[autodoc]] datasets.packaged_modules.sql.Sql + +### Images + +[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolderConfig + +[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolder + +### Audio + +[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolderConfig + +[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolder diff --git a/testbed/huggingface__datasets/docs/source/package_reference/main_classes.mdx b/testbed/huggingface__datasets/docs/source/package_reference/main_classes.mdx new file mode 100644 index 0000000000000000000000000000000000000000..b8ea344196331e70bfd13b94846fa57d9a276c04 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/package_reference/main_classes.mdx @@ -0,0 +1,252 @@ +# Main classes + + +## DatasetInfo + +[[autodoc]] datasets.DatasetInfo + +## Dataset + +The base class [`Dataset`] implements a Dataset backed by an Apache Arrow table. 
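+
+For a minimal illustration, a small in-memory dataset can be constructed directly from Python objects:
+
+```py
+>>> from datasets import Dataset
+>>> ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
+>>> ds.num_rows
+2
+```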
+ +[[autodoc]] datasets.Dataset + - add_column + - add_item + - from_file + - from_buffer + - from_pandas + - from_dict + - from_generator + - data + - cache_files + - num_columns + - num_rows + - column_names + - shape + - unique + - flatten + - cast + - cast_column + - remove_columns + - rename_column + - rename_columns + - select_columns + - class_encode_column + - __len__ + - __iter__ + - iter + - formatted_as + - set_format + - set_transform + - reset_format + - with_format + - with_transform + - __getitem__ + - cleanup_cache_files + - map + - filter + - select + - sort + - shuffle + - train_test_split + - shard + - to_tf_dataset + - push_to_hub + - save_to_disk + - load_from_disk + - flatten_indices + - to_csv + - to_pandas + - to_dict + - to_json + - to_parquet + - to_sql + - to_iterable_dataset + - add_faiss_index + - add_faiss_index_from_external_arrays + - save_faiss_index + - load_faiss_index + - add_elasticsearch_index + - load_elasticsearch_index + - list_indexes + - get_index + - drop_index + - search + - search_batch + - get_nearest_examples + - get_nearest_examples_batch + - info + - split + - builder_name + - citation + - config_name + - dataset_size + - description + - download_checksums + - download_size + - features + - homepage + - license + - size_in_bytes + - supervised_keys + - version + - from_csv + - from_json + - from_parquet + - from_text + - from_sql + - prepare_for_task + - align_labels_with_mapping + +[[autodoc]] datasets.concatenate_datasets + +[[autodoc]] datasets.interleave_datasets + +[[autodoc]] datasets.distributed.split_dataset_by_node + +[[autodoc]] datasets.enable_caching + +[[autodoc]] datasets.disable_caching + +[[autodoc]] datasets.is_caching_enabled + +## DatasetDict + +Dictionary with split names as keys ('train', 'test' for example), and `Dataset` objects as values. +It also has dataset transform methods like map or filter, to process all the splits at once. + +[[autodoc]] datasets.DatasetDict + - data + - cache_files + - num_columns + - num_rows + - column_names + - shape + - unique + - cleanup_cache_files + - map + - filter + - sort + - shuffle + - set_format + - reset_format + - formatted_as + - with_format + - with_transform + - flatten + - cast + - cast_column + - remove_columns + - rename_column + - rename_columns + - select_columns + - class_encode_column + - push_to_hub + - save_to_disk + - load_from_disk + - from_csv + - from_json + - from_parquet + - from_text + - prepare_for_task + + + +## IterableDataset + +The base class [`IterableDataset`] implements an iterable Dataset backed by python generators. + +[[autodoc]] datasets.IterableDataset + - from_generator + - remove_columns + - select_columns + - cast_column + - cast + - __iter__ + - iter + - map + - rename_column + - filter + - shuffle + - skip + - take + - info + - split + - builder_name + - citation + - config_name + - dataset_size + - description + - download_checksums + - download_size + - features + - homepage + - license + - size_in_bytes + - supervised_keys + - version + +## IterableDatasetDict + +Dictionary with split names as keys ('train', 'test' for example), and `IterableDataset` objects as values. 
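+
+You typically get an [`IterableDatasetDict`] by loading a dataset in streaming mode without specifying a split; for example:
+
+```py
+>>> from datasets import load_dataset
+>>> ids = load_dataset("glue", "mrpc", streaming=True)  # no split given: one stream per split
+>>> list(ids.keys())
+['train', 'validation', 'test']
+```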
+ +[[autodoc]] datasets.IterableDatasetDict + - map + - filter + - shuffle + - with_format + - cast + - cast_column + - remove_columns + - rename_column + - rename_columns + - select_columns + +## Features + +[[autodoc]] datasets.Features + +[[autodoc]] datasets.Sequence + +[[autodoc]] datasets.ClassLabel + +[[autodoc]] datasets.Value + +[[autodoc]] datasets.Translation + +[[autodoc]] datasets.TranslationVariableLanguages + +[[autodoc]] datasets.Array2D + +[[autodoc]] datasets.Array3D + +[[autodoc]] datasets.Array4D + +[[autodoc]] datasets.Array5D + +[[autodoc]] datasets.Audio + +[[autodoc]] datasets.Image + +## MetricInfo + +[[autodoc]] datasets.MetricInfo + +## Metric + +The base class `Metric` implements a Metric backed by one or several [`Dataset`]. + +[[autodoc]] datasets.Metric + +## Filesystems + + +[[autodoc]] datasets.filesystems.S3FileSystem + +[[autodoc]] datasets.filesystems.extract_path_from_uri + +[[autodoc]] datasets.filesystems.is_remote_filesystem + +## Fingerprint + +[[autodoc]] datasets.fingerprint.Hasher diff --git a/testbed/huggingface__datasets/docs/source/package_reference/table_classes.mdx b/testbed/huggingface__datasets/docs/source/package_reference/table_classes.mdx new file mode 100644 index 0000000000000000000000000000000000000000..4a46c15f61dfaa63a391e8bb77c4b2dd5a02f88b --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/package_reference/table_classes.mdx @@ -0,0 +1,138 @@ +# Table Classes + +Each `Dataset` object is backed by a PyArrow Table. +A Table can be loaded from either the disk (memory mapped) or in memory. +Several Table types are available, and they all inherit from [`table.Table`]. + +## Table + +[[autodoc]] datasets.table.Table + - validate + - equals + - to_batches + - to_pydict + - to_pandas + - to_string + - field + - column + - itercolumns + - schema + - columns + - num_columns + - num_rows + - shape + - nbytes + +## InMemoryTable + +[[autodoc]] datasets.table.InMemoryTable + - validate + - equals + - to_batches + - to_pydict + - to_pandas + - to_string + - field + - column + - itercolumns + - schema + - columns + - num_columns + - num_rows + - shape + - nbytes + - column_names + - slice + - filter + - flatten + - combine_chunks + - cast + - replace_schema_metadata + - add_column + - append_column + - remove_column + - set_column + - rename_columns + - select + - drop + - from_file + - from_buffer + - from_pandas + - from_arrays + - from_pydict + - from_batches + +## MemoryMappedTable + +[[autodoc]] datasets.table.MemoryMappedTable + - validate + - equals + - to_batches + - to_pydict + - to_pandas + - to_string + - field + - column + - itercolumns + - schema + - columns + - num_columns + - num_rows + - shape + - nbytes + - column_names + - slice + - filter + - flatten + - combine_chunks + - cast + - replace_schema_metadata + - add_column + - append_column + - remove_column + - set_column + - rename_columns + - select + - drop + - from_file + +## ConcatenationTable + +[[autodoc]] datasets.table.ConcatenationTable + - validate + - equals + - to_batches + - to_pydict + - to_pandas + - to_string + - field + - column + - itercolumns + - schema + - columns + - num_columns + - num_rows + - shape + - nbytes + - column_names + - slice + - filter + - flatten + - combine_chunks + - cast + - replace_schema_metadata + - add_column + - append_column + - remove_column + - set_column + - rename_columns + - select + - drop + - from_blocks + - from_tables + +## Utils + +[[autodoc]] datasets.table.concat_tables + +[[autodoc]] 
datasets.table.list_table_cache_files
diff --git a/testbed/huggingface__datasets/docs/source/package_reference/task_templates.mdx b/testbed/huggingface__datasets/docs/source/package_reference/task_templates.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d07566590e38b365c73925cfa0be8105e15668b0
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/package_reference/task_templates.mdx
@@ -0,0 +1,25 @@
+# Task templates
+
+
+
+The Task API is deprecated in favor of [`train-eval-index`](https://github.com/huggingface/hub-docs/blob/9ab2555e1c146122056aba6f89af404a8bc9a6f1/datasetcard.md?plain=1#L90-L106) and will be removed in the next major release.
+
+
+
+The tasks supported by [`Dataset.prepare_for_task`] and [`DatasetDict.prepare_for_task`].
+
+[[autodoc]] datasets.tasks.AutomaticSpeechRecognition
+
+[[autodoc]] datasets.tasks.AudioClassification
+
+[[autodoc]] datasets.tasks.ImageClassification
+    - align_with_features
+
+[[autodoc]] datasets.tasks.LanguageModeling
+
+[[autodoc]] datasets.tasks.QuestionAnsweringExtractive
+
+[[autodoc]] datasets.tasks.Summarization
+
+[[autodoc]] datasets.tasks.TextClassification
+    - align_with_features
diff --git a/testbed/huggingface__datasets/docs/source/package_reference/utilities.mdx b/testbed/huggingface__datasets/docs/source/package_reference/utilities.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..59668dbc120e6d94f7f789843c083ec1af1f5e99
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/package_reference/utilities.mdx
@@ -0,0 +1,58 @@
+# Utilities
+
+## Configure logging
+
+🤗 Datasets strives to be transparent and explicit about how it works, but this can be quite verbose at times. We have included a series of logging methods which allow you to easily adjust the level of verbosity of the entire library. Currently the default verbosity of the library is set to `WARNING`.
+
+To change the level of verbosity, use one of the direct setters. For instance, here is how to change the verbosity to the `INFO` level:
+
+```py
+import datasets
+datasets.logging.set_verbosity_info()
+```
+
+You can also use the environment variable `DATASETS_VERBOSITY` to override the default verbosity, and set it to one of the following: `debug`, `info`, `warning`, `error`, `critical`:
+
+```bash
+DATASETS_VERBOSITY=error ./myprogram.py
+```
+
+All the methods of this logging module are documented below. The main ones are:
+
+- [`logging.get_verbosity`] to get the current level of verbosity in the logger
+- [`logging.set_verbosity`] to set the verbosity to the level of your choice
+
+In order from the least to the most verbose (with their corresponding `int` values):
+
+1. `logging.CRITICAL` or `logging.FATAL` (int value, 50): only reports the most critical errors.
+2. `logging.ERROR` (int value, 40): only reports errors.
+3. `logging.WARNING` or `logging.WARN` (int value, 30): only reports errors and warnings. This is the default level used by the library.
+4. `logging.INFO` (int value, 20): reports errors, warnings, and basic information.
+5. `logging.DEBUG` (int value, 10): reports all information.
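+
+Since [`logging.set_verbosity`] accepts the integer levels listed above, you can also set a level explicitly; the levels are the standard `logging` integers, e.g. `DEBUG` is 10:
+
+```py
+import logging
+
+import datasets
+
+# Equivalent to datasets.logging.set_verbosity_debug()
+datasets.logging.set_verbosity(logging.DEBUG)
+```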
+ +[[autodoc]] datasets.logging.get_verbosity + +[[autodoc]] datasets.logging.set_verbosity + +[[autodoc]] datasets.logging.set_verbosity_info + +[[autodoc]] datasets.logging.set_verbosity_warning + +[[autodoc]] datasets.logging.set_verbosity_debug + +[[autodoc]] datasets.logging.set_verbosity_error + +[[autodoc]] datasets.logging.disable_propagation + +[[autodoc]] datasets.logging.enable_propagation + +## Configure progress bars + +By default, `tqdm` progress bars will be displayed during dataset download and preprocessing. You can disable them globally by setting `HF_DATASETS_DISABLE_PROGRESS_BARS` +environment variable. You can also enable/disable them using [`~utils.enable_progress_bars`] and [`~utils.disable_progress_bars`]. If set, the environment variable has priority on the helpers. + +[[autodoc]] datasets.utils.enable_progress_bars + +[[autodoc]] datasets.utils.disable_progress_bars + +[[autodoc]] datasets.utils.are_progress_bars_disabled \ No newline at end of file diff --git a/testbed/huggingface__datasets/docs/source/process.mdx b/testbed/huggingface__datasets/docs/source/process.mdx new file mode 100644 index 0000000000000000000000000000000000000000..0f5dbfee66694895921c8f3cbb7918a8f3577d51 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/process.mdx @@ -0,0 +1,691 @@ +# Process + +🤗 Datasets provides many tools for modifying the structure and content of a dataset. These tools are important for tidying up a dataset, creating additional columns, converting between features and formats, and much more. + +This guide will show you how to: + +- Reorder rows and split the dataset. +- Rename and remove columns, and other common column operations. +- Apply processing functions to each example in a dataset. +- Concatenate datasets. +- Apply a custom formatting transform. +- Save and export processed datasets. + +For more details specific to processing other dataset modalities, take a look at the process audio dataset guide, the process image dataset guide, or the process text dataset guide. + +The examples in this guide use the MRPC dataset, but feel free to load any dataset of your choice and follow along! + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("glue", "mrpc", split="train") +``` + + + +All processing methods in this guide return a new [`Dataset`] object. Modification is not done in-place. Be careful about overriding your previous dataset! + + + +## Sort, shuffle, select, split, and shard + +There are several functions for rearranging the structure of a dataset. +These functions are useful for selecting only the rows you want, creating train and test splits, and sharding very large datasets into smaller chunks. + +### Sort + +Use [`~Dataset.sort`] to sort column values according to their numerical values. The provided column must be NumPy compatible. + +```py +>>> dataset["label"][:10] +[1, 0, 1, 0, 1, 1, 0, 1, 0, 0] +>>> sorted_dataset = dataset.sort("label") +>>> sorted_dataset["label"][:10] +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] +>>> sorted_dataset["label"][-10:] +[1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +``` + +Under the hood, this creates a list of indices that is sorted according to values of the column. +This indices mapping is then used to access the right rows in the underlying Arrow table. + +### Shuffle + +The [`~Dataset.shuffle`] function randomly rearranges the column values. 
You can specify the `generator` parameter in this function to use a different `numpy.random.Generator` if you want more control over the algorithm used to shuffle the dataset. + +```py +>>> shuffled_dataset = sorted_dataset.shuffle(seed=42) +>>> shuffled_dataset["label"][:10] +[1, 1, 1, 0, 1, 1, 1, 1, 1, 0] +``` + +Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping. +However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower. +This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore. +To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping. +Alternatively, you can switch to an [`IterableDataset`] and leverage its fast approximate shuffling [`IterableDataset.shuffle`]: + +```py +>>> iterable_dataset = dataset.to_iterable_dataset(num_shards=128) +>>> shuffled_iterable_dataset = iterable_dataset.shuffle(seed=42, buffer_size=1000) +``` + +### Select and Filter + +There are two options for filtering rows in a dataset: [`~Dataset.select`] and [`~Dataset.filter`]. + +- [`~Dataset.select`] returns rows according to a list of indices: + +```py +>>> small_dataset = dataset.select([0, 10, 20, 30, 40, 50]) +>>> len(small_dataset) +6 +``` + +- [`~Dataset.filter`] returns rows that match a specified condition: + +```py +>>> start_with_ar = dataset.filter(lambda example: example["sentence1"].startswith("Ar")) +>>> len(start_with_ar) +6 +>>> start_with_ar["sentence1"] +['Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .', +'Arison said Mann may have been one of the pioneers of the world music movement and he had a deep love of Brazilian music .', +'Arts helped coach the youth on an eighth-grade football team at Lombardi Middle School in Green Bay .', +'Around 9 : 00 a.m. EDT ( 1300 GMT ) , the euro was at $ 1.1566 against the dollar , up 0.07 percent on the day .', +"Arguing that the case was an isolated example , Canada has threatened a trade backlash if Tokyo 's ban is not justified on scientific grounds .", +'Artists are worried the plan would harm those who need help most - performers who have a difficult time lining up shows .' +] +``` + +[`~Dataset.filter`] can also filter by indices if you set `with_indices=True`: + +```py +>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True) +>>> len(even_dataset) +1834 +>>> len(dataset) / 2 +1834.0 +``` + +Unless the list of indices to keep is contiguous, those methods also create an indices mapping under the hood. + +### Split + +The [`~Dataset.train_test_split`] function creates train and test splits if your dataset doesn't already have them. This allows you to adjust the relative proportions or an absolute number of samples in each split. 
In the example below, use the `test_size` parameter to create a test split that is 10% of the original dataset:
+
+```py
+>>> dataset.train_test_split(test_size=0.1)
+{'train': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 3301),
+'test': Dataset(schema: {'sentence1': 'string', 'sentence2': 'string', 'label': 'int64', 'idx': 'int32'}, num_rows: 367)}
+>>> 0.1 * len(dataset)
+366.8
+```
+
+The splits are shuffled by default, but you can set `shuffle=False` to prevent shuffling.
+
+### Shard
+
+🤗 Datasets supports sharding to divide a very large dataset into a predefined number of chunks. Specify the `num_shards` parameter in [`~Dataset.shard`] to determine the number of shards to split the dataset into. You'll also need to provide the shard you want to return with the `index` parameter.
+
+For example, the [imdb](https://huggingface.co/datasets/imdb) dataset has 25000 examples:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("imdb", split="train")
+>>> print(dataset)
+Dataset({
+    features: ['text', 'label'],
+    num_rows: 25000
+})
+```
+
+After sharding the dataset into four chunks, the first shard will only have 6250 examples:
+
+```py
+>>> dataset.shard(num_shards=4, index=0)
+Dataset({
+    features: ['text', 'label'],
+    num_rows: 6250
+})
+>>> print(25000/4)
+6250.0
+```
+
+## Rename, remove, cast, and flatten
+
+The following functions allow you to modify the columns of a dataset. These functions are useful for renaming or removing columns, changing columns to a new set of features, and flattening nested column structures.
+
+### Rename
+
+Use [`~Dataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
+
+Provide [`~Dataset.rename_column`] with the name of the original column and the new column name:
+
+```py
+>>> dataset
+Dataset({
+    features: ['sentence1', 'sentence2', 'label', 'idx'],
+    num_rows: 3668
+})
+>>> dataset = dataset.rename_column("sentence1", "sentenceA")
+>>> dataset = dataset.rename_column("sentence2", "sentenceB")
+>>> dataset
+Dataset({
+    features: ['sentenceA', 'sentenceB', 'label', 'idx'],
+    num_rows: 3668
+})
+```
+
+### Remove
+
+When you need to remove one or more columns, provide the column name to remove to the [`~Dataset.remove_columns`] function. Remove more than one column by providing a list of column names:
+
+```py
+>>> dataset = dataset.remove_columns("label")
+>>> dataset
+Dataset({
+    features: ['sentence1', 'sentence2', 'idx'],
+    num_rows: 3668
+})
+>>> dataset = dataset.remove_columns(["sentence1", "sentence2"])
+>>> dataset
+Dataset({
+    features: ['idx'],
+    num_rows: 3668
+})
+```
+
+Conversely, [`~Dataset.select_columns`] selects one or more columns to keep and removes the rest. This function takes either one or a list of column names:
+
+```py
+>>> dataset
+Dataset({
+    features: ['sentence1', 'sentence2', 'label', 'idx'],
+    num_rows: 3668
+})
+>>> dataset = dataset.select_columns(['sentence1', 'sentence2', 'idx'])
+>>> dataset
+Dataset({
+    features: ['sentence1', 'sentence2', 'idx'],
+    num_rows: 3668
+})
+>>> dataset = dataset.select_columns('idx')
+>>> dataset
+Dataset({
+    features: ['idx'],
+    num_rows: 3668
+})
+```
+
+### Cast
+
+The [`~Dataset.cast`] function transforms the feature type of one or more columns. This function accepts your new [`Features`] as its argument.
The example below demonstrates how to change the [`ClassLabel`] and [`Value`] features: + +```py +>>> dataset.features +{'sentence1': Value(dtype='string', id=None), +'sentence2': Value(dtype='string', id=None), +'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None), +'idx': Value(dtype='int32', id=None)} + +>>> from datasets import ClassLabel, Value +>>> new_features = dataset.features.copy() +>>> new_features["label"] = ClassLabel(names=["negative", "positive"]) +>>> new_features["idx"] = Value("int64") +>>> dataset = dataset.cast(new_features) +>>> dataset.features +{'sentence1': Value(dtype='string', id=None), +'sentence2': Value(dtype='string', id=None), +'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None), +'idx': Value(dtype='int64', id=None)} +``` + + + +Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value("int32")` to `Value("bool")` if the original column only contains ones and zeros. + + + +Use the [`~Dataset.cast_column`] function to change the feature type of a single column. Pass the column name and its new feature type as arguments: + +```py +>>> dataset.features +{'audio': Audio(sampling_rate=44100, mono=True, id=None)} + +>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) +>>> dataset.features +{'audio': Audio(sampling_rate=16000, mono=True, id=None)} +``` + +### Flatten + +Sometimes a column can be a nested structure of several types. Take a look at the nested structure below from the SQuAD dataset: + +```py +>>> from datasets import load_dataset +>>> dataset = load_dataset("squad", split="train") +>>> dataset.features +{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), +'context': Value(dtype='string', id=None), +'id': Value(dtype='string', id=None), +'question': Value(dtype='string', id=None), +'title': Value(dtype='string', id=None)} +``` + +The `answers` field contains two subfields: `text` and `answer_start`. Use the [`~Dataset.flatten`] function to extract the subfields into their own separate columns: + +```py +>>> flat_dataset = dataset.flatten() +>>> flat_dataset +Dataset({ + features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], + num_rows: 87599 +}) +``` + +Notice how the subfields are now their own independent columns: `answers.text` and `answers.answer_start`. + +## Map + +Some of the more powerful applications of 🤗 Datasets come from using the [`~Dataset.map`] function. The primary purpose of [`~Dataset.map`] is to speed up processing functions. It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns. + +In the following example, prefix each `sentence1` value in the dataset with `'My sentence: '`. + +Start by creating a function that adds `'My sentence: '` to the beginning of each sentence. The function needs to accept and output a `dict`: + +```py +>>> def add_prefix(example): +... example["sentence1"] = 'My sentence: ' + example["sentence1"] +... 
+...     return example
+```
+
+Now use [`~Dataset.map`] to apply the `add_prefix` function to the entire dataset:
+
+```py
+>>> updated_dataset = small_dataset.map(add_prefix)
+>>> updated_dataset["sentence1"][:5]
+['My sentence: Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
+"My sentence: Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
+'My sentence: They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .',
+'My sentence: Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .',
+]
+```
+
+Let's take a look at another example, except this time, you'll remove a column with [`~Dataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
+
+Specify the column to remove with the `remove_columns` parameter in [`~Dataset.map`]:
+
+```py
+>>> updated_dataset = dataset.map(lambda example: {"new_sentence": example["sentence1"]}, remove_columns=["sentence1"])
+>>> updated_dataset.column_names
+['sentence2', 'label', 'idx', 'new_sentence']
+```
+
+
+
+🤗 Datasets also has a [`~Dataset.remove_columns`] function which is faster because it doesn't copy the data of the remaining columns.
+
+
+
+You can also use [`~Dataset.map`] with indices if you set `with_indices=True`. The example below adds the index to the beginning of each sentence:
+
+```py
+>>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True)
+>>> updated_dataset["sentence2"][:5]
+['0: Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
+ "1: Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .",
+ "2: On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .",
+ '3: Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .',
+ '4: PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .'
+]
+```
+
+### Multiprocessing
+
+Multiprocessing significantly speeds up processing by parallelizing processes on the CPU. Set the `num_proc` parameter in [`~Dataset.map`] to set the number of processes to use:
+
+```py
+>>> updated_dataset = dataset.map(lambda example, idx: {"sentence2": f"{idx}: " + example["sentence2"]}, with_indices=True, num_proc=4)
+```
+
+[`~Dataset.map`] also works with the rank of the process if you set `with_rank=True`. This is analogous to the `with_indices` parameter. The `rank` argument in the mapped function comes after the `idx` argument if `with_indices=True` is also set.
+
+```py
+>>> from multiprocess import set_start_method
+>>> import torch
+>>> import os
+>>>
+>>> for i in range(torch.cuda.device_count()):  # send model to every GPU
+...     model.to(f"cuda:{i}")
+>>>
+>>> def gpu_computation(example, rank):
+...     torch.cuda.set_device(f"cuda:{rank}")  # use one GPU
+...     # Your big GPU call goes here; for example, generation over a "text" column:
+...     inputs = tokenizer(example["text"], truncation=True, return_tensors="pt").to(f"cuda:{rank}")
+...     outputs = model.generate(**inputs)
+...     example["generated_text"] = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+...     return example
+>>>
+>>> if __name__ == "__main__":
+...     set_start_method("spawn")
+...     updated_dataset = dataset.map(gpu_computation, with_rank=True, num_proc=torch.cuda.device_count())
+```
+
+The main use-case for rank is to parallelize computation across several GPUs. This requires setting `multiprocess.set_start_method("spawn")`. If you don't, you'll receive the following CUDA error:
+
+```bash
+RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method.
+```
+
+### Batch processing
+
+The [`~Dataset.map`] function supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` parameter. Batch processing enables interesting applications such as splitting long sentences into shorter chunks and data augmentation.
+
+#### Split long examples
+
+When examples are too long, you may want to split them into several smaller chunks. Begin by creating a function that:
+
+1. Splits the `sentence1` field into chunks of 50 characters.
+
+2. Stacks all the chunks together to create the new dataset.
+
+```py
+>>> def chunk_examples(examples):
+...     chunks = []
+...     for sentence in examples["sentence1"]:
+...         chunks += [sentence[i:i + 50] for i in range(0, len(sentence), 50)]
+...     return {"chunks": chunks}
+```
+
+Apply the function with [`~Dataset.map`]:
+
+```py
+>>> chunked_dataset = dataset.map(chunk_examples, batched=True, remove_columns=dataset.column_names)
+>>> chunked_dataset[:10]
+{'chunks': ['Amrozi accused his brother , whom he called " the ',
+ 'witness " , of deliberately distorting his evidenc',
+ 'e .',
+ "Yucaipa owned Dominick 's before selling the chain",
+ ' to Safeway in 1998 for $ 2.5 billion .',
+ 'They had published an advertisement on the Interne',
+ 't on June 10 , offering the cargo for sale , he ad',
+ 'ded .',
+ 'Around 0335 GMT , Tab shares were up 19 cents , or',
+ ' 4.4 % , at A $ 4.56 , having earlier set a record']}
+```
+
+Notice how the sentences are split into shorter chunks now, and there are more rows in the dataset.
+
+```py
+>>> dataset
+Dataset({
+    features: ['sentence1', 'sentence2', 'label', 'idx'],
+    num_rows: 3668
+})
+>>> chunked_dataset
+Dataset(schema: {'chunks': 'string'}, num_rows: 10470)
+```
+
+#### Data augmentation
+
+The [`~Dataset.map`] function can also be used for data augmentation. The following example generates additional words for a masked token in a sentence.
+
+Load and use the [RoBERTa](https://huggingface.co/roberta-base) model in 🤗 Transformers' [FillMaskPipeline](https://huggingface.co/transformers/main_classes/pipelines#transformers.FillMaskPipeline):
+
+```py
+>>> from random import randint
+>>> from transformers import pipeline
+
+>>> fillmask = pipeline("fill-mask", model="roberta-base")
+>>> mask_token = fillmask.tokenizer.mask_token
+>>> smaller_dataset = dataset.filter(lambda e, i: i < 100, with_indices=True)
+```
+
+Create a function to randomly select a word to mask in the sentence. The function should also return the original sentence and the top three replacements generated by RoBERTa.
+
+```py
+>>> def augment_data(examples):
+...     outputs = []
+...     for sentence in examples["sentence1"]:
+...         words = sentence.split(' ')
+...         K = randint(1, len(words) - 1)
+...         masked_sentence = " ".join(words[:K] + [mask_token] + words[K+1:])
+...         predictions = fillmask(masked_sentence)
+...         augmented_sequences = [predictions[i]["sequence"] for i in range(3)]
+...         outputs += [sentence] + augmented_sequences
+...
+...     return {"data": outputs}
+```
+
+Use [`~Dataset.map`] to apply the function over the whole dataset:
+
+```py
+>>> augmented_dataset = smaller_dataset.map(augment_data, batched=True, remove_columns=dataset.column_names, batch_size=8)
+>>> augmented_dataset[:9]["data"]
+['Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
+ 'Amrozi accused his brother, whom he called " the witness ", of deliberately withholding his evidence.',
+ 'Amrozi accused his brother, whom he called " the witness ", of deliberately suppressing his evidence.',
+ 'Amrozi accused his brother, whom he called " the witness ", of deliberately destroying his evidence.',
+ "Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .",
+ 'Yucaipa owned Dominick Stores before selling the chain to Safeway in 1998 for $ 2.5 billion.',
+ "Yucaipa owned Dominick's before selling the chain to Safeway in 1998 for $ 2.5 billion.",
+ 'Yucaipa owned Dominick Pizza before selling the chain to Safeway in 1998 for $ 2.5 billion.'
+]
+```
+
+For each original sentence, RoBERTa augmented a random word with three alternatives. The original word `distorting` is supplemented by `withholding`, `suppressing`, and `destroying`.
+
+### Process multiple splits
+
+Many datasets have splits that can be processed simultaneously with [`DatasetDict.map`]. For example, tokenize the `sentence1` field in the train and test splits:
+
+```py
+>>> from datasets import load_dataset
+
+# load all the splits
+>>> dataset = load_dataset('glue', 'mrpc')
+>>> encoded_dataset = dataset.map(lambda examples: tokenizer(examples["sentence1"]), batched=True)
+>>> encoded_dataset["train"][0]
+{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
+'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
+'label': 1,
+'idx': 0,
+'input_ids': [ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102],
+'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+}
+```
+
+### Distributed usage
+
+When you use [`~Dataset.map`] in a distributed setting, you should also use [torch.distributed.barrier](https://pytorch.org/docs/stable/distributed?highlight=barrier#torch.distributed.barrier). This ensures the main process performs the mapping, while the other processes load the results, thereby avoiding duplicate work.
+
+The following example shows how you can use `torch.distributed.barrier` to synchronize the processes:
+
+```py
+>>> from datasets import Dataset
+>>> import torch.distributed
+
+>>> dataset1 = Dataset.from_dict({"a": [0, 1, 2]})
+
+>>> if training_args.local_rank > 0:
+...     print("Waiting for main process to perform the mapping")
+...     torch.distributed.barrier()
+
+>>> dataset2 = dataset1.map(lambda x: {"a": x["a"] + 1})
+
+>>> if training_args.local_rank == 0:
+...     print("Loading results from main process")
+...     torch.distributed.barrier()
+```
+
+## Concatenate
+
+Separate datasets can be concatenated if they share the same column types. Concatenate datasets with [`concatenate_datasets`]:
+
+```py
+>>> from datasets import concatenate_datasets, load_dataset
+
+>>> bookcorpus = load_dataset("bookcorpus", split="train")
+>>> wiki = load_dataset("wikipedia", "20220301.en", split="train")
+>>> wiki = wiki.remove_columns([col for col in wiki.column_names if col != "text"])  # only keep the 'text' column
+
+>>> assert bookcorpus.features.type == wiki.features.type
+>>> bert_dataset = concatenate_datasets([bookcorpus, wiki])
+```
+
+You can also concatenate two datasets horizontally by setting `axis=1` as long as the datasets have the same number of rows:
+
+```py
+>>> from datasets import Dataset
+>>> bookcorpus_ids = Dataset.from_dict({"ids": list(range(len(bookcorpus)))})
+>>> bookcorpus_with_ids = concatenate_datasets([bookcorpus, bookcorpus_ids], axis=1)
+```
+
+### Interleave
+
+You can also mix several datasets together by taking alternating examples from each one to create a new dataset. This is known as *interleaving*, which is enabled by the [`interleave_datasets`] function. Both [`interleave_datasets`] and [`concatenate_datasets`] work with regular [`Dataset`] and [`IterableDataset`] objects.
+Refer to the [Stream](./stream#interleave) guide for an example of how to interleave [`IterableDataset`] objects.
+
+You can define sampling probabilities for each of the original datasets to specify how to interleave the datasets.
+In this case, the new dataset is constructed by getting examples one by one from a random dataset until one of the datasets runs out of samples.
+
+```py
+>>> seed = 42
+>>> probabilities = [0.3, 0.5, 0.2]
+>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
+>>> dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)
+>>> dataset["a"]
+[10, 11, 20, 12, 0, 21, 13]
+```
+
+You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples.
+You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, this means that if a dataset is exhausted, sampling loops back to the beginning of that dataset until the stopping criterion is reached.
+Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.
+
+```py
+>>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+>>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+>>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
+>>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+>>> dataset["a"]
+[0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20]
+```
+
+## Format
+
+The [`~Dataset.set_format`] function changes the format of a column to be compatible with some common data formats. Specify the output you'd like in the `type` parameter and the columns you want to format. Formatting is applied on-the-fly.
+ +For example, create PyTorch tensors by setting `type="torch"`: + +```py +>>> import torch +>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) +``` + +The [`~Dataset.with_format`] function also changes the format of a column, except it returns a new [`Dataset`] object: + +```py +>>> dataset = dataset.with_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) +``` + + + +🤗 Datasets also provides support for other common data formats such as NumPy, Pandas, and JAX. Check out the [Using Datasets with TensorFlow](https://huggingface.co/docs/datasets/master/en/use_with_tensorflow#using-totfdataset) guide for more details on how to efficiently create a TensorFlow dataset. + + + +If you need to reset the dataset to its original format, use the [`~Dataset.reset_format`] function: + +```py +>>> dataset.format +{'type': 'torch', 'format_kwargs': {}, 'columns': ['label'], 'output_all_columns': False} +>>> dataset.reset_format() +>>> dataset.format +{'type': 'python', 'format_kwargs': {}, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False} +``` + +### Format transform + +The [`~Dataset.set_transform`] function applies a custom formatting transform on-the-fly. This function replaces any previously specified format. For example, you can use this function to tokenize and pad tokens on-the-fly. Tokenization is only applied when examples are accessed: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +>>> def encode(batch): +... return tokenizer(batch["sentence1"], padding="longest", truncation=True, max_length=512, return_tensors="pt") +>>> dataset.set_transform(encode) +>>> dataset.format +{'type': 'custom', 'format_kwargs': {'transform': }, 'columns': ['idx', 'label', 'sentence1', 'sentence2'], 'output_all_columns': False} +``` + +You can also use the [`~Dataset.set_transform`] function to decode formats not supported by [`Features`]. For example, the [`Audio`] feature uses [`soundfile`](https://python-soundfile.readthedocs.io/en/0.11.0/) - a fast and simple library to install - but it does not provide support for less common audio formats. Here is where you can use [`~Dataset.set_transform`] to apply a custom decoding transform on the fly. You're free to use any library you like to decode the audio files. + +The example below uses the [`pydub`](http://pydub.com/) package to open an audio format not supported by `soundfile`: + +```py +>>> import numpy as np +>>> from pydub import AudioSegment + +>>> audio_dataset_amr = Dataset.from_dict({"audio": ["audio_samples/audio.amr"]}) + +>>> def decode_audio_with_pydub(batch, sampling_rate=16_000): +... def pydub_decode_file(audio_path): +... sound = AudioSegment.from_file(audio_path) +... if sound.frame_rate != sampling_rate: +... sound = sound.set_frame_rate(sampling_rate) +... channel_sounds = sound.split_to_mono() +... samples = [s.get_array_of_samples() for s in channel_sounds] +... fp_arr = np.array(samples).T.astype(np.float32) +... fp_arr /= np.iinfo(samples[0].typecode).max +... return fp_arr +... +... batch["audio"] = [pydub_decode_file(audio_path) for audio_path in batch["audio"]] +... return batch + +>>> audio_dataset_amr.set_transform(decode_audio_with_pydub) +``` + +## Save + +Once you are done processing your dataset, you can save and reuse it later with [`~Dataset.save_to_disk`]. 
+ +Save your dataset by providing the path to the directory you wish to save it to: + +```py +>>> encoded_dataset.save_to_disk("path/of/my/dataset/directory") +``` + +Use the [`load_from_disk`] function to reload the dataset: + +```py +>>> from datasets import load_from_disk +>>> reloaded_dataset = load_from_disk("path/of/my/dataset/directory") +``` + + + +Want to save your dataset to a cloud storage provider? Read our [Cloud Storage](./filesystems) guide to learn how to save your dataset to AWS or Google Cloud Storage. + + + +## Export + +🤗 Datasets supports exporting as well so you can work with your dataset in other applications. The following table shows currently supported file formats you can export to: + +| File type | Export method | +|-------------------------|----------------------------------------------------------------| +| CSV | [`Dataset.to_csv`] | +| JSON | [`Dataset.to_json`] | +| Parquet | [`Dataset.to_parquet`] | +| SQL | [`Dataset.to_sql`] | +| In-memory Python object | [`Dataset.to_pandas`] or [`Dataset.to_dict`] | + +For example, export your dataset to a CSV file like this: + +```py +>>> encoded_dataset.to_csv("path/of/my/dataset.csv") +``` diff --git a/testbed/huggingface__datasets/docs/source/quickstart.mdx b/testbed/huggingface__datasets/docs/source/quickstart.mdx new file mode 100644 index 0000000000000000000000000000000000000000..9c4663972616f803d2629117d2f5d91409f8f8ec --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/quickstart.mdx @@ -0,0 +1,355 @@ + + +# Quickstart + +[[open-in-colab]] + +This quickstart is intended for developers who are ready to dive into the code and see an example of how to integrate 🤗 Datasets into their model training workflow. If you're a beginner, we recommend starting with our [tutorials](./tutorial), where you'll get a more thorough introduction. + +Each dataset is unique, and depending on the task, some datasets may require additional steps to prepare it for training. But you can always use 🤗 Datasets tools to load and process a dataset. The fastest and easiest way to get started is by loading an existing dataset from the [Hugging Face Hub](https://huggingface.co/datasets). There are thousands of datasets to choose from, spanning many tasks. Choose the type of dataset you want to work with, and let's get started! + + + + + +Check out [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course to learn more about other important topics such as loading remote or local datasets, tools for cleaning up a dataset, and creating your own dataset. + + + +Start by installing 🤗 Datasets: + +```bash +pip install datasets +``` + +🤗 Datasets also support audio and image data formats: + +* To work with audio datasets, install the [`Audio`] feature: + + ```bash + pip install datasets[audio] + ``` + +* To work with image datasets, install the [`Image`] feature: + + ```bash + pip install datasets[vision] + ``` + +Besides 🤗 Datasets, make sure your preferred machine learning framework is installed: + + + +```bash +pip install torch +``` + + +```bash +pip install tensorflow +``` + + + +## Audio + +Audio datasets are loaded just like text datasets. However, an audio dataset is preprocessed a bit differently. Instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor). An audio input may also require resampling its sampling rate to match the sampling rate of the pretrained model you're using. 
In this quickstart, you'll prepare the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset for a model to train on and classify the banking issue a customer is having.
+
+**1**. Load the MInDS-14 dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and a dataset split:
+
+```py
+>>> from datasets import load_dataset, Audio
+
+>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
+```
+
+**2**. Next, load a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model and its corresponding feature extractor from the [🤗 Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task.
+
+```py
+>>> from transformers import AutoModelForAudioClassification, AutoFeatureExtractor
+
+>>> model = AutoModelForAudioClassification.from_pretrained("facebook/wav2vec2-base")
+>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
+```
+
+**3**. The [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset card indicates the sampling rate is 8kHz, but the Wav2Vec2 model was pretrained on a sampling rate of 16kHz. You'll need to upsample the `audio` column with the [`~Dataset.cast_column`] function and [`Audio`] feature to match the model's sampling rate.
+
+```py
+>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
+>>> dataset[0]["audio"]
+{'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
+         3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+ 'sampling_rate': 16000}
+```
+
+**4**. Create a function to preprocess the audio `array` with the feature extractor, and truncate and pad the sequences into tidy rectangular tensors. The most important thing to remember is to call the audio `array` in the feature extractor since the `array` - the actual speech signal - is the model input.
+
+Once you have a preprocessing function, use the [`~Dataset.map`] function to speed up processing by applying the function to batches of examples in the dataset.
+
+```py
+>>> def preprocess_function(examples):
+...     audio_arrays = [x["array"] for x in examples["audio"]]
+...     inputs = feature_extractor(
+...         audio_arrays,
+...         sampling_rate=16000,
+...         padding=True,
+...         max_length=100000,
+...         truncation=True,
+...     )
+...     return inputs
+
+>>> dataset = dataset.map(preprocess_function, batched=True)
+```
+
+**5**. Use the [`~Dataset.rename_column`] function to rename the `intent_class` column to `labels`, which is the expected input name in [Wav2Vec2ForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification):
+
+```py
+>>> dataset = dataset.rename_column("intent_class", "labels")
+```
+
+**6**. Set the dataset format according to the machine learning framework you're using.
+
+
+
+Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly.
After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader): + +```py +>>> from torch.utils.data import DataLoader + +>>> dataset.set_format(type="torch", columns=["input_values", "labels"]) +>>> dataloader = DataLoader(dataset, batch_size=4) +``` + + + +Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with +TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` +with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification. + +```py +>>> import tensorflow as tf + +>>> tf_dataset = model.prepare_tf_dataset( +... dataset, +... batch_size=4, +... shuffle=True, +... ) +``` + + + +**7**. Start training with your machine learning framework! Check out the 🤗 Transformers [audio classification guide](https://huggingface.co/docs/transformers/tasks/audio_classification) for an end-to-end example of how to train a model on an audio dataset. + +## Vision + +Image datasets are loaded just like text datasets. However, instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor) to preprocess the dataset. Applying data augmentation to an image is common in computer vision to make the model more robust against overfitting. You're free to use any data augmentation library you want, and then you can apply the augmentations with 🤗 Datasets. In this quickstart, you'll load the [Beans](https://huggingface.co/datasets/beans) dataset and get it ready for the model to train on and identify disease from the leaf images. + +**1**. Load the Beans dataset by providing the [`load_dataset`] function with the dataset name and a dataset split: + +```py +>>> from datasets import load_dataset, Image + +>>> dataset = load_dataset("beans", split="train") +``` + +**2**. Now you can add some data augmentations with any library ([Albumentations](https://albumentations.ai/), [imgaug](https://imgaug.readthedocs.io/en/latest/), [Kornia](https://kornia.readthedocs.io/en/latest/)) you like. Here, you'll use [torchvision](https://pytorch.org/vision/stable/transforms.html) to randomly change the color properties of an image: + +```py +>>> from torchvision.transforms import Compose, ColorJitter, ToTensor + +>>> jitter = Compose( +... [ColorJitter(brightness=0.5, hue=0.5), ToTensor()] +... ) +``` + +**3**. Create a function to apply your transform to the dataset and generate the model input: `pixel_values`. + +```python +>>> def transforms(examples): +... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]] +... return examples +``` + +**4**. Use the [`~Dataset.with_transform`] function to apply the data augmentations on-the-fly: + +```py +>>> dataset = dataset.with_transform(transforms) +``` + +**5**. Set the dataset format according to the machine learning framework you're using. + + + +Wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader). You'll also need to create a collate function to collate the samples into batches: + +```py +>>> from torch.utils.data import DataLoader + +>>> def collate_fn(examples): +... images = [] +... labels = [] +... 
for example in examples: +... images.append((example["pixel_values"])) +... labels.append(example["labels"]) +... +... pixel_values = torch.stack(images) +... labels = torch.tensor(labels) +... return {"pixel_values": pixel_values, "labels": labels} +>>> dataloader = DataLoader(dataset, collate_fn=collate_fn, batch_size=4) +``` + + + +Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with +TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` +with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification. + +Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed: + +```bash +pip install -U albumentations opencv-python +``` + +```py +>>> import albumentations +>>> import numpy as np + +>>> transform = albumentations.Compose([ +... albumentations.RandomCrop(width=256, height=256), +... albumentations.HorizontalFlip(p=0.5), +... albumentations.RandomBrightnessContrast(p=0.2), +... ]) + +>>> def transforms(examples): +... examples["pixel_values"] = [ +... transform(image=np.array(image))["image"] for image in examples["image"] +... ] +... return examples + +>>> dataset.set_transform(transforms) +>>> tf_dataset = model.prepare_tf_dataset( +... dataset, +... batch_size=4, +... shuffle=True, +... ) +``` + + + +**6**. Start training with your machine learning framework! Check out the 🤗 Transformers [image classification guide](https://huggingface.co/docs/transformers/tasks/image_classification) for an end-to-end example of how to train a model on an image dataset. + +## NLP + +Text needs to be tokenized into individual tokens by a [tokenizer](https://huggingface.co/docs/transformers/main_classes/tokenizer). For the quickstart, you'll load the [Microsoft Research Paraphrase Corpus (MRPC)](https://huggingface.co/datasets/glue/viewer/mrpc) training dataset to train a model to determine whether a pair of sentences mean the same thing. + +**1**. Load the MRPC dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and dataset split: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("glue", "mrpc", split="train") +``` + +**2**. Next, load a pretrained [BERT](https://huggingface.co/bert-base-uncased) model and its corresponding tokenizer from the [🤗 Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task. + +```py +>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer + +>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased") +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +===PT-TF-SPLIT=== +>>> from transformers import TFAutoModelForSequenceClassification, AutoTokenizer + +>>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased") +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +``` + +**3**. Create a function to tokenize the dataset, and you should also truncate and pad the text into tidy rectangular tensors. The tokenizer generates three new columns in the dataset: `input_ids`, `token_type_ids`, and an `attention_mask`. 
+These are the model inputs.
+
+Use the [`~Dataset.map`] function to speed up processing by applying your tokenization function to batches of examples in the dataset:
+
+```py
+>>> def encode(examples):
+...     return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, padding="max_length")
+
+>>> dataset = dataset.map(encode, batched=True)
+>>> dataset[0]
+{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
+'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
+'label': 1,
+'idx': 0,
+'input_ids': array([  101,  7277,  2180,  5303,  4806,  1117,  1711,   117,  2292, 1119,  1270,   107,  1103,  7737,   107,   117,  1104,  9938, 4267, 12223, 21811,  1117,  2554,   119,   102, 11336,  6732, 3384,  1106,  1140,  1112,  1178,   107,  1103,  7737,   107, 117,  7277,  2180,  5303,  4806,  1117,  1711,  1104,  9938, 4267, 12223, 21811,  1117,  2554,   119,   102]),
+'token_type_ids': array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+'attention_mask': array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])}
+```
+
+**4**. Rename the `label` column to `labels`, which is the expected input name in [BertForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/bert#transformers.BertForSequenceClassification):
+
+```py
+>>> dataset = dataset.map(lambda examples: {"labels": examples["label"]}, batched=True)
+```
+
+**5**. Set the dataset format according to the machine learning framework you're using.
+
+
+
+Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader):
+
+```py
+>>> import torch
+
+>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "labels"])
+>>> dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
+```
+
+
+
+Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with
+TensorFlow and ready to train or fine-tune a model. It wraps a Hugging Face [`~datasets.Dataset`] as a `tf.data.Dataset`
+with collation and batching, so you can pass it directly to Keras methods like `fit()` without further modification.
+
+```py
+>>> import tensorflow as tf
+
+>>> tf_dataset = model.prepare_tf_dataset(
+...     dataset,
+...     batch_size=4,
+...     shuffle=True,
+... )
+```
+
+
+
+**6**. Start training with your machine learning framework! Check out the 🤗 Transformers [text classification guide](https://huggingface.co/docs/transformers/tasks/sequence_classification) for an end-to-end example of how to train a model on a text dataset.
+
+## What's next?
+
+This completes the 🤗 Datasets quickstart! You can load any text, audio, or image dataset with a single function and get it ready for your model to train on.
+
+For your next steps, take a look at our [How-to guides](./how_to) and learn how to do more specific things like loading different dataset formats, aligning labels, and streaming large datasets.
+If you're interested in learning more about 🤗 Datasets core concepts, grab a cup of coffee and read our [Conceptual Guides](./about_arrow)!
diff --git a/testbed/huggingface__datasets/docs/source/repository_structure.mdx b/testbed/huggingface__datasets/docs/source/repository_structure.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..80b5da95d2897053b5fd75e1c4b479d48928e271
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/repository_structure.mdx
@@ -0,0 +1,281 @@
+# Structure your repository
+
+To host and share your dataset, create a dataset repository on the Hugging Face Hub and upload your data files.
+
+This guide will show you how to structure your dataset repository when you upload it.
+A dataset with a supported structure and file format (`.txt`, `.csv`, `.parquet`, `.jsonl`, `.mp3`, `.jpg`, `.zip`, etc.) is loaded automatically with [`~datasets.load_dataset`], and it'll have a dataset viewer on its dataset page on the Hub.
+
+## Main use-case
+
+The simplest dataset structure has two files: `train.csv` and `test.csv` (this works with any supported file format).
+
+Your repository will also contain a `README.md` file, the [dataset card](dataset_card) displayed on your dataset page.
+
+```
+my_dataset_repository/
+├── README.md
+├── train.csv
+└── test.csv
+```
+
+In this simple case, you'll get a dataset with two splits: `train` (containing examples from `train.csv`) and `test` (containing examples from `test.csv`).
+
+## Define your splits and subsets in YAML
+
+### Splits
+
+If you have multiple files and want to define which file goes into which split, you can use the YAML `configs` field at the top of your README.md.
+
+For example, given a repository like this one:
+
+```
+my_dataset_repository/
+├── README.md
+├── data.csv
+└── holdout.csv
+```
+
+You can define your splits by adding the `configs` field in the YAML block at the top of your README.md:
+
+```yaml
+---
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: "data.csv"
+  - split: test
+    path: "holdout.csv"
+---
+```
+
+You can select multiple files per split using a list of paths:
+
+```
+my_dataset_repository/
+├── README.md
+├── data/
+│   ├── abc.csv
+│   └── def.csv
+└── holdout/
+    └── ghi.csv
+```
+
+```yaml
+---
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path:
+    - "data/abc.csv"
+    - "data/def.csv"
+  - split: test
+    path: "holdout/ghi.csv"
+---
+```
+
+Or you can use glob patterns to automatically list all the files you need:
+
+```yaml
+---
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: "data/*.csv"
+  - split: test
+    path: "holdout/*.csv"
+---
+```
+
+
+
+Note that the `config_name` field is required even if you have a single configuration.
+
+
+
+### Configurations
+
+Your dataset might have several subsets of data that you want to be able to load separately.
+In that case you can define a list of configurations inside the `configs` field in YAML:
+
+```
+my_dataset_repository/
+├── README.md
+├── main_data.csv
+└── additional_data.csv
+```
+
+```yaml
+---
+configs:
+- config_name: main_data
+  data_files: "main_data.csv"
+- config_name: additional_data
+  data_files: "additional_data.csv"
+---
+```
+
+Each configuration is shown separately on the Hugging Face Hub, and can be loaded by passing its name as a second parameter:
+
+```python
+from datasets import load_dataset

+main_data = load_dataset("my_dataset_repository", "main_data")
+additional_data = load_dataset("my_dataset_repository", "additional_data")
+```
+
+## Builder parameters
+
+Besides `data_files`, other builder-specific parameters can also be passed via YAML, allowing for more flexibility in how to load the data without requiring any custom code. For example, define which separator to use in which configuration to load your `csv` files:
+
+```yaml
+---
+configs:
+- config_name: tab
+  data_files: "main_data.csv"
+  sep: "\t"
+- config_name: comma
+  data_files: "additional_data.csv"
+  sep: ","
+---
+```
+
+Refer to the [specific builders' documentation](./package_reference/builder_classes) to see what configuration parameters they have.
+
+
+
+You can set a default configuration using `default: true`, e.g. you can run `main_data = load_dataset("my_dataset_repository")` if you set
+
+```yaml
+- config_name: main_data
+  data_files: "main_data.csv"
+  default: true
+```
+
+
+
+## Automatic splits detection
+
+If no YAML is provided, 🤗 Datasets searches for certain patterns in the dataset repository to automatically infer the dataset splits.
+The patterns are checked in order, beginning with the custom filename split format and falling back to treating all files as a single split if no other pattern matches.
+
+### Directory name
+
+Your data files may also be placed into different directories named `train`, `test`, and `validation` where each directory contains the data files for that split:
+
+```
+my_dataset_repository/
+├── README.md
+└── data/
+    ├── train/
+    │   └── bees.csv
+    ├── test/
+    │   └── more_bees.csv
+    └── validation/
+        └── even_more_bees.csv
+```
+
+### Filename splits
+
+If you don't have any non-traditional splits, then you can place the split name anywhere in the data file and it is automatically inferred. The only rule is that the split name must be delimited by non-word characters, like `test-file.csv` for example instead of `testfile.csv`. Supported delimiters include underscores, dashes, spaces, dots, and numbers.
+
+For example, the following file names are all acceptable:
+
+- train split: `train.csv`, `my_train_file.csv`, `train1.csv`
+- validation split: `validation.csv`, `my_validation_file.csv`, `validation1.csv`
+- test split: `test.csv`, `my_test_file.csv`, `test1.csv`
+
+Here is an example where all the files are placed into a directory named `data`:
+
+```
+my_dataset_repository/
+├── README.md
+└── data/
+    ├── train.csv
+    ├── test.csv
+    └── validation.csv
+```
+
+### Custom filename split
+
+If your dataset splits have custom names that aren't `train`, `test`, or `validation`, then you can name your data files like `data/<split_name>-xxxxx-of-xxxxx.csv`.
+
+Here is an example with three splits, `train`, `test`, and `random`:
+
+```
+my_dataset_repository/
+├── README.md
+└── data/
+    ├── train-00000-of-00003.csv
+    ├── train-00001-of-00003.csv
+    ├── train-00002-of-00003.csv
+    ├── test-00000-of-00001.csv
+    ├── random-00000-of-00003.csv
+    ├── random-00001-of-00003.csv
+    └── random-00002-of-00003.csv
+```
+
+### Single split
+
+When 🤗 Datasets can't find any of the above patterns, it'll treat all the files as a single train split. If your dataset splits aren't loading as expected, it may be due to an incorrect pattern.
+
+### Split name keywords
+
+There are several ways to name splits. Validation splits are sometimes called "dev", and test splits may be referred to as "eval".
+These other split names are also supported, and the following keywords are equivalent:
+
+- train, training
+- validation, valid, val, dev
+- test, testing, eval, evaluation
+
+The structure below is a valid repository:
+
+```
+my_dataset_repository/
+├── README.md
+└── data/
+    ├── training.csv
+    ├── eval.csv
+    └── valid.csv
+```
+
+### Multiple files per split
+
+If one of your splits comprises several files, 🤗 Datasets can still infer whether it is the train, validation, or test split from the file name.
+For example, if your train and test splits span several files:
+
+```
+my_dataset_repository/
+├── README.md
+├── train_0.csv
+├── train_1.csv
+├── train_2.csv
+├── train_3.csv
+├── test_0.csv
+└── test_1.csv
+```
+
+Make sure all the files of your `train` set have *train* in their names (same for test and validation).
+Even if you add a prefix or suffix to `train` in the file name (like `my_train_file_00001.csv` for example),
+🤗 Datasets can still infer the appropriate split.
+
+For convenience, you can also place your data files into different directories.
+In this case, the split name is inferred from the directory name.
+
+```
+my_dataset_repository/
+├── README.md
+└── data/
+    ├── train/
+    │   ├── shard_0.csv
+    │   ├── shard_1.csv
+    │   ├── shard_2.csv
+    │   └── shard_3.csv
+    └── test/
+        ├── shard_0.csv
+        └── shard_1.csv
+```
+
+For more flexibility over how to load and generate a dataset, you can also write a [dataset loading script](./dataset_script).
diff --git a/testbed/huggingface__datasets/docs/source/semantic_segmentation.mdx b/testbed/huggingface__datasets/docs/source/semantic_segmentation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..b7ee935b79af25383ec46ccfd182f4d69afaeb31
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/semantic_segmentation.mdx
@@ -0,0 +1,177 @@
+# Semantic segmentation
+
+Semantic segmentation datasets are used to train a model to classify every pixel in an image. These datasets enable
+a wide variety of applications, such as background removal from images, stylizing
+images, or scene understanding for autonomous driving. This guide will show you how to apply transformations
+to an image segmentation dataset.
+
+Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
+
+```bash
+pip install -U albumentations opencv-python
+```
+
+[Albumentations](https://albumentations.ai/) is a Python library for performing data augmentation
+for computer vision. It supports various computer vision tasks such as image classification, object
+detection, segmentation, and keypoint estimation.
+
+This guide uses the [Scene Parsing](https://huggingface.co/datasets/scene_parse_150) dataset for segmenting
+and parsing an image into different image regions associated with semantic categories, such as sky, road, person, and bed.
+
+Load the `train` split of the dataset and take a look at an example:
+
+```py
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("scene_parse_150", split="train")
+>>> index = 10
+>>> dataset[index]
+{'image': <PIL.JpegImagePlugin.JpegImageFile>,
+ 'annotation': <PIL.PngImagePlugin.PngImageFile>,
+ 'scene_category': 927}
+```
+
+The dataset has three fields:
+
+* `image`: a PIL image object.
+* `annotation`: segmentation mask of the image.
+* `scene_category`: the label or scene category of the image (like “kitchen” or “office”).
+
+Next, check out an image with:
+
+```py
+>>> dataset[index]["image"]
+```
+
+ +
+ +Similarly, you can check out the respective segmentation mask: + +```py +>>> dataset[index]["annotation"] +``` + +
+ +
+
+We can also add a [color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) on the
+segmentation mask and overlay it on top of the original image to visualize the dataset.
+
+After defining the color palette (copy `create_ade20k_label_colormap` from the link above, or use the stand-in sketched below the figure), you're ready to visualize some overlays.
+
+```py
+>>> import numpy as np
+>>> import matplotlib.pyplot as plt
+
+>>> def visualize_seg_mask(image: np.ndarray, mask: np.ndarray):
+...     color_seg = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
+...     palette = np.array(create_ade20k_label_colormap())
+...     for label, color in enumerate(palette):
+...         color_seg[mask == label, :] = color
+...     color_seg = color_seg[..., ::-1]  # convert to BGR
+
+...     img = np.array(image) * 0.5 + color_seg * 0.5  # plot the image with the segmentation map
+...     img = img.astype(np.uint8)
+
+...     plt.figure(figsize=(15, 10))
+...     plt.imshow(img)
+...     plt.axis("off")
+...     plt.show()
+
+
+>>> visualize_seg_mask(
+...     np.array(dataset[index]["image"]),
+...     np.array(dataset[index]["annotation"])
+... )
+```
+
+ +
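+
+If you don't want to copy the full ADE20K colormap from the source linked above, a random stand-in palette is enough for a quick look. This is only a sketch: the real `create_ade20k_label_colormap` returns the fixed ADE20K colors, while this hypothetical version just assigns an arbitrary color to each class index:
+
+```py
+>>> import numpy as np
+
+>>> def create_ade20k_label_colormap():
+...     # Stand-in for the real ADE20K colormap: one reproducible,
+...     # arbitrary RGB color for each of the 150 classes plus background.
+...     rng = np.random.default_rng(seed=0)
+...     return rng.integers(0, 256, size=(151, 3), dtype=np.uint8)
+```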
+ +Now apply some augmentations with `albumentations`. You’ll first resize the image and adjust its brightness. + +```py +>>> import albumentations + +>>> transform = albumentations.Compose( +... [ +... albumentations.Resize(256, 256), +... albumentations.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5), +... ] +... ) +``` + +Create a function to apply the transformation to the images: + +```py +>>> def transforms(examples): +... transformed_images, transformed_masks = [], [] +... +... for image, seg_mask in zip(examples["image"], examples["annotation"]): +... image, seg_mask = np.array(image), np.array(seg_mask) +... transformed = transform(image=image, mask=seg_mask) +... transformed_images.append(transformed["image"]) +... transformed_masks.append(transformed["mask"]) +... +... examples["pixel_values"] = transformed_images +... examples["label"] = transformed_masks +... return examples +``` + +Use the [`~Dataset.set_transform`] function to apply the transformation on-the-fly to batches of the dataset to consume less disk space: + +```py +>>> dataset.set_transform(transforms) +``` + +You can verify the transformation worked by indexing into the `pixel_values` and `label` of an example: + +```py +>>> image = np.array(dataset[index]["pixel_values"]) +>>> mask = np.array(dataset[index]["label"]) + +>>> visualize_seg_mask(image, mask) +``` + +
+ +
+ +In this guide, you have used `albumentations` for augmenting the dataset. It's also possible to use `torchvision` to apply some similar transforms. + +```py +>>> from torchvision.transforms import Resize, ColorJitter, Compose + +>>> transformation_chain = Compose([ +... Resize((256, 256)), +... ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) +... ]) +>>> resize = Resize((256, 256)) + +>>> def train_transforms(example_batch): +... example_batch["pixel_values"] = [transformation_chain(x) for x in example_batch["image"]] +... example_batch["label"] = [resize(x) for x in example_batch["annotation"]] +... return example_batch + +>>> dataset.set_transform(train_transforms) + +>>> image = np.array(dataset[index]["pixel_values"]) +>>> mask = np.array(dataset[index]["label"]) + +>>> visualize_seg_mask(image, mask) +``` + +
+ +
+
+
+Now that you know how to process a dataset for semantic segmentation, learn
+[how to train a semantic segmentation model](https://huggingface.co/docs/transformers/tasks/semantic_segmentation)
+and use it for inference.
+
+
\ No newline at end of file
diff --git a/testbed/huggingface__datasets/docs/source/share.mdx b/testbed/huggingface__datasets/docs/source/share.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a318757c95bf1ae03e7577c547d0dc3e1936244b
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/share.mdx
@@ -0,0 +1,155 @@
+# Share a dataset using the CLI
+
+At Hugging Face, we are on a mission to democratize good Machine Learning and we believe in the value of open source. That's why we designed 🤗 Datasets so that anyone can share a dataset with the greater ML community. There are currently thousands of datasets in over 100 languages in the Hugging Face Hub, and the Hugging Face team always welcomes new contributions!
+
+Dataset repositories offer features such as:
+
+- Free dataset hosting
+- Dataset versioning
+- Commit history and diffs
+- Metadata for discoverability
+- Dataset cards for documentation, licensing, limitations, etc.
+
+This guide will show you how to share a dataset that can be easily accessed by anyone.
+
+
+
+## Add a dataset
+
+You can share your dataset with the community with a dataset repository on the Hugging Face Hub.
+It can also be a private dataset if you want to control who has access to it.
+
+In a dataset repository, you can host all your data files and [configure your dataset](./repository_structure#define-your-splits-in-yaml) to define which file goes to which split.
+The following formats are supported: CSV, TSV, JSON, JSON Lines, text, Parquet, Arrow, and SQLite.
+Many kinds of compressed file types are also supported, such as GZ, BZ2, LZ4, LZMA, or ZSTD.
+For example, your dataset can be made of `.json.gz` files.
+
+On the other hand, if your dataset is not in a supported format or if you want more control over how your dataset is loaded, you can write your own dataset script.
+
+When loading a dataset from the Hub, all the files in the supported formats are loaded, following the [repository structure](./repository_structure).
+However, if there's a dataset script, it is downloaded and executed to download and prepare the dataset instead.
+
+For more information on how to load a dataset from the Hub, take a look at the [load a dataset from the Hub](./load_hub) tutorial.
+
+### Create the repository
+
+Sharing a community dataset will require you to create an account on [hf.co](https://huggingface.co/join) if you don't have one yet.
+You can directly create a [new dataset repository](https://huggingface.co/login?next=%2Fnew-dataset) from your account on the Hugging Face Hub, but this guide will show you how to upload a dataset from the terminal.
+
+1. Make sure you are in the virtual environment where you installed Datasets, and run the following command:
+
+```
+huggingface-cli login
+```
+
+2. Log in using your Hugging Face Hub credentials, and create a new dataset repository:
+
+```
+huggingface-cli repo create your_dataset_name --type dataset
+```
+
+Add the `--organization` flag to create a repository under a specific organization:
+
+```
+huggingface-cli repo create your_dataset_name --type dataset --organization your-org-name
+```
+
+### Clone the repository
+
+3. Install [Git LFS](https://git-lfs.github.com/) and clone your repository:
+
+```
+# Make sure you have git-lfs installed
+# (https://git-lfs.github.com/)
+git lfs install
+
+git clone https://huggingface.co/datasets/namespace/your_dataset_name
+```
+
+Here the `namespace` is either your username or your organization name.
+
+### Prepare your files
+
+4. Now is a good time to check your directory to ensure the only files you're uploading are:
+
+- The data files of the dataset
+
+- The dataset card `README.md`
+
+- (optional) `your_dataset_name.py`, your dataset loading script (not needed if your data files are already in the supported formats csv/jsonl/json/parquet/txt). To create a dataset script, see the [dataset script](dataset_script) page.
+
+### Upload your files
+
+You can directly upload your files to your repository on the Hugging Face Hub, but this guide will show you how to upload the files from the terminal.
+
+5. It is important to add the large data files first with `git lfs track` or else you will encounter an error later when you push your files:
+
+```
+cp /somewhere/data/*.json .
+git lfs track "*.json"
+git add .gitattributes
+git add *.json
+git commit -m "add json files"
+```
+
+6. (Optional) Add the dataset loading script:
+
+```
+cp /somewhere/data/load_script.py .
+git add --all
+```
+
+7. Verify the files have been correctly staged. Then you can commit and push your files:
+
+```
+git status
+git commit -m "First version of the your_dataset_name dataset."
+git push
+```
+
+Congratulations, your dataset has now been uploaded to the Hugging Face Hub where anyone can load it in a single line of code! 🥳
+
+```
+dataset = load_dataset("namespace/your_dataset_name")
+```
+
+Finally, don't forget to enrich the dataset card to document your dataset and make it discoverable! Check out the [Create a dataset card](dataset_card) guide to learn more.
+
+### Ask for help and reviews
+
+If you need help with a dataset script, feel free to check the [datasets forum](https://discuss.huggingface.co/c/datasets/10): it's possible that someone had similar issues and shared how they managed to fix them.
+
+Then, if your script is ready and you wish to have it reviewed by the Hugging Face team, you can open a discussion in the Community tab of your dataset with this message:
+
+```
+# Dataset review request for
+
+## Description
+
+
+
+## Files to review
+
+- file1
+- file2
+- ...
+
+cc @lhoestq @polinaeterna @mariosasko @albertvillanova
+```
+
+Members of the Hugging Face team will be happy to review your dataset script and give you advice.
+
+## Datasets on GitHub (legacy)
+
+Datasets used to be hosted on our GitHub repository, but all datasets have now been migrated to the Hugging Face Hub.
+
+The legacy GitHub datasets were originally added to our GitHub repository and therefore don't have a namespace on the Hub: "squad", "glue", etc., unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
+
+
+
+The distinction between a Hub dataset with or without a namespace comes only from the legacy sharing workflow. It does not involve any ranking, decision, or opinion regarding the contents of the dataset itself.
+
+
+
+Those datasets are now maintained on the Hub: if you think a fix is needed, please use their "Community" tab to open a discussion or create a Pull Request.
+The code of these datasets is reviewed by the Hugging Face team.
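+
+In practice, both styles load exactly the same way; the only difference is whether a namespace appears in the repository id. A quick sketch, using "squad" as a legacy example and the placeholder "username/dataset_name" from above for comparison:
+
+```py
+>>> from datasets import load_dataset
+
+>>> legacy_dataset = load_dataset("squad")  # legacy dataset without a namespace
+>>> community_dataset = load_dataset("username/dataset_name")  # namespaced community dataset
+```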
diff --git a/testbed/huggingface__datasets/docs/source/stream.mdx b/testbed/huggingface__datasets/docs/source/stream.mdx new file mode 100644 index 0000000000000000000000000000000000000000..694eafdb0d36ed344ef8d978df39a4f5e642bacc --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/stream.mdx @@ -0,0 +1,362 @@ +# Stream + +Dataset streaming lets you work with a dataset without downloading it. +The data is streamed as you iterate over the dataset. +This is especially helpful when: + +- You don't want to wait for an extremely large dataset to download. +- The dataset size exceeds the amount of available disk space on your computer. +- You want to quickly explore just a few samples of a dataset. + +
+ + +
+
+For example, the English split of the [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) dataset is 1.2 terabytes, but you can use it instantly with streaming. Stream a dataset by setting `streaming=True` in [`load_dataset`] as shown below:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('oscar-corpus/OSCAR-2201', 'en', split='train', streaming=True)
+>>> print(next(iter(dataset)))
+{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
+```
+
+Dataset streaming also lets you work with a dataset made of local files without doing any conversion.
+In this case, the data is streamed from the local files as you iterate over the dataset.
+This is especially helpful when:
+
+- You don't want to wait for an extremely large local dataset to be converted to Arrow.
+- The size of the converted files would exceed the amount of available disk space on your computer.
+- You want to quickly explore just a few samples of a dataset.
+
+For example, you can stream a local dataset of hundreds of compressed JSONL files like [oscar-corpus/OSCAR-2201](https://huggingface.co/datasets/oscar-corpus/OSCAR-2201) to use it instantly:
+
+```py
+>>> from datasets import load_dataset
+>>> data_files = {'train': 'path/to/OSCAR-2201/compressed/en_meta/*.jsonl.gz'}
+>>> dataset = load_dataset('json', data_files=data_files, split='train', streaming=True)
+>>> print(next(iter(dataset)))
+{'id': 0, 'text': 'Founded in 2015, Golden Bees is a leading programmatic recruitment platform dedicated to employers, HR agencies and job boards. The company has developed unique HR-custom technologies and predictive algorithms to identify and attract the best candidates for a job opportunity.', ...
+```
+
+Loading a dataset in streaming mode creates a new dataset type instance (instead of the classic [`Dataset`] object), known as an [`IterableDataset`].
+This special type of dataset has its own set of processing methods shown below.
+
+
+
+An [`IterableDataset`] is useful for iterative jobs like training a model.
+You shouldn't use an [`IterableDataset`] for jobs that require random access to examples, because you have to iterate over it sequentially with a for loop: getting the last example in an iterable dataset requires iterating over all the previous examples.
+You can find more details in the [Dataset vs. IterableDataset guide](./about_mapstyle_vs_iterable).
+
+
+
+## Convert from a Dataset
+
+If you have an existing [`Dataset`] object, you can convert it to an [`IterableDataset`] with the [`~Dataset.to_iterable_dataset`] function. This is actually faster than setting the `streaming=True` argument in [`load_dataset`] because the data is streamed from local files.
+
+```py
+>>> from datasets import load_dataset
+
+# faster 🐇
+>>> dataset = load_dataset("food101")
+>>> iterable_dataset = dataset.to_iterable_dataset()
+
+# slower 🐢
+>>> iterable_dataset = load_dataset("food101", streaming=True)
+```
+
+The [`~Dataset.to_iterable_dataset`] function supports sharding when the [`IterableDataset`] is instantiated. This is useful when working with big datasets where you'd like to shuffle the dataset or enable fast parallel loading with a PyTorch DataLoader.
+
+```py
+>>> import torch
+>>> from datasets import load_dataset
+
+>>> dataset = load_dataset("food101")
+>>> iterable_dataset = dataset.to_iterable_dataset(num_shards=64)  # shard the dataset
+>>> iterable_dataset = iterable_dataset.shuffle(buffer_size=10_000)  # shuffle the shard order and use a shuffle buffer when you start iterating
+>>> dataloader = torch.utils.data.DataLoader(iterable_dataset, num_workers=4)  # assigns 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
+```
+
+## Shuffle
+
+Like a regular [`Dataset`] object, you can also shuffle an [`IterableDataset`] with [`IterableDataset.shuffle`].
+
+The `buffer_size` argument controls the size of the buffer to randomly sample examples from. Let's say your dataset has one million examples, and you set the `buffer_size` to ten thousand. [`IterableDataset.shuffle`] will randomly select examples from the first ten thousand examples in the buffer. Selected examples in the buffer are replaced with new examples. By default, the buffer size is 1,000.
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
+>>> shuffled_dataset = dataset.shuffle(seed=42, buffer_size=10_000)
+```
+
+
+
+[`IterableDataset.shuffle`] will also shuffle the order of the shards if the dataset is sharded into multiple files.
+
+
+
+## Reshuffle
+
+Sometimes you may want to reshuffle the dataset after each epoch. This will require you to set a different seed for each epoch. Use [`IterableDataset.set_epoch`] in between epochs to tell the dataset what epoch you're on.
+
+Your seed effectively becomes: `initial seed + current epoch`.
+
+```py
+>>> for epoch in range(epochs):
+...     shuffled_dataset.set_epoch(epoch)
+...     for example in shuffled_dataset:
+...         ...
+```
+
+## Split dataset
+
+You can split your dataset one of two ways:
+
+- [`IterableDataset.take`] returns the first `n` examples in a dataset:
+
+```py
+>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
+>>> dataset_head = dataset.take(2)
+>>> list(dataset_head)
+[{'id': 0, 'text': 'Mtendere Village was...'}, {'id': 1, 'text': 'Lily James cannot fight the music...'}]
+```
+
+- [`IterableDataset.skip`] omits the first `n` examples in a dataset and returns the remaining examples:
+
+```py
+>>> train_dataset = shuffled_dataset.skip(1000)
+```
+
+
+
+`take` and `skip` prevent future calls to `shuffle` because they lock in the order of the shards. You should `shuffle` your dataset before splitting it.
+
+
+
+
+
+## Interleave
+
+[`interleave_datasets`] can combine an [`IterableDataset`] with other datasets. The combined dataset returns alternating examples from each of the original datasets.
+
+```py
+>>> from datasets import interleave_datasets
+>>> en_dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
+>>> fr_dataset = load_dataset('oscar', "unshuffled_deduplicated_fr", split='train', streaming=True)
+
+>>> multilingual_dataset = interleave_datasets([en_dataset, fr_dataset])
+>>> list(multilingual_dataset.take(2))
+[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': "Média de débat d'idées, de culture et de littérature..."}]
+```
+
+Define sampling probabilities from each of the original datasets for more control over how each of them is sampled and combined.
+Set the `probabilities` argument with your desired sampling probabilities:
+
+```py
+>>> multilingual_dataset_with_oversampling = interleave_datasets([en_dataset, fr_dataset], probabilities=[0.8, 0.2], seed=42)
+>>> list(multilingual_dataset_with_oversampling.take(2))
+[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': 'Lily James cannot fight the music...'}]
+```
+
+Around 80% of the final dataset is made of the `en_dataset`, and 20% of the `fr_dataset`.
+
+You can also specify the `stopping_strategy`. The default strategy, `first_exhausted`, is a subsampling strategy, i.e. the dataset construction is stopped as soon as one of the datasets runs out of samples.
+You can specify `stopping_strategy=all_exhausted` to execute an oversampling strategy. In this case, the dataset construction is stopped as soon as every sample in every dataset has been added at least once. In practice, it means that if a dataset is exhausted, it will loop back to its beginning until the stop criterion has been reached.
+Note that if no sampling probabilities are specified, the new dataset will have `max_length_datasets * nb_dataset` samples.
+
+## Rename, remove, and cast
+
+The following methods allow you to modify the columns of a dataset. These methods are useful for renaming or removing columns and changing columns to a new set of features.
+
+### Rename
+
+Use [`IterableDataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
+
+Provide [`IterableDataset.rename_column`] with the name of the original column, and the new column name:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train')
+>>> dataset = dataset.rename_column("text", "content")
+```
+
+### Remove
+
+When you need to remove one or more columns, give [`IterableDataset.remove_columns`] the name of the column to remove. Remove more than one column by providing a list of column names:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train')
+>>> dataset = dataset.remove_columns('timestamp')
+```
+
+### Cast
+
+[`IterableDataset.cast`] changes the feature type of one or more columns. This method takes your new `Features` as its argument. The following sample code shows how to change the feature types of `ClassLabel` and `Value`:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('glue', 'mrpc', split='train', streaming=True)
+>>> dataset.features
+{'sentence1': Value(dtype='string', id=None),
+'sentence2': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
+'idx': Value(dtype='int32', id=None)}
+
+>>> from datasets import ClassLabel, Value
+>>> new_features = dataset.features.copy()
+>>> new_features["label"] = ClassLabel(names=['negative', 'positive'])
+>>> new_features["idx"] = Value('int64')
+>>> dataset = dataset.cast(new_features)
+>>> dataset.features
+{'sentence1': Value(dtype='string', id=None),
+'sentence2': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None),
+'idx': Value(dtype='int64', id=None)}
+```
+
+
+
+Casting only works if the original feature type and new feature type are compatible.
+For example, you can cast a column with the feature type `Value('int32')` to `Value('bool')` if the original column only contains ones and zeros.
+
+
+
+Use [`IterableDataset.cast_column`] to change the feature type of just one column. Pass the column name and its new feature type as arguments:
+
+```py
+>>> dataset.features
+{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
+
+>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
+>>> dataset.features
+{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
+```
+
+## Map
+
+Similar to the [`Dataset.map`] function for a regular [`Dataset`], 🤗 Datasets features [`IterableDataset.map`] for processing an [`IterableDataset`].
+[`IterableDataset.map`] applies processing on-the-fly when examples are streamed.
+
+It allows you to apply a processing function to each example in a dataset, independently or in batches. This function can even create new rows and columns.
+
+The following example demonstrates how to apply a processing function to an [`IterableDataset`]. The function needs to accept and output a `dict`:
+
+```py
+>>> def add_prefix(example):
+...     example['text'] = 'My text: ' + example['text']
+...     return example
+```
+
+Next, apply this function to the dataset with [`IterableDataset.map`]:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train')
+>>> updated_dataset = dataset.map(add_prefix)
+>>> list(updated_dataset.take(3))
+[{'id': 0, 'text': 'My text: Mtendere Village was inspired by...'},
+ {'id': 1, 'text': 'My text: Lily James cannot fight the music...'},
+ {'id': 2, 'text': 'My text: "I\'d love to help kickstart...'}]
+```
+
+Let's take a look at another example, except this time, you will remove a column with [`IterableDataset.map`]. When you remove a column, it is only removed after the example has been provided to the mapped function. This allows the mapped function to use the content of the columns before they are removed.
+
+Specify the column to remove with the `remove_columns` argument in [`IterableDataset.map`]:
+
+```py
+>>> updated_dataset = dataset.map(add_prefix, remove_columns=["id"])
+>>> list(updated_dataset.take(3))
+[{'text': 'My text: Mtendere Village was inspired by...'},
+ {'text': 'My text: Lily James cannot fight the music...'},
+ {'text': 'My text: "I\'d love to help kickstart...'}]
+```
+
+### Batch processing
+
+[`IterableDataset.map`] also supports working with batches of examples. Operate on batches by setting `batched=True`. The default batch size is 1000, but you can adjust it with the `batch_size` argument. This opens the door to many interesting applications such as tokenization, splitting long sentences into shorter chunks, and data augmentation.
+
+#### Tokenization
+
+```py
+>>> from datasets import load_dataset
+>>> from transformers import AutoTokenizer
+>>> dataset = load_dataset("mc4", "en", streaming=True, split="train")
+>>> tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
+>>> def encode(examples):
+...     return tokenizer(examples['text'], truncation=True, padding='max_length')
+>>> dataset = dataset.map(encode, batched=True, remove_columns=["text", "timestamp", "url"])
+>>> next(iter(dataset))
+{'input_ids': [101, 8466, 1018, 1010, 4029, 2475, 2062, 18558, 3100, 2061, ...,1106, 3739, 102],
+'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ..., 1, 1]}
+```
+
+
+
+See other examples of batch processing in the [batched map processing](./process#batch-processing) documentation. They work the same for iterable datasets.
+
+
+
+### Filter
+
+You can filter rows in the dataset based on a predicate function using [`IterableDataset.filter`]. It returns rows that match a specified condition:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train')
+>>> start_with_ar = dataset.filter(lambda example: example['text'].startswith('Ar'))
+>>> next(iter(start_with_ar))
+{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)?...'}
+```
+
+[`IterableDataset.filter`] can also filter by indices if you set `with_indices=True`:
+
+```py
+>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
+>>> list(even_dataset.take(3))
+[{'id': 0, 'text': 'Mtendere Village was inspired by the vision of Chief Napoleon Dzombe, ...'},
+ {'id': 2, 'text': '"I\'d love to help kickstart continued development! And 0 EUR/month...'},
+ {'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)? Normally, ...'}]
+```
+
+## Stream in a training loop
+
+[`IterableDataset`] can be integrated into a training loop. First, shuffle the dataset:
+
+
+
+```py
+>>> seed, buffer_size = 42, 10_000
+>>> dataset = dataset.shuffle(seed, buffer_size=buffer_size)
+```
+
+Lastly, create a simple training loop and start training:
+
+```py
+>>> import torch
+>>> from torch.utils.data import DataLoader
+>>> from transformers import AutoModelForMaskedLM, DataCollatorForLanguageModeling
+>>> from tqdm import tqdm
+>>> dataset = dataset.with_format("torch")
+>>> dataloader = DataLoader(dataset, collate_fn=DataCollatorForLanguageModeling(tokenizer))
+>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
+>>> model = AutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")
+>>> model.train().to(device)
+>>> optimizer = torch.optim.AdamW(params=model.parameters(), lr=1e-5)
+>>> for epoch in range(3):
+...     dataset.set_epoch(epoch)
+...     for i, batch in enumerate(tqdm(dataloader, total=5)):
+...         if i == 5:
+...             break
+...         batch = {k: v.to(device) for k, v in batch.items()}
+...         outputs = model(**batch)
+...         loss = outputs[0]
+...         loss.backward()
+...         optimizer.step()
+...         optimizer.zero_grad()
+...         if i % 10 == 0:
+...             print(f"loss: {loss}")
+```
+
+
+
+
diff --git a/testbed/huggingface__datasets/docs/source/tabular_load.mdx b/testbed/huggingface__datasets/docs/source/tabular_load.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..9a49d9505fc12d393a7f888be0c504de6f96e6cb
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/tabular_load.mdx
@@ -0,0 +1,139 @@
+# Load tabular data
+
+A tabular dataset is a generic dataset used to describe any data stored in rows and columns, where each row represents an example and each column represents a feature (which can be continuous or categorical). These datasets are commonly stored in CSV files, Pandas DataFrames, and database tables.
+This guide will show you how to load and create a tabular dataset from:
+
+- CSV files
+- Pandas DataFrames
+- Databases
+
+## CSV files
+
+🤗 Datasets can read CSV files by specifying the generic `csv` dataset builder name in the [`~datasets.load_dataset`] method. To load more than one CSV file, pass them as a list to the `data_files` parameter:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset("csv", data_files="my_file.csv")
+
+# load multiple CSV files
+>>> dataset = load_dataset("csv", data_files=["my_file_1.csv", "my_file_2.csv", "my_file_3.csv"])
+```
+
+You can also map specific CSV files to the train and test splits:
+
+```py
+>>> dataset = load_dataset("csv", data_files={"train": ["my_train_file_1.csv", "my_train_file_2.csv"], "test": "my_test_file.csv"})
+```
+
+To load remote CSV files, pass the URLs instead:
+
+```py
+>>> base_url = "https://huggingface.co/datasets/lhoestq/demo1/resolve/main/data/"
+>>> dataset = load_dataset('csv', data_files={"train": base_url + "train.csv", "test": base_url + "test.csv"})
+```
+
+To load zipped CSV files:
+
+```py
+>>> url = "https://domain.org/train_data.zip"
+>>> data_files = {"train": url}
+>>> dataset = load_dataset("csv", data_files=data_files)
+```
+
+## Pandas DataFrames
+
+🤗 Datasets also supports loading datasets from [Pandas DataFrames](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) with the [`~datasets.Dataset.from_pandas`] method:
+
+```py
+>>> from datasets import Dataset
+>>> import pandas as pd
+
+# create a Pandas DataFrame
+>>> df = pd.read_csv("https://huggingface.co/datasets/imodels/credit-card/raw/main/train.csv")
+# load Dataset from Pandas DataFrame
+>>> dataset = Dataset.from_pandas(df)
+```
+
+Use the `split` parameter to specify the name of the dataset split:
+
+```py
+>>> train_ds = Dataset.from_pandas(train_df, split="train")
+>>> test_ds = Dataset.from_pandas(test_df, split="test")
+```
+
+If the dataset doesn't look as expected, you should explicitly [specify your dataset features](loading#specify-features). A [pandas.Series](https://pandas.pydata.org/docs/reference/api/pandas.Series.html) may not always carry enough information for Arrow to automatically infer a data type. For example, if a DataFrame is of length `0` or if the Series only contains `None/NaN` objects, the type is set to `null`.
+
+## Databases
+
+Datasets stored in databases are typically accessed with SQL queries. With 🤗 Datasets, you can connect to a database, query for the data you need, and create a dataset out of it. Then you can use all the processing features of 🤗 Datasets to prepare your dataset for training.
+
+### SQLite
+
+SQLite is a small, lightweight database that is fast and easy to set up. You can use an existing database if you'd like, or follow along and start from scratch.
+
+Start by creating a quick SQLite database with this [Covid-19 data](https://github.com/nytimes/covid-19-data/blob/master/us-states.csv) from the New York Times:
+
+```py
+>>> import sqlite3
+>>> import pandas as pd
+
+>>> conn = sqlite3.connect("us_covid_data.db")
+>>> df = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv")
+>>> df.to_sql("states", conn, if_exists="replace")
+```
+
+This creates a `states` table in the `us_covid_data.db` database which you can now load into a dataset.
+
+To connect to the database, you'll need the [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) that identifies your database.
+Connecting to a database with a URI caches the returned dataset. The URI string differs for each database dialect, so be sure to check the [Database URLs](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) for whichever database you're using.
+
+For SQLite, it is:
+
+```py
+>>> uri = "sqlite:///us_covid_data.db"
+```
+
+Load the table by passing the table name and URI to [`~datasets.Dataset.from_sql`]:
+
+```py
+>>> from datasets import Dataset
+
+>>> ds = Dataset.from_sql("states", uri)
+>>> ds
+Dataset({
+    features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
+    num_rows: 54382
+})
+```
+
+Then you can use all of 🤗 Datasets' processing features, like [`~datasets.Dataset.filter`]:
+
+```py
+>>> ds.filter(lambda x: x["state"] == "California")
+```
+
+You can also load a dataset from a SQL query instead of an entire table, which is useful for querying and joining multiple tables.
+
+Load the dataset by passing your query and URI to [`~datasets.Dataset.from_sql`]:
+
+```py
+>>> from datasets import Dataset
+
+>>> ds = Dataset.from_sql('SELECT * FROM states WHERE state="California";', uri)
+>>> ds
+Dataset({
+    features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'],
+    num_rows: 1019
+})
+```
+
+Then you can use all of 🤗 Datasets' processing features, like [`~datasets.Dataset.filter`]:
+
+```py
+>>> ds.filter(lambda x: x["cases"] > 10000)
+```
+
+### PostgreSQL
+
+You can also connect and load a dataset from a PostgreSQL database; however, we won't demonstrate it directly in the documentation because the example is only meant to be run in a notebook. Instead, take a look at how to install and set up a PostgreSQL server in this [notebook](https://colab.research.google.com/github/nateraw/huggingface-hub-examples/blob/main/sql_with_huggingface_datasets.ipynb#scrollTo=d83yGQMPHGFi)!
+
+After you've set up your PostgreSQL database, you can use the [`~datasets.Dataset.from_sql`] method to load a dataset from a table or query.
\ No newline at end of file
diff --git a/testbed/huggingface__datasets/docs/source/tutorial.md b/testbed/huggingface__datasets/docs/source/tutorial.md
new file mode 100644
index 0000000000000000000000000000000000000000..559575df8ddd02068b29354bc0d5802d716e21a9
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/tutorial.md
@@ -0,0 +1,15 @@
+# Overview
+
+Welcome to the 🤗 Datasets tutorials! These beginner-friendly tutorials will guide you through the fundamentals of working with 🤗 Datasets. You'll load and prepare a dataset for training with your machine learning framework of choice. Along the way, you'll learn how to load different dataset configurations and splits, interact with and see what's inside your dataset, preprocess, and share a dataset to the [Hub](https://huggingface.co/datasets).
+
+The tutorials assume some basic knowledge of Python and a machine learning framework like PyTorch or TensorFlow. If you're already familiar with these, feel free to check out the [quickstart](./quickstart) to see what you can do with 🤗 Datasets.
+
+
+
+The tutorials only cover the basic skills you need to use 🤗 Datasets. There are many other useful functionalities and applications that aren't discussed here. If you're interested in learning more, take a look at [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course.
+
+
+
+If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
+
+Let's get started! 🏁
diff --git a/testbed/huggingface__datasets/docs/source/upload_dataset.mdx b/testbed/huggingface__datasets/docs/source/upload_dataset.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..59b6f2bdf19e46eaa10d207b747cb8fb8a1deaa0
--- /dev/null
+++ b/testbed/huggingface__datasets/docs/source/upload_dataset.mdx
@@ -0,0 +1,134 @@
+# Share a dataset to the Hub
+
+The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away!
+
+Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet.
+
+## Upload with the Hub UI
+
+The Hub's web-based interface allows users without any developer experience to upload a dataset.
+
+### Create a repository
+
+A repository hosts all your dataset files, including the revision history, making it possible to store more than one version of a dataset.
+
+1. Click on your profile and select **New Dataset** to create a new dataset repository.
+2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization. If you'd rather create the repository programmatically, see the sketch after the figure below.
+
+ +
+ +### Upload dataset + +1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example). + + Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: train.csv, test.csv. + +
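+
+If you prefer to create the repository programmatically, the same step can be done with the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library; a minimal sketch (the repository name here is just an example):
+
+```py
+>>> from huggingface_hub import create_repo
+
+>>> create_repo("stevhliu/demo", repo_type="dataset", private=False)
+```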
+
+### Upload dataset
+
+1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to a `.zip` or `.gz` archive, for example).
+
+   Text files are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: train.csv, test.csv.
+
+ +2. Drag and drop your dataset files and add a brief descriptive commit message. + +
+ +
+ +3. After uploading your dataset files, they are stored in your dataset repository. + +
+ +
+ +### Create a Dataset card + +Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly. + +1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository. + +
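+
+The same upload can also be scripted with the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library instead of the web interface; a minimal sketch (file and repository names are just examples):
+
+```py
+>>> from huggingface_hub import upload_file
+
+>>> upload_file(
+...     path_or_fileobj="train.csv",
+...     path_in_repo="train.csv",
+...     repo_id="stevhliu/demo",
+...     repo_type="dataset",
+... )
+```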
+ +
+ +2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card. + + You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which has a complete set of (but not required) tag options like `annotations_creators`, to help you choose the appropriate tags. + +
+
+2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card.
+
+   You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which documents the complete set of tag options (none of which are required), like `annotations_creators`, to help you choose the appropriate tags.
+
+ +3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail). + +### Load dataset + +Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("stevhliu/demo") +``` + +## Upload with Python + +Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library. This library allows users to interact with the Hub from Python. + +1. Begin by installing the library: + +```bash +pip install huggingface_hub +``` + +2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account: + +```bash +huggingface-cli login +``` + +3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("stevhliu/demo") +# dataset = dataset.map(...) # do all your processing here +>>> dataset.push_to_hub("stevhliu/processed_demo") +``` + +To set your dataset as private, set the `private` parameter to `True`. This parameter will only work if you are creating a repository for the first time. + +```py +>>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True) +``` + +To add a new configuration (or subset) to a dataset or to add a new split (train/validation/test), please refer to the [`Dataset.push_to_hub`] documentation. + +### Privacy + +A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset. + +Load a private dataset by providing your authentication token to the `token` parameter: + +```py +>>> from datasets import load_dataset + +# Load a private individual dataset +>>> dataset = load_dataset("stevhliu/demo", token=True) + +# Load a private organization dataset +>>> dataset = load_dataset("organization/dataset_name", token=True) +``` + +## What's next? + +Congratulations, you've completed the tutorials! 🥳 + +From here, you can go on to: + +- Learn more about how to use 🤗 Datasets other functions to [process your dataset](process). +- [Stream large datasets](stream) without downloading it locally. +- [Define your dataset splits and configurations](repository_structure) or [loading script](dataset_script) and share your dataset with the community. + +If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10). 
diff --git a/testbed/huggingface__datasets/docs/source/use_dataset.mdx b/testbed/huggingface__datasets/docs/source/use_dataset.mdx new file mode 100644 index 0000000000000000000000000000000000000000..bd4f9366075cedf329ae3eb10f736ef261c5d6c8 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/use_dataset.mdx @@ -0,0 +1,189 @@ +# Preprocess + +In addition to loading datasets, 🤗 Datasets other main goal is to offer a diverse set of preprocessing functions to get a dataset into an appropriate format for training with your machine learning framework. + +There are many possible ways to preprocess a dataset, and it all depends on your specific dataset. Sometimes you may need to rename a column, and other times you might need to unflatten nested fields. 🤗 Datasets provides a way to do most of these things. But in nearly all preprocessing cases, depending on your dataset modality, you'll need to: + +- Tokenize a text dataset. +- Resample an audio dataset. +- Apply transforms to an image dataset. + +The last preprocessing step is usually setting your dataset format to be compatible with your machine learning framework's expected input format. + +In this tutorial, you'll also need to install the 🤗 Transformers library: + +```bash +pip install transformers +``` + +Grab a dataset of your choice and follow along! + +## Tokenize text + +Models cannot process raw text, so you'll need to convert the text into numbers. Tokenization provides a way to do this by dividing text into individual words called *tokens*. Tokens are finally converted to numbers. + + + +Check out the [Tokenizers](https://huggingface.co/course/chapter2/4?fw=pt) section in Chapter 2 of the Hugging Face course to learn more about tokenization and different tokenization algorithms. + + + +**1**. Start by loading the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset and the tokenizer corresponding to a pretrained [BERT](https://huggingface.co/bert-base-uncased) model. Using the same tokenizer as the pretrained model is important because you want to make sure the text is split in the same way. + +```py +>>> from transformers import AutoTokenizer +>>> from datasets import load_dataset + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +>>> dataset = load_dataset("rotten_tomatoes", split="train") +``` + +**2**. Call your tokenizer on the first row of `text` in the dataset: + +```py +>>> tokenizer(dataset[0]["text"]) +{'input_ids': [101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432, 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119, 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190, 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117, 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137, 188, 1566, 7912, 14516, 6997, 119, 102], + 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} +``` + +The tokenizer returns a dictionary with three items: + +- `input_ids`: the numbers representing the tokens in the text. +- `token_type_ids`: indicates which sequence a token belongs to if there is more than one sequence. +- `attention_mask`: indicates whether a token should be masked or not. + +These values are actually the model inputs. + +**3**. 
The fastest way to tokenize your entire dataset is to use the [`~Dataset.map`] function. This function speeds up tokenization by applying the tokenizer to batches of examples instead of individual examples. Set the `batched` parameter to `True`: + +```py +>>> def tokenization(example): +... return tokenizer(example["text"]) + +>>> dataset = dataset.map(tokenization, batched=True) +``` + +**4**. Set the format of your dataset to be compatible with your machine learning framework: + + + +Use the [`~Dataset.set_format`] function to set the dataset format to be compatible with PyTorch: + +```py +>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "label"]) +>>> dataset.format['type'] +'torch' +``` + + +Use the [`~Dataset.to_tf_dataset`] function to set the dataset format to be compatible with TensorFlow. You'll also need to import a [data collator](https://huggingface.co/docs/transformers/main_classes/data_collator#transformers.DataCollatorWithPadding) from 🤗 Transformers to combine the varying sequence lengths into a single batch of equal lengths: + +```py +>>> from transformers import DataCollatorWithPadding + +>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf") +>>> tf_dataset = dataset.to_tf_dataset( +... columns=["input_ids", "token_type_ids", "attention_mask"], +... label_cols=["label"], +... batch_size=2, +... collate_fn=data_collator, +... shuffle=True +... ) +``` + + + +**5**. The dataset is now ready for training with your machine learning framework! + +## Resample audio signals + +Audio inputs like text datasets need to be divided into discrete data points. This is known as *sampling*; the sampling rate tells you how much of the speech signal is captured per second. It is important to make sure the sampling rate of your dataset matches the sampling rate of the data used to pretrain the model you're using. If the sampling rates are different, the pretrained model may perform poorly on your dataset because it doesn't recognize the differences in the sampling rate. + +**1**. Start by loading the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset, the [`Audio`] feature, and the feature extractor corresponding to a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h) model: + +```py +>>> from transformers import AutoFeatureExtractor +>>> from datasets import load_dataset, Audio + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") +>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") +``` + +**2**. Index into the first row of the dataset. When you call the `audio` column of the dataset, it is automatically decoded and resampled: + +```py +>>> dataset[0]["audio"] +{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, + 0. , 0. ], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'sampling_rate': 8000} +``` + +**3**. Reading a dataset card is incredibly useful and can give you a lot of information about the dataset. A quick look at the MInDS-14 dataset card tells you the sampling rate is 8kHz. Likewise, you can get many details about a model from its model card. The Wav2Vec2 model card says it was sampled on 16kHz speech audio. This means you'll need to upsample the MInDS-14 dataset to match the sampling rate of the model. 
+ +Use the [`~Dataset.cast_column`] function and set the `sampling_rate` parameter in the [`Audio`] feature to upsample the audio signal. When you call the `audio` column now, it is decoded and resampled to 16kHz: + +```py +>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) +>>> dataset[0]["audio"] +{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., + 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'sampling_rate': 16000} +``` + +**4**. Use the [`~Dataset.map`] function to resample the entire dataset to 16kHz. This function speeds up resampling by applying the feature extractor to batches of examples instead of individual examples. Set the `batched` parameter to `True`: + +```py +>>> def preprocess_function(examples): +... audio_arrays = [x["array"] for x in examples["audio"]] +... inputs = feature_extractor( +... audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True +... ) +... return inputs + +>>> dataset = dataset.map(preprocess_function, batched=True) +``` + +**5**. The dataset is now ready for training with your machine learning framework! + +## Apply data augmentations + +The most common preprocessing you'll do with image datasets is *data augmentation*, a process that introduces random variations to an image without changing the meaning of the data. This can mean changing the color properties of an image or randomly cropping an image. You are free to use any data augmentation library you like, and 🤗 Datasets will help you apply your data augmentations to your dataset. + +**1**. Start by loading the [Beans](https://huggingface.co/datasets/beans) dataset, the `Image` feature, and the feature extractor corresponding to a pretrained [ViT](https://huggingface.co/google/vit-base-patch16-224-in21k) model: + +```py +>>> from transformers import AutoFeatureExtractor +>>> from datasets import load_dataset, Image + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") +>>> dataset = load_dataset("beans", split="train") +``` + +**2**. Index into the first row of the dataset. When you call the `image` column of the dataset, the underlying PIL object is automatically decoded into an image. + +```py +>>> dataset[0]["image"] +``` + +**3**. Now, you can apply some transforms to the image. Feel free to take a look at the [various transforms available](https://pytorch.org/vision/stable/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py) in torchvision and choose one you'd like to experiment with. This example applies a transform that randomly rotates the image: + +```py +>>> from torchvision.transforms import RandomRotation + +>>> rotate = RandomRotation(degrees=(0, 90)) +>>> def transforms(examples): +... examples["pixel_values"] = [rotate(image.convert("RGB")) for image in examples["image"]] +... return examples +``` + +**4**. Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly. When you index into the image `pixel_values`, the transform is applied, and your image gets rotated. + +```py +>>> dataset.set_transform(transforms) +>>> dataset[0]["pixel_values"] +``` + +**5**. The dataset is now ready for training with your machine learning framework! 
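+
+If you want to sanity-check the transform before training, one option is to pull a small batch and stack it into a tensor. The snippet below is a minimal sketch rather than a required step: it assumes `torch` and `torchvision` are installed, and it adds a hypothetical `ToTensor` step to the pipeline above so the rotated PIL images become stackable tensors (the Beans images are 500x500 RGB):
+
+```py
+>>> import torch
+>>> from torchvision.transforms import Compose, RandomRotation, ToTensor
+
+>>> # Hypothetical pipeline: rotate first, then convert PIL image -> float tensor of shape (C, H, W)
+>>> pipeline = Compose([RandomRotation(degrees=(0, 90)), ToTensor()])
+>>> def transforms(examples):
+...     examples["pixel_values"] = [pipeline(image.convert("RGB")) for image in examples["image"]]
+...     return examples
+
+>>> dataset.set_transform(transforms)
+>>> batch = dataset[:4]  # the transform runs on access
+>>> torch.stack(batch["pixel_values"]).shape
+torch.Size([4, 3, 500, 500])
+```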
diff --git a/testbed/huggingface__datasets/docs/source/use_with_jax.mdx b/testbed/huggingface__datasets/docs/source/use_with_jax.mdx new file mode 100644 index 0000000000000000000000000000000000000000..41b6341f4bf38849562ea56bcb0344c314e4f708 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/use_with_jax.mdx @@ -0,0 +1,206 @@
+# Use with JAX
+
+This document is a quick introduction to using `datasets` with JAX, with a particular focus on how to get
+`jax.Array` objects out of our datasets, and how to use them to train JAX models.
+
+
+
+`jax` and `jaxlib` are required to run the code in this document, so please make sure you
+install them as `pip install datasets[jax]`.
+
+
+
+## Dataset format
+
+By default, datasets return regular Python objects: integers, floats, strings, lists, etc.
+String and binary objects are unchanged, since JAX only supports numbers.
+
+To get JAX arrays (numpy-like) instead, you can set the format of the dataset to `jax`:
+
+```py
+>>> from datasets import Dataset
+>>> data = [[1, 2], [3, 4]]
+>>> ds = Dataset.from_dict({"data": data})
+>>> ds = ds.with_format("jax")
+>>> ds[0]
+{'data': DeviceArray([1, 2], dtype=int32)}
+>>> ds[:2]
+{'data': DeviceArray([
+    [1, 2],
+    [3, 4]], dtype=int32)}
+```
+
+
+
+A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to JAX arrays.
+
+
+
+Note that the exact same procedure applies to `DatasetDict` objects, so that
+when setting the format of a `DatasetDict` to `jax`, all the `Dataset`s there
+will be formatted as `jax`:
+
+```py
+>>> from datasets import Dataset, DatasetDict
+>>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}}
+>>> dds = DatasetDict({split: Dataset.from_dict(d) for split, d in data.items()})
+>>> dds = dds.with_format("jax")
+>>> dds["train"][:2]
+{'data': DeviceArray([
+    [1, 2],
+    [3, 4]], dtype=int32)}
+```
+
+Another thing you'll need to take into consideration is that the formatting is not applied
+until you actually access the data. So if you want to get a JAX array out of a dataset,
+you'll need to access the data first; otherwise, the format will remain the same.
+
+Finally, to load the data on the device of your choice, you can specify the `device` argument,
+but note that `jaxlib.xla_extension.Device` is not supported, as it's not serializable with
+either `pickle` or `dill`, so you'll need to use its string identifier instead:
+
+```py
+>>> import jax
+>>> from datasets import Dataset
+>>> data = [[1, 2], [3, 4]]
+>>> ds = Dataset.from_dict({"data": data})
+>>> device = str(jax.devices()[0])  # Not casting to `str` before passing it to `with_format` will raise a `ValueError`
+>>> ds = ds.with_format("jax", device=device)
+>>> ds[0]
+{'data': DeviceArray([1, 2], dtype=int32)}
+>>> ds[0]["data"].device()
+TFRT_CPU_0
+>>> ds[0]["data"].device() == jax.devices()[0]
+True
+```
+
+Note that if the `device` argument is not provided to `with_format` then it will use the default
+device, which is `jax.devices()[0]`.
+
+## N-dimensional arrays
+
+If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
+In particular, a JAX formatted dataset outputs a `DeviceArray` object, which is a numpy-like array, so it does not
+need the [`Array`] feature type to be specified as opposed to PyTorch or TensorFlow formatters.
+
+```py
+>>> from datasets import Dataset
+>>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]]
+>>> ds = Dataset.from_dict({"data": data})
+>>> ds = ds.with_format("jax")
+>>> ds[0]
+{'data': DeviceArray([[1, 2],
+             [3, 4]], dtype=int32)}
+```
+
+## Other feature types
+
+[`ClassLabel`] data is properly converted to arrays:
+
+```py
+>>> from datasets import Dataset, Features, ClassLabel
+>>> labels = [0, 0, 1]
+>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
+>>> ds = Dataset.from_dict({"label": labels}, features=features)
+>>> ds = ds.with_format("jax")
+>>> ds[:3]
+{'label': DeviceArray([0, 0, 1], dtype=int32)}
+```
+
+String and binary objects are unchanged, since JAX only supports numbers.
+
+The [`Image`] and [`Audio`] feature types are also supported.
+
+
+
+To use the [`Image`] feature type, you'll need to install the `vision` extra as
+`pip install datasets[vision]`.
+
+
+
+```py
+>>> from datasets import Dataset, Features, Image
+>>> images = ["path/to/image.png"] * 10
+>>> features = Features({"image": Image()})
+>>> ds = Dataset.from_dict({"image": images}, features=features)
+>>> ds = ds.with_format("jax")
+>>> ds[0]["image"].shape
+(512, 512, 3)
+>>> ds[0]
+{'image': DeviceArray([[[ 255, 255, 255],
+              [ 255, 255, 255],
+              ...,
+              [ 255, 255, 255],
+              [ 255, 255, 255]]], dtype=uint8)}
+>>> ds[:2]["image"].shape
+(2, 512, 512, 3)
+>>> ds[:2]
+{'image': DeviceArray([[[[ 255, 255, 255],
+               [ 255, 255, 255],
+               ...,
+               [ 255, 255, 255],
+               [ 255, 255, 255]]]], dtype=uint8)}
+```
+
+
+
+To use the [`Audio`] feature type, you'll need to install the `audio` extra as
+`pip install datasets[audio]`.
+
+
+
+```py
+>>> from datasets import Dataset, Features, Audio
+>>> audio = ["path/to/audio.wav"] * 10
+>>> features = Features({"audio": Audio()})
+>>> ds = Dataset.from_dict({"audio": audio}, features=features)
+>>> ds = ds.with_format("jax")
+>>> ds[0]["audio"]["array"]
+DeviceArray([-0.059021  , -0.03894043, -0.00735474, ...,  0.0133667 ,
+              0.01809692,  0.00268555], dtype=float32)
+>>> ds[0]["audio"]["sampling_rate"]
+DeviceArray(44100, dtype=int32, weak_type=True)
+```
+
+## Data loading
+
+JAX doesn't have any built-in data loading capabilities, so you'll need to use a library such
+as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader` or [TensorFlow](https://www.tensorflow.org/)
+using a `tf.data.Dataset`. Citing the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Neural_Network_and_Data_Loading.html#data-loading-with-pytorch) on this topic:
+"JAX is laser-focused on program transformations and accelerator-backed NumPy, so we don't
+include data loading or munging in the JAX library. There are already a lot of great data loaders
+out there, so let's just use them instead of reinventing anything. We'll grab PyTorch's data loader,
+and make a tiny shim to make it work with NumPy arrays."
+
+That's why the JAX formatting in `datasets` is so useful: it lets you use any model from the
+Hugging Face Hub with JAX without having to worry about the data loading part.
+
+### Using `with_format('jax')`
+
+The easiest way to get JAX arrays out of a dataset is to use the `with_format('jax')` method. Let's assume
+that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available
+on the Hugging Face Hub at https://huggingface.co/datasets/mnist.
+ +```py +>>> from datasets import load_dataset +>>> ds = load_dataset("mnist") +>>> ds = ds.with_format("jax") +>>> ds["train"][0] +{'image': DeviceArray([[ 0, 0, 0, ...], + [ 0, 0, 0, ...], + ..., + [ 0, 0, 0, ...], + [ 0, 0, 0, ...]], dtype=uint8), + 'label': DeviceArray(5, dtype=int32)} +``` + +Once the format is set we can feed the dataset to the JAX model in batches using the `Dataset.iter()` +method: + +```py +>>> for epoch in range(epochs): +... for batch in ds["train"].iter(batch_size=32): +... x, y = batch["image"], batch["label"] +... ... +``` diff --git a/testbed/huggingface__datasets/docs/source/use_with_pytorch.mdx b/testbed/huggingface__datasets/docs/source/use_with_pytorch.mdx new file mode 100644 index 0000000000000000000000000000000000000000..eeff73ef8644cc72e42b6c3ff4d20a5f34fa7377 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/use_with_pytorch.mdx @@ -0,0 +1,240 @@ +# Use with PyTorch + +This document is a quick introduction to using `datasets` with PyTorch, with a particular focus on how to get +`torch.Tensor` objects out of our datasets, and how to use a PyTorch `DataLoader` and a Hugging Face `Dataset` +with the best performance. + +## Dataset format + +By default, datasets return regular python objects: integers, floats, strings, lists, etc. + +To get PyTorch tensors instead, you can set the format of the dataset to `pytorch` using [`Dataset.with_format`]: + +```py +>>> from datasets import Dataset +>>> data = [[1, 2],[3, 4]] +>>> ds = Dataset.from_dict({"data": data}) +>>> ds = ds.with_format("torch") +>>> ds[0] +{'data': tensor([1, 2])} +>>> ds[:2] +{'data': tensor([[1, 2], + [3, 4]])} +``` + + + +A [`Dataset`] object is a wrapper of an Arrow table, which allows fast zero-copy reads from arrays in the dataset to PyTorch tensors. + + + + +To load the data as tensors on a GPU, specify the `device` argument: +```py +>>> import torch +>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +>>> ds = ds.with_format("torch", device=device) +>>> ds[0] +{'data': tensor([1, 2], device='cuda:0')} +``` + +## N-dimensional arrays + +If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists. 
+In particular, a PyTorch formatted dataset outputs nested lists instead of a single tensor: + +```py +>>> from datasets import Dataset +>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] +>>> ds = Dataset.from_dict({"data": data}) +>>> ds = ds.with_format("torch") +>>> ds[0] +{'data': [tensor([1, 2]), tensor([3, 4])]} +``` + +To get a single tensor, you must explicitly use the [`Array`] feature type and specify the shape of your tensors: + +```py +>>> from datasets import Dataset, Features, Array2D +>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]] +>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')}) +>>> ds = Dataset.from_dict({"data": data}, features=features) +>>> ds = ds.with_format("torch") +>>> ds[0] +{'data': tensor([[1, 2], + [3, 4]])} +>>> ds[:2] +{'data': tensor([[[1, 2], + [3, 4]], + + [[5, 6], + [7, 8]]])} +``` + + +## Other feature types + +[`ClassLabel`] data are properly converted to tensors: + +```py +>>> from datasets import Dataset, Features, ClassLabel +>>> labels = [0, 0, 1] +>>> features = Features({"label": ClassLabel(names=["negative", "positive"])}) +>>> ds = Dataset.from_dict({"label": labels}, features=features) +>>> ds = ds.with_format("torch") +>>> ds[:3] +{'label': tensor([0, 0, 1])} +``` + +String and binary objects are unchanged, since PyTorch only supports numbers. + +The [`Image`] and [`Audio`] feature types are also supported. + + + +To use the [`Image`] feature type, you'll need to install the `vision` extra as +`pip install datasets[vision]`. + + + +```py +>>> from datasets import Dataset, Features, Audio, Image +>>> images = ["path/to/image.png"] * 10 +>>> features = Features({"image": Image()}) +>>> ds = Dataset.from_dict({"image": images}, features=features) +>>> ds = ds.with_format("torch") +>>> ds[0]["image"].shape +torch.Size([512, 512, 4]) +>>> ds[0] +{'image': tensor([[[255, 215, 106, 255], + [255, 215, 106, 255], + ..., + [255, 255, 255, 255], + [255, 255, 255, 255]]], dtype=torch.uint8)} +>>> ds[:2]["image"].shape +torch.Size([2, 512, 512, 4]) +>>> ds[:2] +{'image': tensor([[[[255, 215, 106, 255], + [255, 215, 106, 255], + ..., + [255, 255, 255, 255], + [255, 255, 255, 255]]]], dtype=torch.uint8)} +``` + + + +To use the [`Audio`] feature type, you'll need to install the `audio` extra as +`pip install datasets[audio]`. + + + +```py +>>> from datasets import Dataset, Features, Audio, Image +>>> audio = ["path/to/audio.wav"] * 10 +>>> features = Features({"audio": Audio()}) +>>> ds = Dataset.from_dict({"audio": audio}, features=features) +>>> ds = ds.with_format("torch") +>>> ds[0]["audio"]["array"] +tensor([ 6.1035e-05, 1.5259e-05, 1.6785e-04, ..., -1.5259e-05, + -1.5259e-05, 1.5259e-05]) +>>> ds[0]["audio"]["sampling_rate"] +tensor(44100) +``` + +## Data loading + +Like `torch.utils.data.Dataset` objects, a [`Dataset`] can be passed directly to a PyTorch `DataLoader`: + +```py +>>> import numpy as np +>>> from datasets import Dataset +>>> from torch.utils.data import DataLoader +>>> data = np.random.rand(16) +>>> label = np.random.randint(0, 2, size=16) +>>> ds = Dataset.from_dict({"data": data, "label": label}).with_format("torch") +>>> dataloader = DataLoader(ds, batch_size=4) +>>> for batch in dataloader: +... 
print(batch)
+{'data': tensor([0.0047, 0.4979, 0.6726, 0.8105]), 'label': tensor([0, 1, 0, 1])}
+{'data': tensor([0.4832, 0.2723, 0.4259, 0.2224]), 'label': tensor([0, 0, 0, 0])}
+{'data': tensor([0.5837, 0.3444, 0.4658, 0.6417]), 'label': tensor([0, 1, 0, 0])}
+{'data': tensor([0.7022, 0.1225, 0.7228, 0.8259]), 'label': tensor([1, 1, 1, 1])}
+```
+
+### Optimize data loading
+
+There are several ways you can increase the speed at which your data is loaded, which can save you time, especially if you are working with large datasets.
+PyTorch offers parallelized data loading, retrieving batches of indices instead of individual indices, and streaming to iterate over the dataset without downloading it on disk.
+
+#### Use multiple workers
+
+You can parallelize data loading with the `num_workers` argument of a PyTorch `DataLoader` and get a higher throughput.
+
+Under the hood, the `DataLoader` starts `num_workers` processes.
+Each process reloads the dataset passed to the `DataLoader` and is used to query examples.
+Reloading the dataset inside a worker doesn't fill up your RAM, since it simply memory-maps the dataset again from your disk.
+
+```py
+>>> import numpy as np
+>>> from datasets import Dataset, load_from_disk
+>>> from torch.utils.data import DataLoader
+>>> data = np.random.rand(10_000)
+>>> Dataset.from_dict({"data": data}).save_to_disk("my_dataset")
+>>> ds = load_from_disk("my_dataset").with_format("torch")
+>>> dataloader = DataLoader(ds, batch_size=32, num_workers=4)
+```
+
+### Stream data
+
+Stream a dataset by loading it as an [`IterableDataset`]. This allows you to progressively iterate over a remote dataset without downloading it on disk, or over local data files.
+Learn more about which type of dataset is best for your use case in the [choosing between a regular dataset or an iterable dataset](./about_mapstyle_vs_iterable) guide.
+
+An iterable dataset from `datasets` inherits from `torch.utils.data.IterableDataset` so you can pass it to a `torch.utils.data.DataLoader`:
+
+```py
+>>> import numpy as np
+>>> from datasets import Dataset, load_dataset
+>>> from torch.utils.data import DataLoader
+>>> data = np.random.rand(10_000)
+>>> Dataset.from_dict({"data": data}).push_to_hub("<username>/my_dataset")  # Upload to the Hugging Face Hub
+>>> my_iterable_dataset = load_dataset("<username>/my_dataset", streaming=True, split="train")
+>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32)
+```
+
+If the dataset is split into several shards (i.e. if the dataset consists of multiple data files), then you can stream in parallel using `num_workers`:
+
+```py
+>>> my_iterable_dataset = load_dataset("c4", "en", streaming=True, split="train")
+>>> my_iterable_dataset.n_shards
+1024
+>>> dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)
+```
+
+In this case each worker is given a subset of the list of shards to stream from.
+
+### Distributed
+
+To split your dataset across your training nodes, you can use [`datasets.distributed.split_dataset_by_node`]:
+
+```python
+import os
+from datasets.distributed import split_dataset_by_node
+
+ds = split_dataset_by_node(ds, rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
+```
+
+This works for both map-style datasets and iterable datasets.
+The dataset is split for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+For map-style datasets:
+
+Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+
+For iterable datasets:
+
+If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+then the shards are evenly assigned across the nodes, which is the most optimized.
+Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+This can also be combined with a `torch.utils.data.DataLoader` if you want each node to use multiple workers to load the data.
diff --git a/testbed/huggingface__datasets/docs/source/use_with_spark.mdx b/testbed/huggingface__datasets/docs/source/use_with_spark.mdx new file mode 100644 index 0000000000000000000000000000000000000000..07767ca447f22f0be906aec2265a2c7f65d40e1d --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/use_with_spark.mdx @@ -0,0 +1,70 @@
+# Use with Spark
+
+This document is a quick introduction to using 🤗 Datasets with Spark, with a particular focus on how to load a Spark DataFrame into a [`Dataset`] object.
+
+From there, you have fast access to any element and you can use it as a data loader to train models.
+
+## Load from Spark
+
+A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to PyTorch, TensorFlow and JAX tensors.
+The Arrow table is memory mapped from disk, which makes it possible to load datasets bigger than your available RAM.
+
+You can get a [`Dataset`] from a Spark DataFrame using [`Dataset.from_spark`]:
+
+```py
+>>> from datasets import Dataset
+>>> df = spark.createDataFrame(
+...     data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+...     schema=["id", "name"],
+... )
+>>> ds = Dataset.from_spark(df)
+```
+
+The Spark workers write the dataset on disk in a cache directory as Arrow files, and the [`Dataset`] is loaded from there.
+
+Alternatively, you can skip materialization by using [`IterableDataset.from_spark`], which returns an [`IterableDataset`]:
+
+```py
+>>> from datasets import IterableDataset
+>>> df = spark.createDataFrame(
+...     data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+...     schema=["id", "name"],
+... )
+>>> ds = IterableDataset.from_spark(df)
+>>> print(next(iter(ds)))
+{'id': 1, 'name': 'Elia'}
+```
+
+### Caching
+
+When using [`Dataset.from_spark`], the resulting [`Dataset`] is cached; if you call [`Dataset.from_spark`] multiple
+times on the same DataFrame it won't re-run the Spark job that writes the dataset as Arrow files on disk.
+
+You can set the cache location by passing `cache_dir=` to [`Dataset.from_spark`].
+Make sure to use a disk that is available to both your workers and your current machine (the driver).
+
+
+
+In a different session, a Spark DataFrame doesn't have the same [semantic hash](https://spark.apache.org/docs/3.2.0/api/python/reference/api/pyspark.sql.DataFrame.semanticHash.html), and it will rerun a Spark job and store it in a new cache.
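+
+As a sketch, you could point the cache at a shared location when creating the dataset (the path below is a hypothetical placeholder; any disk visible to both the driver and the workers will do):
+
+```py
+>>> # "/dbfs/tmp/datasets_cache" is a hypothetical shared path
+>>> ds = Dataset.from_spark(df, cache_dir="/dbfs/tmp/datasets_cache")
+```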
+
+
+### Feature types
+
+If your dataset is made of images, audio data or N-dimensional arrays, you can specify the `features=` argument in
+[`Dataset.from_spark`] (or [`IterableDataset.from_spark`]):
+
+```py
+>>> from datasets import Dataset, Features, Image, Value
+>>> data = [(0, open("image.png", "rb").read())]
+>>> df = spark.createDataFrame(data, "idx: int, image: binary")
+>>> # Also works if you have arrays
+>>> # data = [(0, np.zeros(shape=(32, 32, 3), dtype=np.int32).tolist())]
+>>> # df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
+>>> features = Features({"idx": Value("int64"), "image": Image()})
+>>> dataset = Dataset.from_spark(df, features=features)
+>>> dataset[0]
+{'idx': 0, 'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32>}
+```
+
+You can check the [`Features`] documentation to learn about all the available feature types.
diff --git a/testbed/huggingface__datasets/docs/source/use_with_tensorflow.mdx b/testbed/huggingface__datasets/docs/source/use_with_tensorflow.mdx new file mode 100644 index 0000000000000000000000000000000000000000..52434138e295b0d6a330796cf2f3ae4c808713f2 --- /dev/null +++ b/testbed/huggingface__datasets/docs/source/use_with_tensorflow.mdx @@ -0,0 +1,252 @@
+# Using Datasets with TensorFlow
+
+This document is a quick introduction to using `datasets` with TensorFlow, with a particular focus on how to get
+`tf.Tensor` objects out of our datasets, and how to stream data from Hugging Face `Dataset` objects to Keras methods
+like `model.fit()`.
+
+## Dataset format
+
+By default, datasets return regular Python objects: integers, floats, strings, lists, etc.
+
+To get TensorFlow tensors instead, you can set the format of the dataset to `tf`:
+
+```py
+>>> from datasets import Dataset
+>>> data = [[1, 2],[3, 4]]
+>>> ds = Dataset.from_dict({"data": data})
+>>> ds = ds.with_format("tf")
+>>> ds[0]
+{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>}
+>>> ds[:2]
+{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
+array([[1, 2],
+       [3, 4]])>}
+```
+
+
+
+A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to TensorFlow tensors.
+
+
+
+This can be useful for converting your dataset to a dict of `Tensor` objects, or for writing a generator to load TF
+samples from it. If you wish to convert the entire dataset to `Tensor`, simply query the full dataset:
+
+```py
+>>> ds[:]
+{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
+array([[1, 2],
+       [3, 4]])>}
+```
+
+## N-dimensional arrays
+
+If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
+In particular, a TensorFlow formatted dataset outputs a `RaggedTensor` instead of a single tensor:
+
+```py
+>>> from datasets import Dataset
+>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
+>>> ds = Dataset.from_dict({"data": data})
+>>> ds = ds.with_format("tf")
+>>> ds[0]
+{'data': <tf.RaggedTensor [[1, 2], [3, 4]]>}
+```
+
+To get a single tensor, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:
+
+```py
+>>> from datasets import Dataset, Features, Array2D
+>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
+>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
+>>> ds = Dataset.from_dict({"data": data}, features=features)
+>>> ds = ds.with_format("tf")
+>>> ds[0]
+{'data': <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
+array([[1, 2],
+       [3, 4]], dtype=int32)>}
+>>> ds[:2]
+{'data': <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
+array([[[1, 2],
+        [3, 4]],
+
+       [[5, 6],
+        [7, 8]]], dtype=int32)>}
+```
+
+## Other feature types
+
+[`ClassLabel`] data are properly converted to tensors:
+
+```py
+>>> from datasets import Dataset, Features, ClassLabel
+>>> labels = [0, 0, 1]
+>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
+>>> ds = Dataset.from_dict({"label": labels}, features=features)
+>>> ds = ds.with_format("tf")
+>>> ds[:3]
+{'label': <tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 0, 1])>}
+```
+
+Strings and binary objects are also supported:
+
+```py
+>>> from datasets import Dataset, Features
+>>> text = ["foo", "bar"]
+>>> data = [0, 1]
+>>> ds = Dataset.from_dict({"text": text, "data": data})
+>>> ds = ds.with_format("tf")
+>>> ds[:2]
+{'text': <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'foo', b'bar'], dtype=object)>,
+ 'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>}
+```
+
+You can also explicitly format certain columns and leave the other columns unformatted:
+
+```py
+>>> ds = ds.with_format("tf", columns=["data"], output_all_columns=True)
+>>> ds[:2]
+{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>,
+ 'text': ['foo', 'bar']}
+```
+
+The [`Image`] and [`Audio`] feature types are also supported.
+
+
+
+To use the [`Image`] feature type, you'll need to install the `vision` extra as
+`pip install datasets[vision]`.
+
+
+
+```py
+>>> from datasets import Dataset, Features, Audio, Image
+>>> images = ["path/to/image.png"] * 10
+>>> features = Features({"image": Image()})
+>>> ds = Dataset.from_dict({"image": images}, features=features)
+>>> ds = ds.with_format("tf")
+>>> ds[0]
+{'image': <tf.Tensor: shape=(512, 512, 4), dtype=uint8, numpy=
+array([[[255, 215, 106, 255],
+        [255, 215, 106, 255],
+        ...,
+        [255, 255, 255, 255],
+        [255, 255, 255, 255]]], dtype=uint8)>}
+>>> ds[:2]
+{'image': <tf.Tensor: shape=(2, 512, 512, 4), dtype=uint8, numpy=
+array([[[[255, 215, 106, 255],
+         [255, 215, 106, 255],
+         ...,
+         [255, 255, 255, 255],
+         [255, 255, 255, 255]]]], dtype=uint8)>}
+```
+
+
+
+To use the [`Audio`] feature type, you'll need to install the `audio` extra as
+`pip install datasets[audio]`.
+
+
+
+```py
+>>> from datasets import Dataset, Features, Audio, Image
+>>> audio = ["path/to/audio.wav"] * 10
+>>> features = Features({"audio": Audio()})
+>>> ds = Dataset.from_dict({"audio": audio}, features=features)
+>>> ds = ds.with_format("tf")
+>>> ds[0]["audio"]["array"]
+<tf.Tensor: shape=(202311,), dtype=float32, numpy=
+array([ 6.1035156e-05,  1.5258789e-05,  1.6784668e-04, ...,
+       -1.5258789e-05, -1.5258789e-05,  1.5258789e-05], dtype=float32)>
+>>> ds[0]["audio"]["sampling_rate"]
+<tf.Tensor: shape=(), dtype=int32, numpy=44100>
+```
+
+## Data loading
+
+Although you can load individual samples and batches just by indexing into your dataset, this won't work if you want
+to use Keras methods like `fit()` and `predict()`. You could write a generator function that shuffles and loads batches
+from your dataset and `fit()` on that, but that sounds like a lot of unnecessary work. Instead, if you want to stream
+data from your dataset on-the-fly, we recommend converting your dataset to a `tf.data.Dataset` using the
+`to_tf_dataset()` method.
+
+The `tf.data.Dataset` class covers a wide range of use-cases - it is often created from Tensors in memory, or using a load function to read files on disk
+or external storage. The dataset can be transformed arbitrarily with the `map()` method, or methods like `batch()`
+and `shuffle()` can be used to create a dataset that's ready for training. 
These methods do not modify the stored data
+in any way - instead, the methods build a data pipeline graph that will be executed when the dataset is iterated over,
+usually during model training or inference. This is different from the `map()` method of Hugging Face `Dataset` objects,
+which runs the map function immediately and saves the new or changed columns.
+
+Since the entire data preprocessing pipeline can be compiled in a `tf.data.Dataset`, this approach allows for massively
+parallel, asynchronous data loading and training. However, the requirement for graph compilation can be a limitation,
+particularly for Hugging Face tokenizers, which are usually not (yet!) compilable as part of a TF graph. As a result,
+we usually advise pre-processing the dataset as a Hugging Face dataset, where arbitrary Python functions can be
+used, and then converting to `tf.data.Dataset` afterwards using `to_tf_dataset()` to get a batched dataset ready for
+training. To see examples of this approach, please see the [examples](https://github.com/huggingface/transformers/tree/main/examples) or [notebooks](https://huggingface.co/docs/transformers/notebooks) for `transformers`.
+
+### Using `to_tf_dataset()`
+
+Using `to_tf_dataset()` is straightforward. Once your dataset is preprocessed and ready, simply call it like so:
+
+```py
+>>> from datasets import Dataset
+>>> data = {"inputs": [[1, 2],[3, 4]], "labels": [0, 1]}
+>>> ds = Dataset.from_dict(data)
+>>> tf_ds = ds.to_tf_dataset(
+...     columns=["inputs"],
+...     label_cols=["labels"],
+...     batch_size=2,
+...     shuffle=True
+... )
+```
+
+The returned `tf_ds` object here is now fully ready to train on, and can be passed directly to `model.fit()`. Note
+that you set the batch size when creating the dataset, and so you don't need to specify it when calling `fit()`:
+
+```py
+>>> model.fit(tf_ds, epochs=2)
+```
+
+For a full description of the arguments, please see the [`~Dataset.to_tf_dataset`] documentation. In many cases,
+you will also need to add a `collate_fn` to your call. This is a function that takes multiple elements of the dataset
+and combines them into a single batch. When all elements have the same length, the built-in default collator will
+suffice, but for more complex tasks a custom collator may be necessary. In particular, many tasks have samples
+with varying sequence lengths which will require a [data collator](https://huggingface.co/docs/transformers/main/en/main_classes/data_collator) that can pad batches correctly. You can see examples
+of this in the `transformers` NLP [examples](https://github.com/huggingface/transformers/tree/main/examples) and
+[notebooks](https://huggingface.co/docs/transformers/notebooks), where variable sequence lengths are very common.
+
+If you find that loading with `to_tf_dataset` is slow, you can also use the `num_workers` argument. This spins
+up multiple subprocesses to load data in parallel. This feature is recent and still somewhat experimental - please file
+an issue if you encounter any bugs while using it!
+
+### When to use to_tf_dataset
+
+The astute reader may have noticed at this point that we have offered two approaches to achieve the same goal - if you
+want to pass your dataset to a TensorFlow model, you can either convert the dataset to a `Tensor` or `dict` of `Tensors`
+using `.with_format('tf')`, or you can convert the dataset to a `tf.data.Dataset` with `to_tf_dataset()`. Either of these
+can be passed to `model.fit()`, so which should you choose?
+
+The key thing to recognize is that when you convert the whole dataset to `Tensor`s, it is static and fully loaded into
+RAM. This is simple and convenient, but if any of the following apply, you should probably use `to_tf_dataset()`
+instead:
+
+- Your dataset is too large to fit in RAM. `to_tf_dataset()` streams only one batch at a time, so even very large
+  datasets can be handled with this method.
+- You want to apply random transformations using `dataset.with_transform()` or the `collate_fn`. This is
+  common in several modalities, such as image augmentations when training vision models, or random masking when training
+  masked language models. Using `to_tf_dataset()` will apply those transformations
+  at the moment when a batch is loaded, which means the same samples will get different augmentations each time
+  they are loaded. This is usually what you want.
+- Your data has a variable dimension, such as input texts in NLP that consist of varying
+  numbers of tokens. When you create a batch with samples with a variable dimension, the standard solution is to
+  pad the shorter samples to the length of the longest one. When you stream samples from a dataset with `to_tf_dataset`,
+  you can apply this padding to each batch via your `collate_fn`. However, if you want to convert
+  such a dataset to dense `Tensor`s, then you will have to pad samples to the length of the longest sample in *the
+  entire dataset!* This can result in huge amounts of padding, which wastes memory and reduces your model's speed.
+
+### Caveats and limitations
+
+Right now, `to_tf_dataset()` always returns a batched dataset - we will add support for unbatched datasets soon!
diff --git a/testbed/huggingface__datasets/metrics/accuracy/accuracy.py b/testbed/huggingface__datasets/metrics/accuracy/accuracy.py new file mode 100644 index 0000000000000000000000000000000000000000..b9b0e334772d281a5aef28b7a684d25f92dc9feb --- /dev/null +++ b/testbed/huggingface__datasets/metrics/accuracy/accuracy.py @@ -0,0 +1,105 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Accuracy metric."""
+
+from sklearn.metrics import accuracy_score
+
+import datasets
+
+
+_DESCRIPTION = """
+Accuracy is the proportion of correct predictions among the total number of cases processed. It can be computed with:
+Accuracy = (TP + TN) / (TP + TN + FP + FN)
+Where:
+TP: True positive
+TN: True negative
+FP: False positive
+FN: False negative
+"""
+
+
+_KWARGS_DESCRIPTION = """
+Args:
+    predictions (`list` of `int`): Predicted labels.
+    references (`list` of `int`): Ground truth labels.
+    normalize (`boolean`): If set to False, returns the number of correctly classified samples. Otherwise, returns the fraction of correctly classified samples. Defaults to True.
+    sample_weight (`list` of `float`): Sample weights. Defaults to None.
+
+Returns:
+    accuracy (`float` or `int`): Accuracy score. Minimum possible value is 0. 
Maximum possible value is 1.0 if `normalize` is set to `True`, or the number of examples input if `normalize` is set to `False`. A higher score means higher accuracy.
+
+Examples:
+
+    Example 1-A simple example
+        >>> accuracy_metric = datasets.load_metric("accuracy")
+        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0])
+        >>> print(results)
+        {'accuracy': 0.5}
+
+    Example 2-The same as Example 1, except with `normalize` set to `False`.
+        >>> accuracy_metric = datasets.load_metric("accuracy")
+        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], normalize=False)
+        >>> print(results)
+        {'accuracy': 3.0}
+
+    Example 3-The same as Example 1, except with `sample_weight` set.
+        >>> accuracy_metric = datasets.load_metric("accuracy")
+        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], sample_weight=[0.5, 2, 0.7, 0.5, 9, 0.4])
+        >>> print(results)
+        {'accuracy': 0.8778625954198473}
+"""
+
+
+_CITATION = """
+@article{scikit-learn,
+  title={Scikit-learn: Machine Learning in {P}ython},
+  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
+         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
+         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
+         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
+  journal={Journal of Machine Learning Research},
+  volume={12},
+  pages={2825--2830},
+  year={2011}
+}
+"""
+
+
+@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class Accuracy(datasets.Metric):
+    def _info(self):
+        return datasets.MetricInfo(
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "predictions": datasets.Sequence(datasets.Value("int32")),
+                    "references": datasets.Sequence(datasets.Value("int32")),
+                }
+                if self.config_name == "multilabel"
+                else {
+                    "predictions": datasets.Value("int32"),
+                    "references": datasets.Value("int32"),
+                }
+            ),
+            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html"],
+        )
+
+    def _compute(self, predictions, references, normalize=True, sample_weight=None):
+        return {
+            "accuracy": float(
+                accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
+            )
+        }
diff --git a/testbed/huggingface__datasets/metrics/bertscore/bertscore.py b/testbed/huggingface__datasets/metrics/bertscore/bertscore.py new file mode 100644 index 0000000000000000000000000000000000000000..e6a7afe37845dcfdbc2fc4a68618c52e18cbd4c0 --- /dev/null +++ b/testbed/huggingface__datasets/metrics/bertscore/bertscore.py @@ -0,0 +1,207 @@
+# Copyright 2020 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BERTScore metric. 
""" + +import functools +from contextlib import contextmanager + +import bert_score +from packaging import version + +import datasets + + +@contextmanager +def filter_logging_context(): + def filter_log(record): + return False if "This IS expected if you are initializing" in record.msg else True + + logger = datasets.utils.logging.get_logger("transformers.modeling_utils") + logger.addFilter(filter_log) + try: + yield + finally: + logger.removeFilter(filter_log) + + +_CITATION = """\ +@inproceedings{bert-score, + title={BERTScore: Evaluating Text Generation with BERT}, + author={Tianyi Zhang* and Varsha Kishore* and Felix Wu* and Kilian Q. Weinberger and Yoav Artzi}, + booktitle={International Conference on Learning Representations}, + year={2020}, + url={https://openreview.net/forum?id=SkeHuCVFDr} +} +""" + +_DESCRIPTION = """\ +BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference +sentences by cosine similarity. +It has been shown to correlate with human judgment on sentence-level and system-level evaluation. +Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language +generation tasks. + +See the project's README at https://github.com/Tiiiger/bert_score#readme for more information. +""" + +_KWARGS_DESCRIPTION = """ +BERTScore Metrics with the hashcode from a source against one or more references. + +Args: + predictions (list of str): Prediction/candidate sentences. + references (list of str or list of list of str): Reference sentences. + lang (str): Language of the sentences; required (e.g. 'en'). + model_type (str): Bert specification, default using the suggested + model for the target language; has to specify at least one of + `model_type` or `lang`. + num_layers (int): The layer of representation to use, + default using the number of layers tuned on WMT16 correlation data. + verbose (bool): Turn on intermediate status update. + idf (bool or dict): Use idf weighting; can also be a precomputed idf_dict. + device (str): On which the contextual embedding model will be allocated on. + If this argument is None, the model lives on cuda:0 if cuda is available. + nthreads (int): Number of threads. + batch_size (int): Bert score processing batch size, + at least one of `model_type` or `lang`. `lang` needs to be + specified when `rescale_with_baseline` is True. + rescale_with_baseline (bool): Rescale bertscore with pre-computed baseline. + baseline_path (str): Customized baseline file. + use_fast_tokenizer (bool): `use_fast` parameter passed to HF tokenizer. New in version 0.3.10. + +Returns: + precision: Precision. + recall: Recall. + f1: F1 score. + hashcode: Hashcode of the library. 
+ +Examples: + + >>> predictions = ["hello there", "general kenobi"] + >>> references = ["hello there", "general kenobi"] + >>> bertscore = datasets.load_metric("bertscore") + >>> results = bertscore.compute(predictions=predictions, references=references, lang="en") + >>> print([round(v, 2) for v in results["f1"]]) + [1.0, 1.0] +""" + + +@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) +class BERTScore(datasets.Metric): + def _info(self): + return datasets.MetricInfo( + description=_DESCRIPTION, + citation=_CITATION, + homepage="https://github.com/Tiiiger/bert_score", + inputs_description=_KWARGS_DESCRIPTION, + features=datasets.Features( + { + "predictions": datasets.Value("string", id="sequence"), + "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"), + } + ), + codebase_urls=["https://github.com/Tiiiger/bert_score"], + reference_urls=[ + "https://github.com/Tiiiger/bert_score", + "https://arxiv.org/abs/1904.09675", + ], + ) + + def _compute( + self, + predictions, + references, + lang=None, + model_type=None, + num_layers=None, + verbose=False, + idf=False, + device=None, + batch_size=64, + nthreads=4, + all_layers=False, + rescale_with_baseline=False, + baseline_path=None, + use_fast_tokenizer=False, + ): + get_hash = bert_score.utils.get_hash + scorer = bert_score.BERTScorer + + if version.parse(bert_score.__version__) >= version.parse("0.3.10"): + get_hash = functools.partial(get_hash, use_fast_tokenizer=use_fast_tokenizer) + scorer = functools.partial(scorer, use_fast_tokenizer=use_fast_tokenizer) + elif use_fast_tokenizer: + raise ImportWarning( + "To use a fast tokenizer, the module `bert-score>=0.3.10` is required, and the current version of `bert-score` doesn't match this condition.\n" + 'You can install it with `pip install "bert-score>=0.3.10"`.' 
+            )
+
+        if model_type is None:
+            assert lang is not None, "either lang or model_type should be specified"
+            model_type = bert_score.utils.lang2model[lang.lower()]
+
+        if num_layers is None:
+            num_layers = bert_score.utils.model2layers[model_type]
+
+        hashcode = get_hash(
+            model=model_type,
+            num_layers=num_layers,
+            idf=idf,
+            rescale_with_baseline=rescale_with_baseline,
+            use_custom_baseline=baseline_path is not None,
+        )
+
+        with filter_logging_context():
+            if not hasattr(self, "cached_bertscorer") or self.cached_bertscorer.hash != hashcode:
+                self.cached_bertscorer = scorer(
+                    model_type=model_type,
+                    num_layers=num_layers,
+                    batch_size=batch_size,
+                    nthreads=nthreads,
+                    all_layers=all_layers,
+                    idf=idf,
+                    device=device,
+                    lang=lang,
+                    rescale_with_baseline=rescale_with_baseline,
+                    baseline_path=baseline_path,
+                )
+
+        (P, R, F) = self.cached_bertscorer.score(
+            cands=predictions,
+            refs=references,
+            verbose=verbose,
+            batch_size=batch_size,
+        )
+        output_dict = {
+            "precision": P.tolist(),
+            "recall": R.tolist(),
+            "f1": F.tolist(),
+            "hashcode": hashcode,
+        }
+        return output_dict
+
+    def add_batch(self, predictions=None, references=None, **kwargs):
+        """Add a batch of predictions and references for the metric's stack."""
+        # References can be strings or lists of strings
+        # Let's change strings to lists of strings with one element
+        if references is not None:
+            references = [[ref] if isinstance(ref, str) else ref for ref in references]
+        super().add_batch(predictions=predictions, references=references, **kwargs)
+
+    def add(self, prediction=None, reference=None, **kwargs):
+        """Add one prediction and reference for the metric's stack."""
+        # References can be strings or lists of strings
+        # Let's change strings to lists of strings with one element
+        if isinstance(reference, str):
+            reference = [reference]
+        super().add(prediction=prediction, reference=reference, **kwargs)
diff --git a/testbed/huggingface__datasets/metrics/bleu/README.md b/testbed/huggingface__datasets/metrics/bleu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0191a1a1f9deebea98abfb056a4a39fab8fadbb2 --- /dev/null +++ b/testbed/huggingface__datasets/metrics/bleu/README.md @@ -0,0 +1,123 @@
+# Metric Card for BLEU
+
+
+## Metric Description
+BLEU (Bilingual Evaluation Understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation, the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.
+
+Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Neither intelligibility nor grammatical correctness is taken into account.
+
+## Intended Uses
+BLEU and BLEU-derived metrics are most often used for machine translation.
+
+## How to Use
+
+This metric takes as input lists of predicted sentences and reference sentences:
+
+```python
+>>> predictions = [
+...     ["hello", "there", "general", "kenobi"],
+...     ["foo", "bar", "foobar"]
+... ]
+>>> references = [
+... 
[["hello", "there", "general", "kenobi"]], +... [["foo", "bar", "foobar"]] +... ] +>>> bleu = datasets.load_metric("bleu") +>>> results = bleu.compute(predictions=predictions, references=references) +>>> print(results) +{'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.0, 'translation_length': 7, 'reference_length': 7} +``` + +### Inputs +- **predictions** (`list`): Translations to score. Each translation should be tokenized into a list of tokens. +- **references** (`list` of `list`s): references for each translation. Each reference should be tokenized into a list of tokens. +- **max_order** (`int`): Maximum n-gram order to use when computing BLEU score. Defaults to `4`. +- **smooth** (`boolean`): Whether or not to apply Lin et al. 2004 smoothing. Defaults to `False`. + +### Output Values +- **bleu** (`float`): bleu score +- **precisions** (`list` of `float`s): geometric mean of n-gram precisions, +- **brevity_penalty** (`float`): brevity penalty, +- **length_ratio** (`float`): ratio of lengths, +- **translation_length** (`int`): translation_length, +- **reference_length** (`int`): reference_length + +Output Example: +```python +{'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.167, 'translation_length': 7, 'reference_length': 6} +``` + +BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. + +#### Values from Popular Papers +The [original BLEU paper](https://aclanthology.org/P02-1040/) (Papineni et al. 2002) compares BLEU scores of five different models on the same 500-sentence corpus. These scores ranged from 0.0527 to 0.2571. + +The [Attention is All you Need paper](https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) (Vaswani et al. 2017) got a BLEU score of 0.284 on the WMT 2014 English-to-German translation task, and 0.41 on the WMT 2014 English-to-French translation task. + +### Examples + +Example where each sample has 1 reference: +```python +>>> predictions = [ +... ["hello", "there", "general", "kenobi"], +... ["foo", "bar", "foobar"] +... ] +>>> references = [ +... [["hello", "there", "general", "kenobi"]], +... [["foo", "bar", "foobar"]] +... ] +>>> bleu = datasets.load_metric("bleu") +>>> results = bleu.compute(predictions=predictions, references=references) +>>> print(results) +{'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.0, 'translation_length': 7, 'reference_length': 7} +``` + +Example where the first sample has 2 references: +```python +>>> predictions = [ +... ["hello", "there", "general", "kenobi"], +... ["foo", "bar", "foobar"] +... ] +>>> references = [ +... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], +... [["foo", "bar", "foobar"]] +... 
]
+>>> bleu = datasets.load_metric("bleu")
+>>> results = bleu.compute(predictions=predictions, references=references)
+>>> print(results)
+{'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.1666666666666667, 'translation_length': 7, 'reference_length': 6}
+```
+
+## Limitations and Bias
+This metric has multiple known limitations and biases:
+- BLEU compares overlap in tokens from the predictions and references, instead of comparing meaning. This can lead to discrepancies between BLEU scores and human ratings.
+- BLEU scores are not comparable across different datasets, nor are they comparable across different languages.
+- BLEU scores can vary greatly depending on which parameters are used to generate the scores, especially when different tokenization and normalization techniques are used. It is therefore not possible to compare BLEU scores generated using different parameters, or when these parameters are unknown.
+- Shorter predicted translations achieve higher scores than longer ones, simply due to how the score is calculated. A brevity penalty is introduced to attempt to counteract this.
+
+
+## Citation
+```bibtex
+@INPROCEEDINGS{Papineni02bleu:a,
+    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
+    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
+    booktitle = {},
+    year = {2002},
+    pages = {311--318}
+}
+@inproceedings{lin-och-2004-orange,
+    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
+    author = "Lin, Chin-Yew  and
+      Och, Franz Josef",
+    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
+    month = "aug 23{--}aug 27",
+    year = "2004",
+    address = "Geneva, Switzerland",
+    publisher = "COLING",
+    url = "https://www.aclweb.org/anthology/C04-1072",
+    pages = "501--507",
+}
+```
+
+## Further References
+- This Hugging Face implementation uses [this Tensorflow implementation](https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py)
diff --git a/testbed/huggingface__datasets/metrics/bleu/bleu.py b/testbed/huggingface__datasets/metrics/bleu/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..545cb1e3f0abe1dfe9759fe7e5d79d50bf1f8ae3 --- /dev/null +++ b/testbed/huggingface__datasets/metrics/bleu/bleu.py @@ -0,0 +1,126 @@
+# Copyright 2020 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BLEU metric. 
""" + +import datasets + +from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py + + +_CITATION = """\ +@INPROCEEDINGS{Papineni02bleu:a, + author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, + title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, + booktitle = {}, + year = {2002}, + pages = {311--318} +} +@inproceedings{lin-och-2004-orange, + title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", + author = "Lin, Chin-Yew and + Och, Franz Josef", + booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics", + month = "aug 23{--}aug 27", + year = "2004", + address = "Geneva, Switzerland", + publisher = "COLING", + url = "https://www.aclweb.org/anthology/C04-1072", + pages = "501--507", +} +""" + +_DESCRIPTION = """\ +BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. +Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation, +the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and +remains one of the most popular automated and inexpensive metrics. + +Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. +Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness +are not taken into account[citation needed]. + +BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 +representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the +reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional +reference translations will increase the BLEU score. +""" + +_KWARGS_DESCRIPTION = """ +Computes BLEU score of translated segments against one or more references. +Args: + predictions: list of translations to score. + Each translation should be tokenized into a list of tokens. + references: list of lists of references for each translation. + Each reference should be tokenized into a list of tokens. + max_order: Maximum n-gram order to use when computing BLEU score. + smooth: Whether or not to apply Lin et al. 2004 smoothing. +Returns: + 'bleu': bleu score, + 'precisions': geometric mean of n-gram precisions, + 'brevity_penalty': brevity penalty, + 'length_ratio': ratio of lengths, + 'translation_length': translation_length, + 'reference_length': reference_length +Examples: + + >>> predictions = [ + ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample + ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample + ... ] + >>> references = [ + ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references) + ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference) + ... 
] + >>> bleu = datasets.load_metric("bleu") + >>> results = bleu.compute(predictions=predictions, references=references) + >>> print(results["bleu"]) + 1.0 +""" + + +@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) +class Bleu(datasets.Metric): + def _info(self): + return datasets.MetricInfo( + description=_DESCRIPTION, + citation=_CITATION, + inputs_description=_KWARGS_DESCRIPTION, + features=datasets.Features( + { + "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), + "references": datasets.Sequence( + datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references" + ), + } + ), + codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], + reference_urls=[ + "https://en.wikipedia.org/wiki/BLEU", + "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", + ], + ) + + def _compute(self, predictions, references, max_order=4, smooth=False): + score = compute_bleu( + reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth + ) + (bleu, precisions, bp, ratio, translation_length, reference_length) = score + return { + "bleu": bleu, + "precisions": precisions, + "brevity_penalty": bp, + "length_ratio": ratio, + "translation_length": translation_length, + "reference_length": reference_length, + } diff --git a/testbed/huggingface__datasets/metrics/cer/README.md b/testbed/huggingface__datasets/metrics/cer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f3661a301f1034b629c8a99d6901312ff0bcc71e --- /dev/null +++ b/testbed/huggingface__datasets/metrics/cer/README.md @@ -0,0 +1,124 @@ +# Metric Card for CER + +## Metric description + +Character error rate (CER) is a common metric of the performance of an automatic speech recognition (ASR) system. CER is similar to Word Error Rate (WER), but operates on characters instead of words. + +Character error rate can be computed as: + +`CER = (S + D + I) / N = (S + D + I) / (S + D + C)` + +where + +`S` is the number of substitutions, + +`D` is the number of deletions, + +`I` is the number of insertions, + +`C` is the number of correct characters, + +`N` is the number of characters in the reference (`N=S+D+C`). + + +## How to use + +The metric takes two inputs: references (a list of references for each speech input) and predictions (a list of transcriptions to score). + +```python +from datasets import load_metric +cer = load_metric("cer") +cer_score = cer.compute(predictions=predictions, references=references) +``` +## Output values + +This metric outputs a float representing the character error rate. + +``` +print(cer_score) +0.34146341463414637 +``` + +The **lower** the CER value, the **better** the performance of the ASR system, with a CER of 0 being a perfect score. + +However, CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions (see [Examples](#Examples) below). + +### Values from popular papers + +This metric is highly dependent on the content and quality of the dataset, and therefore users can expect very different values for the same model but on different datasets.
+ +Multilingual datasets such as [Common Voice](https://huggingface.co/datasets/common_voice) report different CERs depending on the language, ranging from 0.02-0.03 for languages such as French and Italian, to 0.05-0.07 for English (see [here](https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonVoice/ASR/CTC) for more values). + +## Examples + +Perfect match between prediction and reference: + +```python +from datasets import load_metric +cer = load_metric("cer") +predictions = ["hello world", "good night moon"] +references = ["hello world", "good night moon"] +cer_score = cer.compute(predictions=predictions, references=references) +print(cer_score) +0.0 +``` + +Partial match between prediction and reference: + +```python +from datasets import load_metric +cer = load_metric("cer") +predictions = ["this is the prediction", "there is an other sample"] +references = ["this is the reference", "there is another one"] +cer_score = cer.compute(predictions=predictions, references=references) +print(cer_score) +0.34146341463414637 +``` + +No match between prediction and reference: + +```python +from datasets import load_metric +cer = load_metric("cer") +predictions = ["hello"] +references = ["gracias"] +cer_score = cer.compute(predictions=predictions, references=references) +print(cer_score) +1.0 +``` + +CER above 1 due to insertion errors: + +```python +from datasets import load_metric +cer = load_metric("cer") +predictions = ["hello world"] +references = ["hello"] +cer_score = cer.compute(predictions=predictions, references=references) +print(cer_score) +1.2 +``` + +## Limitations and bias + +CER is useful for comparing different models for tasks such as automatic speech recognition (ASR) and optical character recognition (OCR), especially for multilingual datasets where WER is not suitable given the diversity of languages. However, CER provides no details on the nature of the recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort. + +Also, in some cases, instead of reporting the raw CER, a normalized CER is reported where the number of mistakes is divided by the sum of the number of edit operations (`I` + `S` + `D`) and `C` (the number of correct characters), which results in CER values that fall within the range of 0–100%.
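To make the distinction concrete, here is a minimal sketch (not part of the `cer` metric itself) that computes both variants from hypothetical edit-operation counts; the counts `S`, `D`, `I` and `C` are assumed to come from a separate character-level alignment step:

```python
# Hypothetical edit-operation counts from a character-level alignment:
# substitutions, deletions, insertions, and correct characters.
S, D, I, C = 4, 1, 2, 10

raw_cer = (S + D + I) / (S + D + C)             # 7/15 ~= 0.467; can exceed 1 when I is large
normalized_cer = (S + D + I) / (S + D + I + C)  # 7/17 ~= 0.412; always between 0 and 1

print(f"raw CER: {raw_cer:.3f}, normalized CER: {normalized_cer:.3f}")
```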
+ + +## Citation + + +```bibtex +@inproceedings{morris2004, +author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, +year = {2004}, +month = {01}, +pages = {}, +title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} +} +``` + +## Further References + +- [Hugging Face Tasks -- Automatic Speech Recognition](https://huggingface.co/tasks/automatic-speech-recognition) diff --git a/testbed/huggingface__datasets/notebooks/Overview.ipynb b/testbed/huggingface__datasets/notebooks/Overview.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1812e1af1b64ecb9a2d8111008c59a68dee72507 --- /dev/null +++ b/testbed/huggingface__datasets/notebooks/Overview.ipynb @@ -0,0 +1,3755 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "view-in-github" + }, + "source": [ + "Open In Colab" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**⚠️ This notebook is deprecated in favor of the [Quickstart notebook](https://github.com/huggingface/notebooks/blob/main/datasets_doc/quickstart.ipynb)**" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "zNp6kK7OvSUg", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# HuggingFace 🤗 Datasets library - Quick overview\n", + "\n", + "Models come and go (linear models, LSTM, Transformers, ...) but two core elements have consistently been the beating heart of Natural Language Processing: Datasets & Metrics\n", + "\n", + "🤗 Datasets is a fast and efficient library to easily share and load datasets, already providing access to the public datasets in the [Hugging Face Hub](https://huggingface.co/datasets).\n", + "\n", + "The library has several interesting features (besides easy access to datasets):\n", + "\n", + "- Built-in interoperability with PyTorch, Tensorflow 2, Pandas and Numpy\n", + "- Lightweight and fast library with a transparent and pythonic API\n", + "- Thrives on large datasets: frees you from RAM limits, since all datasets are memory-mapped on drive by default.\n", + "- Smart caching with an intelligent `tf.data`-like cache: never wait for your data to be processed several times\n", + "\n", + "🤗 Datasets originated from a fork of the awesome Tensorflow-Datasets and the HuggingFace team wants to deeply thank the team behind this amazing library and user API. We have tried to keep a layer of compatibility with `tfds` and can provide conversion from one format to the other.\n", + "To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets."
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "dzk9aEtIvSUh", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# Main datasets API\n", + "\n", + "This notebook is a quick dive in the main user API for loading datasets in `datasets`" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "my95uHbLyjwR", + "outputId": "8db75d45-02b9-46ed-efc2-f8ff764fe3d7", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: datasets in /usr/local/lib/python3.10/dist-packages (2.12.0)\n", + "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets) (1.22.4)\n", + "Requirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (9.0.0)\n", + "Requirement already satisfied: dill<0.3.7,>=0.3.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.3.6)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets) (1.5.3)\n", + "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (2.27.1)\n", + "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (4.65.0)\n", + "Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets) (3.2.0)\n", + "Requirement already satisfied: multiprocess in /usr/local/lib/python3.10/dist-packages (from datasets) (0.70.14)\n", + "Requirement already satisfied: fsspec[http]>=2021.11.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (2023.4.0)\n", + "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets) (3.8.4)\n", + "Requirement already satisfied: huggingface-hub<1.0.0,>=0.11.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.14.1)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from datasets) (23.1)\n", + "Requirement already satisfied: responses<0.19 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.18.0)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from datasets) (6.0)\n", + "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (23.1.0)\n", + "Requirement already satisfied: charset-normalizer<4.0,>=2.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (2.0.12)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (6.0.4)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (4.0.2)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.9.2)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.3.3)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets) (1.3.1)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from 
huggingface-hub<1.0.0,>=0.11.0->datasets) (3.12.0)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0.0,>=0.11.0->datasets) (4.5.0)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (2022.12.7)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets) (3.4)\n", + "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->datasets) (2022.7.1)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.1->pandas->datasets) (1.16.0)\n" + ] + } + ], + "source": [ + "# install datasets\n", + "!pip install datasets" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "PVjXLiYxvSUl", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "# Let's import the library. We typically only need at most two methods:\n", + "from datasets import list_datasets, load_dataset\n", + "\n", + "from pprint import pprint" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "TNloBBx-vSUo", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## Listing the currently available datasets" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "d3RJisGLvSUp", + "outputId": "1ece3326-6977-48c8-ba37-6b1753f1d029", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🤩 Currently 36662 datasets are available on the hub:\n", + "['acronym_identification', 'ade_corpus_v2', 'adversarial_qa', 'aeslc',\n", + " 'afrikaans_ner_corpus', 'ag_news', 'ai2_arc', 'air_dialogue',\n", + " 'ajgt_twitter_ar', 'allegro_reviews', 'allocine', 'alt', 'amazon_polarity',\n", + " 'amazon_reviews_multi', 'amazon_us_reviews', 'ambig_qa', 'americas_nli', 'ami',\n", + " 'amttl', 'anli', 'app_reviews', 'aqua_rat', 'aquamuse', 'ar_cov19',\n", + " 'ar_res_reviews', 'ar_sarcasm', 'arabic_billion_words', 'arabic_pos_dialect',\n", + " 'arabic_speech_corpus', 'arcd', 'arsentd_lev', 'art', 'arxiv_dataset',\n", + " 'ascent_kb', 'aslg_pc12', 'asnq', 'asset', 'assin', 'assin2', 'atomic',\n", + " 'autshumato', 'facebook/babi_qa', 'banking77', 'bbaw_egyptian',\n", + " 'bbc_hindi_nli', 'bc2gm_corpus', 'beans', 'best2009', 'bianet', 'bible_para',\n", + " 'big_patent', 'billsum', 'bing_coronavirus_query_set', 'biomrc', 'biosses',\n", + " 'blbooks', 'blbooksgenre', 'blended_skill_talk', 'blimp',\n", + " 'blog_authorship_corpus', 'bn_hate_speech', 'bnl_newspapers', 'bookcorpus',\n", + " 'bookcorpusopen', 'boolq', 'bprec', 'break_data', 'brwac', 'bsd_ja_en',\n", + " 'bswac', 'c3', 'c4', 'cail2018', 'caner', 'capes', 'casino',\n", + " 'catalonia_independence', 'cats_vs_dogs', 'cawac', 'cbt', 'cc100', 'cc_news',\n", + " 'ccaligned_multilingual', 'cdsc', 'cdt', 'cedr', 'cfq', 'chr_en', 'cifar10',\n", + " 'cifar100', 'circa', 'civil_comments', 'clickbait_news_bg', 'climate_fever',\n", + " 'clinc_oos', 'clue', 
'cmrc2018', 'cmu_hinglish_dog', 'cnn_dailymail',\n", + " 'coached_conv_pref', '36562 more...']\n" + ] + } + ], + "source": [ + "# Currently available datasets\n", + "datasets = list_datasets()\n", + "\n", + "print(f\"🤩 Currently {len(datasets)} datasets are available on the hub:\")\n", + "pprint(datasets[:100] + [f\"{len(datasets) - 100} more...\"], compact=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "7T5AG3BxvSUr", + "outputId": "72b52fbd-2344-4802-f040-83d640cbf899", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'_id': '621ffdd236468d709f181f95',\n", + " 'author': None,\n", + " 'cardData': {'annotations_creators': ['crowdsourced'],\n", + " 'dataset_info': {'config_name': 'plain_text',\n", + " 'dataset_size': 89789763,\n", + " 'download_size': 35142551,\n", + " 'features': [{'dtype': 'string', 'name': 'id'},\n", + " {'dtype': 'string',\n", + " 'name': 'title'},\n", + " {'dtype': 'string',\n", + " 'name': 'context'},\n", + " {'dtype': 'string',\n", + " 'name': 'question'},\n", + " {'name': 'answers',\n", + " 'sequence': [{'dtype': 'string',\n", + " 'name': 'text'},\n", + " {'dtype': 'int32',\n", + " 'name': 'answer_start'}]}],\n", + " 'splits': [{'name': 'train',\n", + " 'num_bytes': 79317110,\n", + " 'num_examples': 87599},\n", + " {'name': 'validation',\n", + " 'num_bytes': 10472653,\n", + " 'num_examples': 10570}]},\n", + " 'language': ['en'],\n", + " 'language_creators': ['crowdsourced', 'found'],\n", + " 'license': ['cc-by-4.0'],\n", + " 'multilinguality': ['monolingual'],\n", + " 'paperswithcode_id': 'squad',\n", + " 'pretty_name': 'SQuAD',\n", + " 'size_categories': ['10K dict`." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "vUr65K-4vSVQ", + "outputId": "0d770257-f8d0-45fc-8ae2-7f387210f068", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-242ccd893f32bdf9.arrow\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['My cute title: Super_Bowl_50', 'My cute title: Warsaw']\n" + ] + } + ], + "source": [ + "# Let's add a prefix 'My cute title: ' to each of our titles\n", + "\n", + "def add_prefix_to_title(example):\n", + " example['title'] = 'My cute title: ' + example['title']\n", + " return example\n", + "\n", + "prefixed_dataset = dataset.map(add_prefix_to_title)\n", + "\n", + "print(prefixed_dataset.unique('title')) # `.unique()` is a super fast way to print the unique elements in a column (see the doc for all the methods)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "FcZ_amDAvSVS", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "This call to `.map()` computes and returns the updated table.
It will also store the updated table in a cache file indexed by the current state and the mapped function.\n", + "\n", + "A subsequent call to `.map()` (even in another python session) will reuse the cached file instead of recomputing the operation.\n", + "\n", + "You can test this by re-running the previous cell: you will see that the results are loaded directly from the cache and not re-computed.\n", + "\n", + "The updated dataset returned by `.map()` is (again) directly memory-mapped from drive and not allocated in RAM." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Skbf8LUEvSVT", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "The function you provide to `.map()` should accept an input with the format of an item of the dataset: `function(dataset[0])` and return a python dict.\n", + "\n", + "The columns and type of the outputs can be different from the input dict. In this case the new keys will be added as additional columns in the dataset.\n", + "\n", + "Basically each dataset example dict is updated with the dictionary returned by the function like this: `example.update(function(example))`." + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "d5De0CfTvSVT", + "outputId": "e6282b6e-d9ce-4e8b-e0f4-6c9a34330bce", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-4f3eee21db868c87.arrow\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['My cutest title: Super_Bowl_50', 'My cutest title: Warsaw']\n" + ] + } + ], + "source": [ + "# Since the input example dict is updated with our function output dict,\n", + "# we can actually just return the updated 'title' field\n", + "titled_dataset = dataset.map(lambda example: {'title': 'My cutest title: ' + example['title']})\n", + "\n", + "print(titled_dataset.unique('title'))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Q5vny56-vSVV", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Removing columns\n", + "You can also remove columns when running map with the `remove_columns=List[str]` argument."
+ ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "-sPWnsz-vSVW", + "outputId": "c116e3cb-2fa4-4304-d6a5-d600b3bc4930", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-2800c1727354fbe2.arrow\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['id', 'context', 'question', 'answers', 'new_title']\n", + "['Wouhahh: Super_Bowl_50', 'Wouhahh: Warsaw']\n" + ] + } + ], + "source": [ + "# This will remove the 'title' column while doing the update (after having sent it to the mapped function so you can use it in your function!)\n", + "less_columns_dataset = dataset.map(lambda example: {'new_title': 'Wouhahh: ' + example['title']}, remove_columns=['title'])\n", + "\n", + "print(less_columns_dataset.column_names)\n", + "print(less_columns_dataset.unique('new_title'))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "G459HzD-vSVY", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Using example indices\n", + "With `with_indices=True`, dataset indices (from `0` to `len(dataset)`) will be supplied to the function which must thus have the following signature: `function(example: dict, index: int) -> dict`" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "_kFL37R2vSVY", + "outputId": "16a436d2-6a2e-4526-8016-b47273116a71", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-e23b98819de39aea.arrow\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['0: Which NFL team represented the AFC at Super Bowl 50?',\n", + " '1: Which NFL team represented the NFC at Super Bowl 50?',\n", + " '2: Where did Super Bowl 50 take place?',\n", + " '3: Which NFL team won Super Bowl 50?',\n", + " '4: What color was used to emphasize the 50th anniversary of the Super Bowl?']\n" + ] + } + ], + "source": [ + "# This will add the index in the dataset to the 'question' field\n", + "with_indices_dataset = dataset.map(lambda example, idx: {'question': f'{idx}: ' + example['question']},\n", + " with_indices=True)\n", + "\n", + "pprint(with_indices_dataset['question'][:5])" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "xckhVEWFvSVb", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "### Modifying the dataset with batched updates" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "dzmicbSnvSVb", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "`.map()` can also work with batches of examples (slices of the dataset).\n", + "\n", + "This is particularly interesting if you have a function that can handle batches of inputs like the tokenizers of HuggingFace `tokenizers`.\n", + "\n", + "To work on batched inputs, set `batched=True` when calling `.map()` and supply a function with the following signature: `function(examples:
Dict[List]) -> Dict[List]` or, if you use indices, `function(examples: Dict[List], indices: List[int]) -> Dict[List]`.\n", + "\n", + "Basically, your function should accept an input with the format of a slice of the dataset: `function(dataset[:10])`." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "pxHbgSTL0itj", + "outputId": "20471793-ca8e-4d06-80cd-4bf822eb0d40", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.29.2)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers) (3.12.0)\n", + "Requirement already satisfied: huggingface-hub<1.0,>=0.14.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.14.1)\n", + "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (1.22.4)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers) (23.1)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (6.0)\n", + "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (2022.10.31)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers) (2.27.1)\n", + "Requirement already satisfied: tokenizers!=0.11.3,<0.14,>=0.11.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.13.3)\n", + "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers) (4.65.0)\n", + "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.14.1->transformers) (2023.4.0)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.14.1->transformers) (4.5.0)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.4)\n" + ] + } + ], + "source": [ + "!pip install transformers" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "id": "T7gpEg0yvSVc", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "# Let's import a fast tokenizer that can work on batched inputs\n", + "# (the 'Fast' tokenizers in HuggingFace)\n", + "from transformers import BertTokenizerFast, logging as transformers_logging\n", + "\n", + "transformers_logging.set_verbosity_warning()\n", + "\n", + "tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "fAmLTPC9vSVe", + "outputId":
"4388ecc8-049a-41cc-90cb-1d48fd05c8dd", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-1d272c8f779fd409.arrow\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "encoded_dataset[0]\n", + "{'answers': {'answer_start': [177, 177, 177],\n", + " 'text': ['Denver Broncos', 'Denver Broncos', 'Denver Broncos']},\n", + " 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1],\n", + " 'context': 'Super Bowl 50 was an American football game to determine the '\n", + " 'champion of the National Football League (NFL) for the 2015 '\n", + " 'season. The American Football Conference (AFC) champion Denver '\n", + " 'Broncos defeated the National Football Conference (NFC) champion '\n", + " 'Carolina Panthers 24–10 to earn their third Super Bowl title. The '\n", + " \"game was played on February 7, 2016, at Levi's Stadium in the San \"\n", + " 'Francisco Bay Area at Santa Clara, California. As this was the '\n", + " '50th Super Bowl, the league emphasized the \"golden anniversary\" '\n", + " 'with various gold-themed initiatives, as well as temporarily '\n", + " 'suspending the tradition of naming each Super Bowl game with '\n", + " 'Roman numerals (under which the game would have been known as '\n", + " '\"Super Bowl L\"), so that the logo could prominently feature the '\n", + " 'Arabic numerals 50.',\n", + " 'id': '56be4db0acb8001400a502ec',\n", + " 'input_ids': [101, 3198, 5308, 1851, 1108, 1126, 1237, 1709, 1342, 1106, 4959,\n", + " 1103, 3628, 1104, 1103, 1305, 2289, 1453, 113, 4279, 114, 1111,\n", + " 1103, 1410, 1265, 119, 1109, 1237, 2289, 3047, 113, 10402, 114,\n", + " 3628, 7068, 14722, 2378, 1103, 1305, 2289, 3047, 113, 24743, 114,\n", + " 3628, 2938, 13598, 1572, 782, 1275, 1106, 7379, 1147, 1503, 3198,\n", + " 5308, 1641, 119, 1109, 1342, 1108, 1307, 1113, 1428, 128, 117,\n", + " 1446, 117, 1120, 12388, 112, 188, 3339, 1107, 1103, 1727, 2948,\n", + " 2410, 3894, 1120, 3364, 10200, 117, 1756, 119, 1249, 1142, 1108,\n", + " 1103, 13163, 3198, 5308, 117, 1103, 2074, 13463, 1103, 107, 5404,\n", + " 5453, 107, 1114, 1672, 2284, 118, 12005, 11751, 117, 1112, 1218,\n", + " 1112, 7818, 28117, 20080, 16264, 1103, 3904, 1104, 10505, 1296,\n", + " 3198, 5308, 1342, 1114, 2264, 183, 15447, 16179, 113, 1223, 1134,\n", + " 1103, 1342, 1156, 1138, 1151, 1227, 1112, 107, 3198, 5308, 149,\n", + " 107, 114, 117, 1177, 1115, 1103, 7998, 1180, 15199, 2672, 1103,\n", + " 4944, 183, 15447, 16179, 1851, 119, 102],\n", + " 'question': 'Which NFL team represented the AFC at Super Bowl 50?',\n", + " 'title': 'Super_Bowl_50',\n", + " 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0]}\n" + ] + } + ], + "source": [ + "# Now let's batch tokenize our dataset 'context'\n", + "encoded_dataset = dataset.map(lambda example: tokenizer(example['context']), batched=True)\n", + "\n", + "print(\"encoded_dataset[0]\")\n", + "pprint(encoded_dataset[0], compact=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "kNaJdKskvSVf", + "outputId": "17855cc9-47d3-4060-840c-8e8cdd71290d", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['id',\n", + " 'title',\n", + " 'context',\n", + " 'question',\n", + " 'answers',\n", + " 'input_ids',\n", + " 'token_type_ids',\n", + " 'attention_mask']\n" + ] + } + ], + "source": [ + "# we have added additional columns\n", + "pprint(encoded_dataset.column_names)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "m3To8ztMvSVj", + "outputId": "dc46c517-209f-4796-d70c-e99e6d42efe7", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-a915f5d1f8009aff.arrow\n" + ] + } + ], + "source": [ + "# Let's show a more complex processing pipeline: the full preparation of the SQuAD dataset\n", + "# for training a model from Transformers\n", + "def convert_to_features(batch):\n", + " # Tokenize contexts and questions (as pairs of inputs)\n", + " encodings = tokenizer(batch['context'], batch['question'], truncation=True)\n", + "\n", + " # Compute start and end tokens for labels\n", + " start_positions, end_positions = [], []\n", + " for i, answer in enumerate(batch['answers']):\n", + " first_char = answer['answer_start'][0]\n", + " last_char = first_char + len(answer['text'][0]) - 1\n", + " start_positions.append(encodings.char_to_token(i, first_char))\n", + " end_positions.append(encodings.char_to_token(i, last_char))\n", + "\n", + " encodings.update({'start_positions': start_positions, 'end_positions': end_positions})\n", + " return encodings\n", + "\n", + "encoded_dataset = dataset.map(convert_to_features, batched=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "KBnmSa46vSVl", + "outputId": "17b8e72e-4434-4364-ec0c-67c5288037a4", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "column_names ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']\n", + "start_positions [34, 45, 80, 34, 98]\n" + ] + } + ], + "source": [ + "# Now our dataset comprises the labels for the start and end positions\n", + "# as well as the offsets for converting tokens back\n", + "# into spans of the original string for evaluation\n", + "print(\"column_names\",
encoded_dataset.column_names)\n", + "print(\"start_positions\", encoded_dataset[:5]['start_positions'])" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "J1utN8K4muDW" + }, + "source": [ + "### Image datasets" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "vdYUjP60m-Ie" + }, + "source": [ + "Images are loaded using Pillow:" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "tAbviPxPm4Ce", + "outputId": "5c38e76e-ae1c-45c2-c20c-110a806cab49" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:datasets.info:Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/cats_vs_dogs/d4fe9cf31b294ed8639aa58f7d8ee13fe189011837038ed9a774fde19a911fcb\n", + "INFO:datasets.builder:Overwrite dataset info from restored data version if exists.\n", + "INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/cats_vs_dogs/default/1.0.0/d4fe9cf31b294ed8639aa58f7d8ee13fe189011837038ed9a774fde19a911fcb\n", + "WARNING:datasets.builder:Found cached dataset cats_vs_dogs (/root/.cache/huggingface/datasets/cats_vs_dogs/default/1.0.0/d4fe9cf31b294ed8639aa58f7d8ee13fe189011837038ed9a774fde19a911fcb)\n", + "INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/cats_vs_dogs/default/1.0.0/d4fe9cf31b294ed8639aa58f7d8ee13fe189011837038ed9a774fde19a911fcb\n" + ] + }, + { + "data": { + "text/plain": [ + "{'image': <PIL.JpegImagePlugin.JpegImageFile>,\n", + " 'labels': 0}" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "image_dataset = load_dataset(\"cats_vs_dogs\", split=\"train\")\n", + "image_dataset[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 392 + }, + "id": "z0q3Do11npXd", + "outputId": "b545b95d-746f-4777-f233-a7851a44b72c" + }, + "outputs": [ + { + "data": { + "image/png":
"iVBORw0KGgoAAAANSUhEUgAAAfQAAAF3CAIAAADckC6rAAEAAElEQVR4nOz925bcyK4sCpoBTkZK87LW2uf06dH9Y/2D/Tf9A/3cY4+zz7rNS0nKjCAdsH6Ak8GIzFRJKqlKc5YwNFIMDw9enCQcbgAM/P/8v/9fAEiSNLPaqJYX253a2+82Xmwved5uZvtX+18A+4+etfPFdiBfbH9t/8dTqr/J8VG8tu/nIY0RSEJSREQEIrNHRvTLoqVH77H2XHtG9DNj7ZfL5fx4uTw+fXj3/unDh+W8ZI8mTjZNrT3Mp9Pp9GY+tdbmeTazuU3eSJKZmansyA6AysyEIjMzk6lF7yUB6RSQiuyxRgRJkAGsvV/WvvRM0MxOWKfTzGlaUj+dz+/Pl6dYOwCjyMxc+6K+GilFruLD5O6TeWvNzYCUJMknC8in9qc///nf/o//7d/+9//tj3/+0zRNb6eTu7fWfJ58apwmc5c3n9/CJ9gMn8AJdAmApT3hXsadgux5o23395Pl5f6SXmzny82fLa/u3149wPGZ1CbsedyhJDC3j8ddbY+9/Bee+VcWfu752M93udn/9HLzsxd87F0NgA5f5uE1v2kHcHMfb57GXZ/c/AYglpuP26+ZY0sSdd1t6HB/mcj9cKObEPVzjRdmxfZ4VGv9REqO9pv+IdufpfbiiHwjee0G/Aoi6ascfUx1IAVI7i4XJKSYIiCnpBZtngMxZX9jwOTT+fHJwYnT1Fqbp3l+mKa5teZuZgbj3VEABxOq7ZoTKUNjqztKhWRSSMrM0sFr5tpzUdDaw/zg03TCktDa++OyPD4+nXuXmbn99PjBzCRFdhOs0d0dafNsZtM0TeYkIUWsoWxthjqAZVk+vPswzzPJ05uHmQ7AzJhpkjJAiukSJCAgAx0y4De7+z/kh/y2QvLF6f95+8d6fuaefw3lfmfC/wpHPMqu1n+hfr9dfKh2zaYQjDQwhCDRBBjSgalmgtbam9OKlMOb+exzWe7z6TT7bJbbbseSyMwghWQgzSQpQY6T99YkRZDJRAegHBN7AALo1uT0Rnczy8Ca+bT2x/P56Xx+ipAZ2rT0cAcpEXSf5mlqTQrMD2bm7mZuyogQbDNrLEOXy0Xv3tEtwT/0fNNmSWbG9IigEUwxlSl20EhBIhNwKV43ZO9u2z7uX3zHfsgP+c0kiXrUy0LCMy28K+tj+1GDV3vtZ/SRAbH31+hjQCRhGO27uvvmyv05KvLr6Pe76/zl+t2E5EB7SEmCO1MUBO5H1NQnI+Bkmaw0MqdpWdYmd9pk02l6mOZ5arO3qVlgX3YpkjSA7lKBFMaMBAxW3YyeSjNBSTm9eWZCBriR1lJcMy6Ry7I+XS7MpxCXjCVyBdbMpUf0tc3TNE3uJDk1P03Nwcw8kwQKBkIqMxEhYV1CLpLZtTwtj+1pbpOTl4cHSe5uU2MmIkCDKdUzQKYxYQGZkD9U9Q/5zuWgH47P6uvP7Q2WCG1dSXAHeWyAMySIDW271ft1jBstP5QVxfFzAISLQRAwoBbHAIzMsczP61Tx68Eydyj8ryO/XK0biH1q3S/BaWkwZXMDIECSp6QJ6B2GFOiyTGs5BfKPf/wj00zmbFNrU5utNVpjMykgMVI1cRspmCXqBlIGJBrZIYkGJESZG0BrJBlNZJYRn8hkRF7WZelpyK5cM556X6Wkqcno88Pb6TTPzWlqpJspuiJ6ZpcK30fKCMfwBTRrNesocz1fnt4/SXxzOklqrdnUYHQyLU0RETAqO+gWLgtRkP3Q7j/kn0lye6D3Jemu3++sadVqdFMkesmcxz673EP/ISNzsyBZ+6Gw6zcD4m4/v5LlfvfxV9Dyz9X60Zb/dCEJDSfKcV41M3rdRSYIIDMNIkM0ySSX5GmSGznZH5FE0mQGM5toTjqQgNXNMzVYUhBFWGl1ExPdAGQTI4W63wTMCBjhsA7gsvbz+fLU+xoZiYRZY0LLujwt/akvSwptOs0Tp1lGmME8FZGZEb0v62W5WGNKikL9TQZ3N07T1Fozg6Q1Q6vOj5fs8f7NLKnN3prBJKC5ZRr6Kqezy5oUG+z0Q37Idy2fbblvsuMwAIaFVqDKLVpyo9bvjijdwTL1vyTSpUiCAssbV+Y8TdojShzAQd3/Wg7V3wR2/1po+763o3ay5puTM0makJkAkitBSzoMaGbmbHI5G4LZpQATkIkuInLA2WSKtVAAUrBGJFIDWTOSpoShpSW1Ai4kSEGmtvR17f3S47L2FJMQDMR5PV8iLpFLMIxmnuYk174mtK4rskPRSGX2HuE0s2ZTM5+cE72ZWc1nIEWpO0iBPYP54d07px7ezK21iSj83Yyig13pyA5rzCAhvvSWfFTp/5gRfsg/itzpdz7T7yVH/Y6X2l/ZNkCyZAqyAmcklf2Ol5D9XxWW+dWOtcsvV+tX98WdcjereVQkwJ5iukkqxdfoImCim1LNJpuyI9fUquhgCgOElo0FgBOZMkOKBb7AzJmRdJkQImuxlZBqLoESMEFrzwDN20QPMDKXHsvaf3q8hHIF0CYaOxDrmutiZilAGevi1MnNOJYmjXaa5od5ntycZgBSsS6KEACkk3Rr7k67PJ2XeYrzolOXe5DZVobTQiQzTQkFYUQyAf/OQvd+yA/5BaLbQLeDfqc2GD0B3WLzr4W47N/uSl+Hv+U+TYaprMPcQB6T8vnu2iEC5Co4LCLuz+MVVXnsc4yLP3Z4Uc9W+/7t3udu4/netm9fPp/X5Dk483Htv+lQ0EiOxZSA3jtJNxq9Vk0zEO7WV3oHkNmVSYNNjc0mP6mnDJ0BRpnwgt6e3kRExCrIYHRBcADQFg/DhJRMSBCj4h5ZYHg9P4QeHt4wetp6uSzLZV2jLz0Smt/+wZoneIn1w2VZ+wo4HGvEEhf1MMX85vTwcHJaj/VPb/5oZk4p4ny5oEejuVFSM2+TmU/j5qakNbQ+vePf3QH8CX9+05oic7mkGaWA1eoGQsAA+PwH7BHcNfj3IOM3kVfv8mce9yPv5FeR8tgfj/L7XLu8dr++1mgcA07qcDy0Y1MsR2zkZ89z7OFov/N6xrVCd3dc49axG42vP1cVwYER/w6UecdIwGUBYbMGBYB2zZn4+pb787tyN3/gdQ2OV7Dyj0x0X2yXf9yov042rzxPNS0n4QU3uDHNiJ40unl6A5Q0srnJmAYnWlq4J4AoJ3hIOfCyBAlItMgoXzg2j40IwAG5kchUl9LQEkEoledlWaMva0RCRJqhGTLbg/XQ0teloydSXCNy6STdOU3TZKfTNLk3ZCCwPj2S3Az2rFh3JN7MjaTRDMqBr5Dgg0/seXn/+ORtnnye3AyICa2V50A0ctj6Y2S/sX78IT/kN5EbV+pBv3OPZfxFYkBuh7Bklv1OpsY3N/KVHarPrfU7hX63jZes8vIPfET7P5MvGbKjZr/T8p+O1B/CnkgzmxoyLVoqDGzoZkLS5AbPNUFmUlMCgCGZEjJGbhnpRgGu7JFhYw4mgKxocYCk08lEQAxASKsF39Jj6bH0jIgEYW3YJTZFv1x6XCJprb
Fl7xU1NU3Tm6m5wSsRNVIRVBKEWWttnk8nNzMzMNUBkCLYDGaEGUmDYu3n3qmY3E5tImnep2mSyQCBggtgI8wQud/R60AO4/2lUf4RXvNDvmu5RRQwklRzC3MhK0LiC/X7Ho3DEUkRGva719JeMkiwAdEUeIuv61B9rtaPG8/N9i1g/Lnljrsf/tyRv2RZfdTse6Tjvkarxmvn14/BSi4ijRRpZppOYQHrQTdKISQpc6cETAlACK+Q17SMrHtFMCmmIrMyloOiNBYu5luesQNJulJCSAqhZwXAJhACQYOQ6pE4X5bL2tcOiZx88tYeIOW6rqe5NROj55or5SmSLZOAEw9mfzid3j7MzRxAZo+INXpm9oieMZZ+zogAeYHOpw/n0+SN0+kN+koXrLK7FgEE4V5MCfujsN+Vz7qJP+SH/MpyUES32nyD0g9hLmNbAO/iIMnPtkXrAMxtNbDZ76RksmTBxhUpWUf4Fg7VF63yn+1/t0G+rPRfk4GJf768GBn56WZ7Eg6k5NXNSBgkn06wIJzIrnWslwIysZEiRRNjTBsBUoJUN8+SITElUCz4Y6T/DOimAPeRY5QKZUSGwuepLVgy64nK6BE6r8u7c5eUQhoYckdrzd0pONB75HKxzDfzNE+T2Ynndw46eSIfHKfmkzmAaTqd1+Xp6ekcS6zLeV0uy9J7/5c3f2itTafmQj9fzh8eT6fT3KZcu8vMZBbqa1kbkHJKbmxFX3bjfsgP+Q5FfIGnqABl/JwH9RP2PtJQN/t9hzcoK/IhAK4i/ADw1WEZPNOV3DylzzOvXrtUsxc0Pr6eI+vOf3uz55fi4l+EBZKoibL0u5HDNQ6aT6R3mKujIdUrmtFII1lxI/Ka15NMIqWKagoI16hVVnh9zdib75wZilCkpKKTYQH3NJNxpwoQrCd6KCIFJJFBZQa6hbz13iMh9Qt6PDR7M5/+9OahtVYOgcl8mqYHnxpGNMD7d39f+npZ155BapocaOYwcPJ2alOjM3q/LMvTubVmZnnKkxmtov47SCRijh+a/Yf8w8lrlvvNg7zrd2InCzvqd+AXeIRlN/Y7xc1gL/1+F5PybWGZjytlbY61o6od4PPnOFS/QO41+0u+3E9x1JLEIHKDtswi0kQYfcybMmcqgEiYnI0MwFyOFImkSEYmlaw11XDTOxAjryG3WEyJgyKs1Hoh8iC4rOu6rr33EESrRCkzm2zqoR69miUty5JPQRZdXpxob05v3j68OZ1mA//lT3+qe2eCpDgvaSL501//1tVh9Hl68/DWZ0ugZzysjW5W7AdCrP3y+FTnN4XMGunmUAPCwUBq4PAU3F591n/gND/kH1lE8Jie+lXU1yENlZsKq9zUDZUBDsf6JnHuL2Idd8b73dUeFCvuRuTF/odjffl5PsdefhaNORx3KFbbp+hh6A/3akhuE1vAksbQSgBIypGCp1SozHVqkUZ6/lgJ1LJLAgb74+FEB4UQBNIJLcuyrH1d16ChGel0M7PZ59RFXSBbm0VGvyzLMk0ucmKbJ3t4eJjnB5J9XU5vphrwWPuyPK3rKsnMpmaOKY3zw/zw9tTmKaSI+PMf/tB7X2PJzAIZ18vSleYu+jw/RJtBs6IqLWKDrzpV/5Af8t3KL3/UX0R7xlfbIfBKpGZzOEolgcirxT0cntV+2FAOOhtg08WlE9tQS2BRGWtg0ZhIArnveXM3FIohXs18kVx7H5bjFvCrzamIW0B/bGw2Hg9/j3Lf/jxSM7dFk67f771y6gC2aMUaCZhMPQDIqM1JkEYR3gknGiwVEdl7LGtakMweyg4GGPJgBhSnP54uT309L4FKDrVi7TISomSIVFBFJSxdHpaI6BF10JBfLv3pEhfYaoY2oeiElJNEsyed24S37RTKyLVn0vBmnnLt8zT965u3/+Pt2wez6ayT+8P0p/Nb78uamTa1NfR0vmhdZ6Or/+E0/+HN/GZq3tc5Y5qmefL3/b9EhacIwdRXYMqwh4c2L+ZLo2doBk8xdef0AIcc2RANmGFT0kCvfN3tliXQC1/y/Dr2x9eaTj4fUHr1uJtNcBPmfOX7HlQk2p/d40u+Z8Tk5nAzbcdiAqgKBABSRKVCJACcJ2d0xjpjmZnIC9YnrGdAaI7mgi/w1RrbifN8eqo3zWIsKs3YzCxinOfgPtzOLV5RRa+O2zdmG4qP0sVTV/1Qr3dsfPT1vm9YCrckofvdMYtfPo9N9X+tqwsF3zysMDQgAQEqkpDyqEnJsRFA3fcEQPSR41gntV/XoKWHRND27JiGYSDjK8Iy5dh8rnw37F9745FkhNDhJ3VRP7OQ+bhx/dq3X7P99QIPRfs1zt9gRRpgBgmNlpIZTHRkClYJrbLGNhmYxehWBrzq9ZauKT+6yq4M6n/fFjcRIQ0+STeD+x/9IcWujFRX9lBYwtqSTxLWnkvGaWowJm2h/vvf/yMzm3Fyswwza6fTQ+PJ37yd2x8fTlMzV1KanMX8LskzIyVaFkEpuF4WTnPrnRE+Ti7hmZmUDJ5MmpQpI6ABU5aJMcRu3pkf8prwdpRk19oNAI6Tg2ixMsNyRV4UC9cnXD4oFiA5NUyTrAmO1qA0hPSGJCjLKzHVj4XX9ybPFSa/okN1FAO6avBNx1UAH49KfGgrklDeK/cyNzJ/Vufe6d87jP7rtlcg+N7+keDr/cLrg7szZWT2gLtSEJCAu6SKamyt1Y2JtfdMVlSJEhJS3LYjAVScTGqL3USqeGfm1iTJMpRmNGsBZaYLIfXMlbn2XCy6mNQf/vCnXJdF8eGySHKlsjPzvPzdwIep2cNszR6m6aH5Q/M/PEwPrT1MtDJjsgNAam4tMztJKhIEUwhpPV/Qpvaw+DSzhXtXd4E5B83ElSSjV/KXKFLlCD7aRp9ZpOeHXCXHrFkL6WEsALLlkVCLC5bH9el9f/p7Xt6jLzRN02SnB00zfLY22bxYzuAMiNZQ5FVIIfgr0pb8kBdEtq8I7+BrfAuHKl6bz68m5q7fD+5BO0IuAADjoeer4fMHHP/meq7699D/2K5X2l/tP7AampAb4xo2j6tthJF3Zzi6uZmk5paGlFxIUwouSpYWXqE23rYCftmzOOJrua2h4lOVzyqpC4nM64IRgNNOp5O799ZyO3rv2Rk8h5dtl8xQkwkgos3TolDGY1+WXNRXRDfiXxun1ua5zZM/tHZq/tB8anaafDIa6IQSFKAIBByQbCw0RkU4ilp7Lmsua/a1xWQ5MYNJRKcaITJBwYI0kVIYmRupBq9RvT/ko1IgzN2bJ5O6tD2N2J3wmvqTS+hP6/u/L+/+evnpv3V5j1hpiofp9PAW8wPn2ecHj4fW52X+I30CSToqc+b4po8w73/+BdameZ6Zdc/bv2p0377TF8+nQiqOap3fzqF6Z7Y/P5s7OQ7BrnhxUJHHHR673e3tm5rt42PV64DuDl1FPF4cClxztRTWSYKUsQhXyj8xdub0ZmuHiFCakCpysQHDjMh2ICMkjdj4OjHBac2tmWebChfqvSMvsfaWHkoFM9HoQoqWtFhW1RRl6Ao4Tw8PD
/P8rzpP0zTP89T85HZqPjWbKFfWmkMAFdCG6Eax1dEAmiXMiSATyeh5ueR5Tp/QWvMGJiOFTpLZaEEZJFMmIPhO+f6pBZt+93I7UMNyShYV/xZAq5CkTElTP6Ov8fRu/ekv57/+1+Xdf2F5tOxG2cPEN2/bwxueZp/ftnyLOAUWtjSKfiL9yodCMg2HVEy85PH6Ib9Yng3qlYLMyF0f+qY/v02c+1HrXfX7K2/pBrwcvUl1llfiJG5u1X2fd7BM/f/pZngd4/P666a99DsgO7gOjvb7/Yxt3HL1LV0IM1Oa4GIaIWvKFKTK+C/Ipa/diis0iwDyALrHmALHEIEOTwOUBpp5KfdEKDJ7OBpoIpunaEabjGn54fzUCDBqpjehtXZ68/AncZ7neXKHmvk8+dzMFdE7UiCLBsy8VLCXA81IAAEKloTRogs9++P5UpMcaWDTnOvKcEwGrDW9ASa42XAzMZ2sbO0fuuLzJMtMP7xyDqJSmaMjQpnoF1yelp/+/viX/3r823/Eh5/Yz67eLNc+cb1gffTLW75ZgI48hT1anmAG26oIyEYt6Y94n/7p5Le23DfZNPuuzUnu9jvqGRh5kV8bljmqxbFAOCjuvePe59YXJFS09DYuVcH5RZD9xe1vYbZXIWyOgtibfi9E/XDqtuHwwjXuc6w5nMriH0AWML4dDpBNUrAqNjU2AJkZwYhkcYVmCtQW3s7ypR/GrRbMmYSQHOnIw5w3DzkktTRxcncj3eBQ5Nqfqiiem7XW3k7zG5/+2PI0Ta01ZXdqNk6klAa5RCSrFLzRKkSTrJlGkicSIVCQC31dlt7X6BEBwAHL1PQGnhV6UNpCIiwxkRKsqpf8oAX+dLmudzchRpzC3iWgRIQiEEs8fnj8+1/e/+Wvy7u/a3m0uIxYi1gtI6P7qZ8UMhIR/ujq0djcNxV/NdY3/W47wd2vdtm/RznY7LiiMVf7HVelL+DrKfdXZ6pXlPugDdjDhjb03NqgxOQWAXKngvFMKb/Y8g3ar9ewr4VftC13zR4HcoKatZDGdABK0c3gJoQWmeBwNklcHbZmaMtPAoZ3VRy4CAg73M6xxKlAlDqlZja3hsy/VyClIHOauZu709Gg6IkeDyf/48Mf//j29PYPDw/z6Y/9r/M8m1n0ZKoByFCkC0zJlJJR5QWFMFhNB4WCIiKEFNlMESGodwP7NGVrIvBmZbaa+GCGTjakxOakU5kwIDAi0n4EzPyM7CbF/jBqc2CxKnYqKSkjclEGQrGs58enp8fH9Wmx3hs2Jz+ApTdRoPk0zxf3hvkJBsYCzYATqR9T768vN2zwRzDDbxTvbbG9z1buLyrxXSc+zyzfwtVj19T1HJVVPkiKN7IRM1uWpTbcvTY+nq1+NbFvgf6vtSw6QvzcHAIV8jPs9+eWE4DtZRllloBpnsUuKSMzRhAozBKrNboMxli7AG9s85xxSUWPQCaSmZkBZLZxPvvlIxOZKYWZTe1E23yzmQD0MClijR6xspslMr0FJujtw9t58jeTneb2ZvIT3HpOBPqagBMglN2gBllrzCQSikysudbgxI1DmxxVZ7WsneTJLc3jcn78SZP5PE25rPQsl0MAJhlpjeqrLGVudJlTShLI13NY/xnkFgCUNmvgJV/UJ8kOD4bUgNLqjNUoN2gNnC+59n7pHz58WD88PbjcLSJbA83d5oT1Hlh6W+PtCXF5SoKnEzIqJYY30+1x9n3BGfbzJ/xqcuIX7udeD7zS/4j3fs7+78tOvBoVPe7j/R4yE5t/+67/VqbsMDkDlG9t48SPFzEOJNvBoapoiW+Eud8BJqkRoo/DAGF7lHWMnCEyc2/ZGcFqh75V8HnNuP74t1/cvuvua8t2Rwtqr/Y9G2KEct5q+9KVSZCEm8mHhytlDkFshWaSQeveGvvUAYSpLOF9sKoc05ZvJQiGmwHfHyiDDOqePbUqI8MUDT4bZ7Q/vv1To2bjQ+NDs5NbY5HP+3bVI15HmWL5eLPoEsAUjXm1EWp5uN1KCHKasTCaVCov1s9P69Os01tNjTQYzIwRig4W4TFTBH13NwMG9Nfu9Q8BQKUqr64+XzFZsdCSTCmYgd6jX9anp6enp8vlsi699wyogxAefKZ52lRPQdokthAbwpmWK7IjF8Bg83Zw2/7+WF39OvKS/S573qeAmq8cLXMHu2ODX4rlauuVJAuH3fgOy1MKkuUP1EHK9bpXMHkRdr879Ndvz8KWtw7AVhtbeuV8RuPRujeae10/UhGBogmgBUCEwX0QAMWcc9fae+ZaDGEkBNaJjGMTpDKGB2XAWNiWR3RjxSgywTTDRHvTpj+c5ofW3rbmypk5GyaiUa0KcJSOZg6fbUqSFXX0YKtOiEbJHIBivbvqmuemxp5IiBJDXZfL+XF6b3z4kyKbOZ30ylZdM2DyBFkLBqFKfODW9vkhLwpv43HHI1cPSY5HAkrFqnXJZdG6KHujcZrc4c0MadMDJ2Ob6AY381lsgk0mV4c6+gprUCMmGCDTlnd2NV9/3KxvKJ+i2W8s7G+SjLAvM3FV01flXmEmI1qGRzynomlHib6j7Hu70+y77X+njvdVzwvtz4Zg3/nP7oc6+EuH7TzKW+3ng9vXbDfnR/h581FXO2UZmWlAwugyWbZ0mdQAm8RM2dIrOBI5nLQTNwZgmFSLAUkwMimMCkkDzmqttdTUHGYOzt7eTqc/nE4nN+vd0RvM1U1BKHsacgzquNoxKe0pXIJsFI0aS0IWxqdDFrnJUDUJw2EADGW8L0s7+/mJypi9zRMykKGQSEWHmxSQH4bwR5z7zwnz+oaPcry2KXVA4oC2kuhQIC7ql4acJxOmk9s8mVPz/GCTtXmimbz5NKFNMneEKxAdcUFvmGYpwYQc+H0FzPxG8vwVuKIxR1qV7f9rSOG3yjTb1eKdAb5LfTQf5IUFrQOgj1PfUfg7w/BOy++N39BsPx53M4l2c6kMed12fmFACJDmJGhkT02FOy29XkU6DZYOawAsE96Dzd2ncOXG8Gs0asTOA2ZKwMCsiPWSBODW0CT9a2VJgW52sja1NhsnAEaHQSuZiOxaqQSy9eEF2eZdcyTZwApdrb92RR7VsYVCajCQCuKGqqWR7tuqJ3I5XwzMacppsUakUc50ZReIyu86rI1+yM/IEdXlTUhuSoYY2l6gkhm6POb6iH45MdPlJqfcvTWz1twdzWnNpmZsglmuUIM6IuCBrIy1V4Q/8JlvLS+aO9dXJTewHt9CuR91Og6OhV0dkNcyTHukx26550GrXjXIwTS+wZcxTGng3gx/rqa3/p8X585K17iiL1dce6xKOV6fcizwGQB5b924gWwzSlVDF/ZB5k6ki0xSUGuacj5lBkiujL6uGUqQRhsjKtGFrBKlRfwmyIpIyAzA/6aJRSVPOoiUR0hxmqy8ugbKtRGS8EAgUtMRYE1IsqmCFMnBtfbSNea474XoFBGdeZ0pMtX702UB2zy1xdkcRhjFju7ZHOyMSUh6qfgfZuEnyFCpe3wRABNMisxkpkFQV4++LnM8cn2yuDQLCEQnjJDS
FcpKmvCUFLmyi27oF8QJUz0ZIyF2PCb6Abj/mnKIk4FvKv0OFL36Jr8Vn/t+AK8Qve3bqsh5sNavKnUY+FCp+/p4rMZ3p9mfb38js70MUu19cN3G7TlcGR9wD4PejI4ZgGmazEwxhZWxHAIpSjTIMto8PyQNvrobVwBdfRRjBMGD72x4/22LnqRvnHZ/zBkAsljIiiNPJJxWflGwqnwzKQLzebAMYlC+1QztrPqu2y2qPpKKxXAwOh545wVJWbgfSUiZGWtfuZhjObvPrmbulFNmyq7uqOTW+ge/svb9kE+WikI3s6L+R/ZUsPfLcj6fH98sH7R8wPqIvlKpyAizaVohdrdMP6VpJmTZe29vzGBuUal7hQfeT7k/EhN+bbkBZG7kqMG+OSxzxM0x8BZegZiNRCwzK6ImlLstv0fI8DZV9UXM5LXZ5au0f/Gv7lR8Cl4vh1nh72gNQLYy5CWHJWRyB1yYhg7PQO89PFGFnQoiAb2qSxdeM4IkS/nTawrZYlCZ2gbWzdG1VjeZgYLJrYnZ+pYhXMVdpEpRIq044IZFrcFZORzmW3EPbowJwzGOIHzcXzAiel9s5bT2dV3RG2K19MzObGJAAmKfHTa854d8niTgB7MjYtW6rpdlvVyePnx4+vDh/PiYHZlQgIx1jtPJMfkEJdIkMiwn9p4PD00JBJDj4Srk7cd9+Y6lQI62gxJ38lq7GXlr1R473yEqADKXIjJ096lSH6XMbK2Yh9V7VM69u5vZ8KeCBhsVR2GsyC68xC//yqrwRWynzni0H/4+39g/5l5haQAVAKF60IGqao1tDgMwiqPSxgknKx9HkAjRCt/RrtYRvfewcydh3XoaUpbCCiq4ihU9Tp9m625/fTduBAig0Wg0NPby7hrpVvmJIoDHKTFo8yu5NB0ERTlAo1wTBGUU8/eTLxUig0gpCEBJwSLJyoNqJgjK7JLWmMd9BpJFiJPFazkodCKNfuJMMMMY79bl/dPlvce/tfwXC3lYe3BkwlY5sQSnBT5hanAHHgblN70ohYtxZ9zWwZFd2bwgUp/rg/2NYGKWsXLrQCKJe8TrZ3cU9bQGKDGVA2HL5Hqx9dy0ol/w/q8f/vt/6a///Z9/9cfH0/kps69UEjEZYN57mMc6rT776TRNp+bTxMbzO2sg5gc8PWJNvAm8hdsfXJFoCRdMtM4x7K/lJbxqM33m7P3afl5vP55PRYIBQIojT6XS5fZbUP/pqme2V23s4eBtut7Bm6nukLVpw+aRAI2E0r4dTZX+zg3TPYa010IZgG/jOQisKj7Zxup/XM4xZmbHQL5uhuodJr5/dPfWWrWv61r5NQW5lHm+Yy9mlup3E8Zr4MkXnN4v6X8EYT5lz594RLq7pNbaeKuVKQPhiUy3KT3S1VqLrlnblJA1p2B3YRReNCyr51MaUHUXASbl4wuSlkgrqMd2vGmsvEmHBriaROWklvmOCsSAtiQ1JvMubnG7gxoJSQCZiYg1ewTaiMdo3iaasWVz86QZuMVtS0BAji0SqS7zczXC70uqyMPu9sxE730dsr19AQrKTKwAe5BbcRDrNIMxCUvzOdaMqafYQVnyRwjTP4R8ISyz6/H947H9eefS4JLqGeu9Z+Y8z2Xt7vD6juTc/fy459dg958921/Y/yPb97C7kHy5/zEeefdD+VTTXmrUchIFMJhFm54SgZSQE8hu85vMjFwzs9KKJjOQRlS1mOGY3r0tpKQkvFprWQSOgEWANBMk0rLAFpJKbm5ZFnNCUVImRCG4R0wYfSoD34CCaoCbZ0Pac3UJoE2WGX1d16fz+fSEafJ51nTqyxk6oU2uUBIBAkiXT0QUBRaqFPBWjHycgW7tpt+fDL/zFnK+M78Un3ZGcF0vy7Kul3VdZTKXTw5BQcVKIQC3SrIDxa5kKHuaQRF9zXVdhZUec8FlNRVckd/8ofG/kVxfpe3zdfsV2dXXl9APvKjBcWsn7mZ7X3tB55m5ruuyLIXSunsRDOQm9cNpHp7ACoLcLdC7QxwWCq+e5/E6942PXNeL/bUZyMBNnHsZt6XN9zzVWg0Nnye388Q1pOcopR83Lp0ZKH8sI5UptskUbVPuGZir9t5JEaFOLasqY4BbWjh9hO/oGpq5h/Ak4UUNQKt8qJHByC1FCQ4GrCHDzFSZSqINaqhupVqRAMUqAIFKZ5JCUFVRverajc9Iij1/lUFCnlgvC356n6K3yeYTaPRQrNGZGcrmmtyl4jYz0TZYDEVmO5YXO67y+9TwdaNzGOqZAy5Ik5QhhbJnrqPAi/H00Gin1gxMpPp6QXQaKv/CDGz01sy9QDiJEXHpq3l45hZU/+J7NCo0v3yer716v8/b9s3kaHZ/oeWOgx587bYdO9RiMDN3HGZX69qYBq4BNAc37C53C4VPxFi+itl+c1zw7nGUxLxJ4hmXn5JdZ4U9Ov7u50kUTl4qOiT2iQ5k0GGNrSz3JCRD6DRbBCr1t+q4ikE22L6GqNQm22ITUdifkAaDCWXImyiDqxAXS0kmB7vMITElpG3VdRMNSpPBChHaSmjaVgiicrnKq7qV8xa0GZAUQdq6LBXLEX19evfTZV3gZm1+8y+TFMjMvoanIGuEObIn3WBg1OUZyxFyfep+z3k0ZSHcNDGpTHWgasSEFGA5vXw2wiInmIDUOiF7A+TuNNHMHOZuk7k7jDKKhExZJZYN5b7hnan+IyDyO5Hrkv3LMfc7nX78eFSLO9rOrZ5cWeW933CGlNJvrb34lJT2uMP0N2376um9aLa/puJf7Z8jku+qvsuYPqxOUGZ4EQPxep67UQ9c54S7ZPEKPKDbqHSTUgumUjSERFMRAQuaSKJ7DZ2IzjUzq3Tvhomj1g3YKl7WyrlikCgLqsEABdLFrFKvqOhGBkIJq1ogSFYHATRj5SeFVUAPvPYsCyggh0AHREPqWuS1NlwSZJLgRmsyBvC09lz78uHp/PBufjjRyNZQIVQikUAqOkxQAwlzEAaSChGA3fCz/R71S1b9g6ujZFTKzuxVU6Wij4A0L//Oal4LH5ooeHC8myxXh5u7mzubmxna7K1Zm9gmd6cZksjUs+DH1xj0Sl41Ab/SOPyzysGFOf7f/3z8J18Oy9xpwOd3bv+20HZtEeuZGRHrunKLd6z+m19Om7PuxnDejffPAty/ltl+p6lJlgH+KaA895ngoNMrevsaLA/QjWa0yJTlHIKhomTSEblZSaQ0k16RRCDZe88eSAqjhIIRqdyMWSYG4DLORRYUJKPt+p1iTaCARaiPWa1MfiVpSHeDTMoALEut11kFWfHptgHu2Ajr42BUWun3h7llZsLC4O6rwHV5evdOxPSnP70lzN6KTUTvJEV7o6yrMlgM/huY/S5V+XMZD9VQrLLCzJRVjolIZEb27CGJJlAxyqIgCboZQXI6zaxKx25s7lMbCSmttTZba22a2ObKn84I/ohs/1Xkc5X7L4VlPuvMuFVyqLPMzMvlsizLn/70pyPsvkM33q7Litqo31bo5LHlU07g03t+Sn/m1X7flTXvtPn27a79d9/pnc0+3kp
jBasYCZo15Jp0IQ0wZodgiFZmLywmyuSYpuGsHLhYbPFQErGtmbXlikcFk8o2/Q6Iu37XxtsmqUcCwPCCwgUWjOMV904Vik8KdChrAGAGsOI0xspdlRgL2OZ9dQAGLrFkLLTWzIS4nM+Xdb309Q/KNk+nuUGCIqE1Y3o4bSi+b/mQw7TfZs3Ex572f3bRFt636XcqTWkAlJkBhTJSXX1VJIo+CAkzAqkKq/NpmkTCSG/WnM3dnW6gW5vdJ/rk7pVfnaDvwXnjsL/Jxf/zyxdY7ru0wrvLNL5LFHrxB3uc+4tKcNfjuy80Nk9pRFScjJk9PDzs8TM7UFMixRFq//jJfPyrz+p/GMQXrm7/FRNlFZsZdE/KdsSJjqtU2/dQ8Xy8dh/H4kbDSNppaq2dPzwSsDb8oSZZroIYc9n45mCjX/yMc9cqMUamkpGjinEqWy00CIPJ9hWIdYWLBMS0rLAfSWptlkYMO1IdMoHQU64mgGlgIa9ZWM64v0qDWWtWuQmKdSltzhywe8VUXM6PKDac7BVG74YMxeNjvH2T64JlAZOcvNnUWu+r+wQjckHlTbVyqQ7H9XjqFXgecfwrys8+h3fPlQ6M1p/y8585br0sKWUgujLAbpIxllhzuah3b3zzMF8eP4BpXiWV6q2nu4t0d2uNXpwQDTTQrM3WJvNGbz2lp6W9PbXTG+F2UhkXmbp7JW4v/9Xz/+T+35twC/I9tu3t17y/4ZT62H52zXHc3xXe3D7jdshx/1ztv/7alL+77AB3Ufti4/7dVfZGBTye7MJtJBVBFbcQyTstj58zrl/79he2cwvOkEbIyGjXTSTM8VE/Aji4Jb0+ulULt9nup8EAUUqfT7IYP2QCJNw7MolwddG8Am4mTDVWsWasPTIsaWYF5SsBGEHRBITQaKkkLJAOU2H2o5IqEwW4G6CKy5SJciGTVQ08rRiBIVbVVqbBzGUGN5iJJmTU1SUzM4rVsiqK1NEMbtAookqsSiyLzpd8c3Z/oJNhcRHnmRr2OjJAIh0GKkT/3JXZP6PUeNYgZKu7hkBfEQt6twxkuKqoi0wwVWht3XHsphXdWNa6G83p5u5yEx3mWfSlRtCxBYPdSJH+/8pX/0Nel68Z5/68mwavgK6W7zbP7Eof11lOu3K/i5nZlxQfgd0/jph/rfZdv18bN1/pDs2Pb/bf5sjFv9fv1S8TpMxMW11Ec8GmB+bag0qm9Qao0cNh8sxk8+yGKmthipbqsbBn9kyl0iRj5SpZzUEFyCTVEz4KSTEo09iWEEXtq3IUsNbfBm10aBmgyxL1lo85qkJvQDMTDGSSKOf5ILwEEpFK5SgGMqZzuEFO64Ay4nK+vHtnk71x9+ayNXNle5tIGzNfBwBryA6UV9Ux1gS/a9my0wDBJIOyd60L+4q1WwZH5EwOgyOZxRkt7cSr1DUIiaYqmKo2020wFpGsh2rAfiNLs47+2139D3lB+AXRMi/aSrudvv/FrdI/wjX1sfe+K+6r7VCW7iHI/fiTu53zUID7Th3fzSVft33o9xxk6jjY79ocwrzhecfGKTOKNx3Fi5qlUAZg4D0UWzNgxLEZjAGbFECvItUEspmTIufu0Rc45GGhVJRaDQhmlchs25qZZUNbRVYUEi+morRwIerDKUqzke6kkOqdrqpMhbMDLFrJYTcmpACB7D7MfBkrw5qFB9PLE7CFvcuMI1JzXZbz+3dw+NTa7N7o4No73MVVIlEktL1uiWBEXFewJL4U3PjHlXKGo1J6lcxgBqKzd/Q1LxetCyMYnT0ZqQAC6gFuLnMzAxXR6wmgk44chWXMGmFQzdtWdj2eaQBseRufff6/94XXV5atxI3w7eLctYm7R8Su3I/f4oD178Hv9dwcM5ieq3U8eya+ndk+osVviYWHGr6NdKy9HAm1iRd43k0veaJSKA8loNKEBrlhckPLjspTYRgviY24yZBqlGyiItia5WkmGaHsVUhVk00Y8XFyMZRGcjBZsDJOE1nhjzlU8zW2MimXJdOsUVGkNLIcen8Lc6KVrhBGWZbgxuy+9Rmwm4aPQZRxzIcGwwzP7Ov56eyYTvM8t7kZpiljZQV0tl7osrKRNYtuPo4rQvl7dK06DBIkRjISPZAdfc1l6eeneHpcL+d8uqzLOdceS6yXHhFW034FWpkxE81FEHIgjYKDVphdzetmZmz8QQH53Uu9WF8tzr1a7lyyksztRQSGByfqvjCUZHajap+f8f3K4JX4dHyaGf6z7dKWEKQRKlMaaixEDvEzW/+bUFHb04iO3AOH7WLvSxlLW21UvRJgDjNOrRnDQj1sdU7lpOlO5jgfJnyavEayN8vKF1+Q6Kr5Y9C1C1BW1LPYmT4CNTPL8wmWtq8qTGIlraaJ5F4WJKvAXwzE38cSviZj5QiQzzSIgko1VyUWaa2hrr+b881hRqVcin5Znt791GZvkxnfJta6NwhsCUydcLkBgW9Ga/qPIr5hgkwpMvuKvljvsVz60+PT+3eX93+Pp/exPMZyiUH/0RVZFRQAjDCH5pZuMsgT0UzZmYaMEM2bYA5vbD6cQy+zzn629f6a5f4a2PtDPi4bfvALYJlnGvAGltEz2XNQfaz1bLfZ9/h3bUrw7qs7KOYj21/dbH+tnZ92PmNkXnp6D/b7WB8kBjFW6fkxCM0JMkCbgBQ1z4ggqTBZWDBIJmX2oB6rh6/evY+pxcCVKJUrBnND2K+xOwW4F6ZiwxVviYqGB0dOVsgH7C6JIxCeJC99bQZ3wukDp6LRkFfEjMNRAZD0ERnJAnllyUEVPrVRYuL89Ii/Gdz+KOWfHhhd1lAE5ZaQUh3ZUB74o/H++1vjW8VYFfdypnrk2hHLcj4/vXv//m9/f/z7f+X5MZenvl5iXYFaSWP2sXDyZuu6TtPEiVzdW+PcWq6JSHXxj2amTK/X9pqPcpAfgPv3JLt+/7aGz1G/l7ba61zjNnJGm+vVNTR79dkp3e92+xos87M9v27782+Pwex3v+LLwVCFmXIsCQ40NQTMGhgwGdKym6JNvk2uUUB/ZJLZmqU50EfU+jQwk76OdFYOdV5egUE/o83vW6MdkI/twd7eJd+tdmXVcsXGnwMgtHTD3AxwAraV1yCHit8VwZi8vYiPh3J3eEVMr7FO7jSsoVjWx8cP7WFyd/zxf0jSSIINqEEvRGoAX2A1/nOIAdeAtMxEZPZYzk+X8/npw4fHd+/X8wet51igAD3UQQJNAxSLaBG9d1uJyb01S2+aRUREmwJ2sN5I2H2t4B/yvcmAZTZSY7AyG0EOcqbiMd9W0NuG6dh2sABJCRHDoyhVALQtPUCbH9601kpl996jjM8tOBIF5zUnabvRSiNMWcpK7n7z+jK3aM5d+/Pw9+ZKj+17OMu+zqiPry0DtQNNZZTuSnz7WSWGlmUPQM0pDGaugcxkAhEJGCruoOBmCkCM+qIiSXCUyRLNXfSUlFMo5W/08Gc84NH/PVZbFleHApbCA9izn9dkEnBzb9AUU7fsbZ19XdfL5bL2DqnCHAxoZhpQfA7tmZI0tzYuZrBRqqNK+8
EPlVT3okt/an8EMhJYFEZvzYm0JEV0WRbDCbasSEokVBT9G2YF4OEPs6QpcUKTOQL4z5/4X+/e/Gm2+QF5Qp/RHmBA5Bor2kk+obF5qydmrBErVepZ/MYdz3iBUXh9RvhasMBzK+TYfjzKsGZGgXKNyV4VKVTpAhtFmrAHCC3GCio2SzYpe64LdImIx3eP7/5y7u/a+v5heZSnvTk9vLs8NYOZla+7mMLSIaZN1k6WjXkRTjF18cFgwhv5mxn+kD7DnNaMJ4VcrUwZwcJycYg5xSv1FQ6A7VEy+7O2fb1+pCnfX8OPDvfzfdWDuq21920SrquPhltO1na/eNAPx0Me1EXtbSvnfk3jqvKbO0hbK92t/+GxHAkit43Y8gfH7vLeuN2WqluWTT0c3JLY97wiHC33LzNaP9JtR29K9iSmstCPnDPYrHhuCnQ36mtN/yJI8omn8VWu7gv2c4Sq7nt+yv41kH3pZifz/BAe5JKW2SMjsCYAK6I/pSFASzNZNwM5jznGkZ3IyCQUvYuGqtYNgPJEApGVUMokWfWukQammQtytFEAr6jNyN5XAKzAdUFCWBjlXjNc5UNe/co1r9fijDs/MVA8lOS9I+TDu/dv/0D3GRT6qnUB3JwRCSYzMo3bOg97zMYnAQX/JPU/C5kZ5WszIwJrP2/Sz085mNyJVNeSMg8CCcoJ67AGM5gsQAUsKNMckZkE2o6Rbi7wUR33fvr7ZxjMfyYZyv0T4eP9xdthHRzewxcVH7co9aKUKdIrM5vn+dhnF7vT9bzZ5Wvn9vz0vrj9NfnE/VxNMxBVkKjsyPpJxbwzi0vr2X52MoPMvILyIz6UBNAe3ra1u00x9VyjL2toiWQzwCyYcFckO+kId/OJjebwVdmj9x59UWePKsdqGHGbQbJySrOUbeE0tdoY5T3I6xJu2FJ9PRMwg8S09BSYRG/NaeEELSt8tH406m7TuNGAcrO76zm588G8/9vfHf52OmFqsJB1s+42J0LhsmR0oe2qhi+pdWpb5+2jjee5Zf+QQmp30EsVIrUi1uzreinlftalR0+s6EvkBDI7h1W8At5gAZtgSggWmGAtZknJq2Z3d/okuq7P/EbjzlE1+ysO5bZ8uX4e1/uZpvt4xQ7r7d+PX6bdGZi7hnq9/WVrFC9ZqUfbXNujd416PHS7BrZDdz/E1UVwr9n3J+BO7T5f/F7bPxoX/1z00f7P24Uomxf1zNuV/L04tgbtzN5tp/sCpDiEqUbVvamly6BgJOAO0M3MPE3GtsqI1VJJEp1VKNsEc7nMaG40d58jwlf2Bd16REC1iqrjb8b0eA8yCcCZgy9ByboAbJU6JBvrkFKsFMUOWQVDopsBlk6rlWrNUZNvk3eltw9EkJGodHgb/gaWJdo/nNfpKaYHf2OY35qEkLCwTeSKMBHGEVd5//gd4IsX7uxrX/xjSQoSFUSGwiQqMrNKz6uvl8uCqIK4SCGzjPQKdb3mBkiIBAIiTKKbTa21Zub0Rp/YHDaKRMbnp7a/CnsWLHGrDT7S/4d8XI4jeQPL4CUF/bz9Tt/dmfB3Ch2b7itprVUx1ee386jK7yz2O4Mdz6bf78RsP5xhDGdAoXy0zURMyUrpAxgQB65ZrKXftVkuI0HoaLqnjbT/NtlAwGHN87KSdGP2UCSDIOhpMlGNTW4W4c3cra3OzIzItUcmUjJrZqRFBo+TOkfCUc8kWdw1iSJ/h4TmDYAMw3YHK2M1ciFHiQ25GVQDYLZnqFbSk2qi3+GacZVbYvMs5nk5//R+XjH9ecKcyI5QsxYMcGFOKMgA5SP+x7bEP1cyVkCWAaUpU6FIRLihGcAsle21KsuB/Y4qXAZzWIM56JtbljBr7u4+WXOfatthI3cpeeO/oFLEVvt1B7E/T/bX51Nexh/yidJ2E/hOe77WfuNSOMhrmAyu6pjTNEkq/f7i2XAzJI47V+Z241/Q8rs5/0lmO4BX4uJfe6qeq++f2X8OS1gIgEyHscCZSusZy8RRkW6Y8ECli1JEMeWSvHlVivEFUI5AxGL5cJL01mO1c/bItbOt6hEBmhBwEUEEkbS07JM7c3Izy7Wvy9Iva0TsQT6Z2u8CACPrnWdsRFPjrBgKAKcqs6UAOWzouo4igBuZL7RxXcMaKClVfndPj7ee5IM1W3t+eFxC7pNNM2BUoDWXbwkUCUwozvlrio3t3/0zSwSYyJ59zb725RKXcyzn9XKWwp3uQGJysGZTwgzuaIYiGJhmc2dmEpLDJrR58nmy5mbN2uxtLnpIPLPDbpj0P8oM+bmWe20eOn6h0h/a7HCwDb77sv1973Jvud+9XR/fvlP6eqaLj4e561YEvx9Ro8AAnZ99M3jhP+Xcjj/7jcz2rcRHwS/pBV1IVYii8vuNG9Ky/bwSoiqXfxQ9xcZsmxv6JPPrLOuAm7nBwybLtUdfYvXeO6OrUWGTwwJcqUhGuCMMcjez7PM0TWtb+7JERHZFhdvrGOkIsk5aAF3XN1hyCpF9aGokCTlnI2mwRqM7reqfUyaTEqqwGZEDjnP3wnXu7nuNyVykN0tf49Hgb3yCDPOE9YJozIwWVJPE5hDdd1/OVitqfPR/ynU+kcpQhmLN5dLPT+en9/H4uJyfCD2cZv1x0RqOxmRmakozOs3MaGq01ozNI9ZQirKTzw8P8+lNm2drrbVKXCpsnRvnEG+N902z7yvRX35d5IvK/XNv4u9NuR/lBjp7Tdl9rhLcpVT5kQ+yNH4R/774kzvMp9qe93kNlvnEM//EK/qi/YzC15IG/FL1joCyxCtntSh+AVj5MK8eqqqDOryoGjvcKNElTq3QUxmKebduItm8rVggt9atdxqRnc0MvYu9d9GKicRpOdmsluHu7ovZ5XLJXPqaBd2XriVZkRjDulKZ7twZjEX0AeaKCfMkPeQGmc1k0pwczOHUxjyMip+hObxsS3f0BYD2XDYr923ztdNNqd7XC95P89xOJ0wNfYGlEEBUOUAhzazIeg536FrEmV9If/Jdi6GnQtEz1liXvpzXy1M/f1iXM9EfHmbLN+oymCKjq70hSadVTLDT3GnNe/dQJGGzn948TA+nNp98mmkNg4FOUi3QCGPmNlvex/Z9HXlduX/ePXxNuf8e5FW/yB1D775hW0DlbrOX+IHt76jvRk2427X23WJ8F0mKEU5zDaWodf7Be35nPr94/p+luz8ir+3/5/pvwSeD4DqYmwkOM/K6X1VavQ/IGMnaLuB+VEbN7XKuXA4MgL34v8jBg2/zqU2eq2tlGj0ci8xpztYNoexhLRXQ2rObW1TlnTZPy7LE2t+/fx+ZhYA3s8HfL5i1zOwRyIP3WAJLw0KRTEmDD25urdgTWIV76Ea5Tc13I31MGaCQgTZh8IyNMSmZ2wTJGlx4uix//6///LNxOs1Fku+RJNlaRetnGNZ1PKv1DwFhODQQOoZOw1RkmN+T7FTY+wMysruH8+YevyKE6LFetJy1LojVMhB9fXq0iDcnn+1trqnIwvpWnAE0o5mbWTMnASMMzWeaYXKbHDYWVgXW9cQggx7hDx55v
UegipPStsf9uezG3H7m4/yt7Rd1fGcz7ybpfUevvtfH3e6j51UyajsCDjb7Zm3uZc4Ow34LOR+3D8AU7zv+Asncq6cJh2ntOYxx2NTz9jz4nH5ReaM7lX3scDxpHWRvvyP13fdwTWLa2CJp4khtFfeQ249EkX8NE/6L2zcu1ErPGeExIob9vgVH7s/UdiuKC6zqZ1SQDXZ8Rtf93y97K1zSBLoBRqXLaE3NOE+Z6Y+LemdFvDBAL41XdyBNNLk5jI2C8xQPsfZ1XSOiZxqKV5gZoS1HAappODPT54qnhHlx0RNm4yQ18Pn6I9p1JWBj4QICtCr8CjgsIQF2fXAjikZholaiR/YPH6a//hf+/Ge0CfZgTKgrRTZDxY3Ui7cNFMln46b7+s6/pXyuAXH8oSkN2ZBR5Wud3UzNWhF+Fp+075py5JcYaWaN11DUJGgGN2tubfI22zTbNPt8smmy5nCTuSqxDYPLlEdr/aO5BUdlvTNQkcRtvT5J0jW38bOGYvv5vS8Qv6nl/uLN/eI7/lnyM3HuPyt38/DzIT5SCzwf+rtlATk88XfKHaPQNkrLm5m3MSv4M6r3u/P/lds1rE4vl2gqAIcgCojyr9YIjYmgqtrZgJi518XbZoVjfGQZ0QDAm/zvJAgD0sxBmmJy86JzoGNd5abV1CN7wo2RcmPPWEGXGtmC3qzD8XZZFpyhZdy1KqnqzApuqTckMyMjIjyt0cxRQfNuFepYBZkqE7VydSuK3zTU+ja1kbDNft8GdOfuAYCo5wFuPikz+tPju0uu/9oa3jygOWTohATJfFIGOBiOx4FUEX8YY/T6w/wry/Fd+Ky3/fgqUVWgPKgAwpUTIcPkSEFUWuZh3+3BNvqmYZ2XNHeY0c2m2eepneY2TW2avDVvjT6ZT3E16Yb3fZu7c/Opvjq6OxJwfa9rb7Cj0qidayQ9vGzAfWQwn2uY35AmoU6Gty043Ou7b7+6vBrn/tqY7h3uGnf45fjkHRXf8wO9uNseY12fuVXtYG4fYWaZ6e5VG4Kkse271bMlxQvtt5j+z/bX5/bHruJrPcgR/ZKqEBqmg9hCaKAKjNmWs0UClkBhMuVfLf1eB3iGG4/XqYYr612Dgz7A7VNa86k1W7siYu1xWbKHecJDJrbI1eEQ0xsaGyfS5I3rEhERvWcI7mKZ4oPpnZJJmYNBwaxW9yy+7xoJaJ+6RUJmO2sQmGRj2ey8pQPfHAyg4L6p+2zOBNdY1w99fXo/GTA1NK/aVQRglpmkqqIVzCvXoA53SGf/7c32u/ccz96Ij/xwm/sEIGNV79nXjFWxKkLZmWkUqY4qmgr6GF/blpajysbgypB5G6Wx29DprTWbGjkyCbJWYqS2hOBKTat5lam00vUvy9GAO+qWghmP+g7YVcf1Mn9WCWqzObbfXgXX39/AMt9UXlxAvL7x1Qz549j+fJz7c3mu43DA1u/0+4614QDC3E0PNwc9AHBDrlM9jjmNx6fkOzHbgWuMIxDFpZUAS7NUgaOyR7K8phwxjiJz588IYzmrhtsUKP1ODG6fq1wL/pHDwL8qDgAsfWrN22lG5HpZVrIvKz1VSniVIUAkaJ0tzWw2g88+L3G5XC6XTMVems0QHFANaZZKChQTrEpsUBUGCV1JNYgq3FR1n4iBL1GoYoGs+75V277CMsREZCIKSGKjz9A5Yn16VLN5njBNECinGcJggxZrjIu/4FX6tvbSZ8oLxuZHO+PwllUbmFQgApHKjuyIzlTE2mOJjEpDIDwgttKVOyYGSjI3A83YfK+ODXfSAnKxYqiUI7GVfIUE73UldYx+Pl5y7xthzgGb3a70Myx3boEbR8OLW3r8Nh/auOpfRe4WEy9O5J9+679AXo1z//iYHk/uOGfi2cN6xNpenLrvZF+1XX2qJpLTNHELniNpfu+Y/SSzHcBn9v/s/Rcr+3j6o/CZlMp+B6ktv4mDAbJ2BVWNI9tCSDH8q0jb8psEwLaVrwbabihg3idU1E3RTG5PcsoxElDJjFa86k5Eyg0mUgN5p8HUwsxgNk/TtE4rW70hy7oEmDl4idKxRz27CFWJTiGZWexoYkBNpoSYkpWWz/H+G03FDTw0e5EgCcMRuj9BIswBZXaSzdvJGB09Fi1nX07uDacHGpFChiLhJgX3SeImPi9vzPaXScl/DXnRsvv4q340ifb+bgAUkrJnrNmXWNceS+/Lul5iWdaU20QW0bOS8CoiQNjg9qkiYZQNLJ5mcKeN9GGRpMssN1M6oJ3fbOPEigJUXjv5fcV2Z/wdf3HU70c1wo9iCS+Oz1Wz82itX7d/FdD7Hni50437t19Rxd9b7nea/WfV+vMO2qJlnp/o3U16bnTf/cTNuYU/F7xeenyapn0mJ7nRE96c0m9uth+vZeOWCeCacboHR+5BBfuljz3s0TVs191u+asAdhymwiu1Gfzb61Ka3eqXJNOapFS4QJo1b29ObXItXb6YI3gORxqCQqZ3Y9YKKSqFvdHmeX58fOxrAmv0BBAI0EY4vJiscEhViVdlS2WCwXQbhQlVVHnkZt9VDe4K6CygZoQsbCMyRghmaGbdEmHuBrhR0XNd1ssZbfJpgiYgEVTLPbAUEpR3LruryPK3rir0ojX3ovA6brf2e8Uw9aUva5zPl/P5fP7QH5/W5bwsS1+WrlRTs5OghCIymQ6aNRhcDgTMd5MKfi2LZmbWZmvNmldyWh+P2KazhgWfKJ/5R6/0Trlj6I2bpdWd5f6JZjsOE4M2TGaLvPjEHXxl+YgavOsg6RtZGJ8U5/4pcpd0eqcEjxf24tywb28hWLbTFe2+05u9bQasvxIv//Er+tbt46I+wsPzYv9PiOg/ZK3eGKG99y3hyVRV8Uq9u6lHiqmYaGbmk8Ed5jJAwZxIxqiaPLVmEdHVmTSzaXLy1FpIWqwTANaMDRQv67uW7BKoTCpd0sbVXKX1lBoR/SFhUD+OGlbcjPWNyXT7O5YmAlnJlNS+OgygZWbv3SJcm8sUGnckD86JXwdk/VL5XKvtzsaPdVnXdV3X5XLp5/PlfL48nZenD4zItfceAdASSiMk9r6QHmTLdJ8KG6TgPhWcvsOeteHThNbKkL+uUPURdP1lOa7s90aSrQ378nhd9fGzMPfdbN+1PDl4zHGYFa/bv+KS7SO3+Cva7M+lvbb31+PcIR0+XoP6hhzXVgCUfby/BzD9ef99o9yMV0hmY5bab8oWQ+kv/vz5Ue7aD4ArD3+fb+wfD/G8B3n9rtRkA5IFt1fNJQMr+H0bum3lYXOd1Gje9prZywAfntXiWK000P26ZBWDUiWm95NOSlhr1cweLniRdG3xDeaAz5zf+Ns/a1n75RLny+VyiQjiv6qmD5MWzG62IlfM89zWNl3a5bz2JZZl7Zel9+7TbG7ujlQKZ0npS/A0P3TGylhMzdEmTG5meLjUyoy0oAEWwzE3FUDj19FPAcy5RibQnGnI3kJ/aG3pl4hQT6LB38LLosyprUrSGwKgYy9IW8uLuqEjoiltm3I+5/6+LK/O8c+at1VKOSG2O57D7qUyR+7y
cGAWxkc3ZDBWqCu79bX3FbGqnz3PFo9c/tbf/9fy17/194849/NjzyA0iVqWwPLBJrDp7fon88FA4G2VLZUunW2hz0Q3pNzQqGaYpqfS7Ur2aDTCBcpIeI5orwQmQ7q2R+8wIPuw5KF4A3D1DPX+eHgpRkCTICGJimoF5eTYzo2ybNv/tZyDmRdz0VjLJpRI37TTDQRnHJl6t5MN8EqY/quyRcftSwSDHOWdYC1oOpngeOqmJbYfaqt8iX31Mx6Fg7RDweUtV30gsdtJ3+grHXJEvnIlpueP+E1M689pdmwwxW4+fK0T+9xFybcz7Y/2xQvt4LGW07aHsf3cjfXliy0ShJmV6USySpnP+dB770uEenVQI+FVpY9wwlcPM7uQ7N7XEerKFJHuniOOpmCYCgqqIKBhyKPusjYqg4HdPluckmAVhEpiq3UwzASaWQiZua7r1C9YH9C8vBhWHgyJGbCPPeH22QbobyViSltIowkiHeyAMpdluTw9nc/n6JK4rrE+LesSDA+oU0wV5u5AVGxpzZgJA+DDUrbcSOElpHhIUNnPw0bwDaEtePWlAXz+su964HpJty/Cpua0me1XzfC5NL/fv3xTg32Xr6ncX7TNj+y+L3Z4ZrlfFwpfRb9LLyDjX9D/F7br1qK5QdwA5gAphFHK9Hafr/C/Xw+Uh0DxGzlGGN73IEHaZLN7ay1OgUjjeVlW2Dkul4psaSY4zbw1Rctp6n2JZfLW2rquH94vkjIDkUSV5iAr9LNCJ8nMbQ0TFbFJgAWycJ+6tuiIfaU2kJpKmKr5jg4G3FxMWEghret6Pp8ffMYbh09USryZOVJ1IYe6tfie4t3v5OUJR7crSCmFkMTI9bI8fXi6PF7yvPZLvzwtT++DHVJEmf8NFBogobP7CI80Ami0wsUig4FMj0TkXrFPPdKsHrbtxhHgy+vZ11cwN+DYAYS5UXODRFrayP1H4z+Xbr979wF8o6fxqyn31/GQj+n051+NhdUz1P5ztfzRLvgU/f5af30uX/wr/Z+r+O35HsTu5Rkd7aOE1h4Tee0mHY6LMHlyP+5gsLEtt/VFuQPRQGz88BVR/ueHy4I2sZ36ssba1aVQc1eHe8/W3FerAiCLhZQ9liVSIakSGCMiCFiWmi0yTAWTysHxSbhMO45U2PrdiXE/v+tN8wkWkBoVmT2R0Zfzxadlmle0VgY7ybFARm65wamXwtt/HRvq0yXrxh7G4lr8dsQbjUHOTEXG2tmByFhiebpcHi+Xx355AjdydhmQMCIIS3Rb0yAVTEcWa5Gx9240RkSE9VBk/ctM5qiqiLoTz0swvSSvvrDSXo5NUgy+IrJieIZht1Xak7244v/HlfG87YDMQWkAeEnLf/mFfytY5k4146DOPq61X9zDF8t3YrbvjXeHO0wJAfge+cKrGho5q9tOkoMmfuN/3/Q7sIVAAqUdtoJzwHObvfawBzhog3tInN7S5lM72bzEsi7ny/J0yWV1GiwCjRZuTaZkWiPwsK4rGAukHqlY1yUDjgd4qRAvx0AKpKVWMyubfUDrI94fgIZOJ3etXrpn4PIHZxhNUyrXvADZ11xXRAcEpUgomAZLKJC+x8Ucjffvm0Xs/uRKubMKoWSqR7FE+LIy0tIYuTwtj+8fLxcgECuAAp1hAgxmTCgoOAJhVJJJoREge5Ahj/TITPXBNpGZLSVd6UA1Qp6usqNbPMxK92vTg5Wjg34PVYWW5ltwc2n2qxFwOM5XHeHfSHgTjF/20Pbdkcvh8Cp/qXx9yx23ClrKF83ebz0VP1evHz/ua/31uXzxr/S/m+H27UP74H+XUUimjSpOrGzLHZS88r/v+p1pKubFq8rajnL7cNy+H9otsutZ+QS6TfNpfsi1c3qk+3K+WCiMRLdQmow+2xRhJG0ReCIVC7JHxNrXnNwgN1kbUPuwwBIC5Lv/h9xsdGypTHUe47S2lMrEHudZX5k50WSjPkWsiEB20zTGTCrNjgoFkpGpkRlwfYtes9x/c1NRV02530rVUFbacF/Wvl7iw9NyOcdlWS99vfR+yVyQgQxQSBGiIDnlIIweqDqPkbCNHcZoQFhw8MqFsgCZuEVOdgWdxkG/eQ1K2lcYuNnAS+/C9VvVAmLXGAdTXXulbPyTaPZnw1KP4q2Kv/Gt/ZKn8Ctb7ngBlvnYx+ftx4v/hSuy79Nsv5shsG+M9mB6EqySlMaR4rqtT/O622t+E5ib/f4qse2LL4c2qH5TsUWhaAThzXw6uZlP02ldHj9wtSBjEQ3Nqck9s7nbZO5szfoS/bIs50su6r3T1dgyTQMdwA3awMqaAesabdPsu1rH9osKAFAFlAz8HhBo7uYViNFXrAv6BM0VVAT6CJjYqSCGfLdo+3MZixVDKgNICpkdkRHruq59Wfr7D5enx8ef3p/fPcYSSCoUHayIJxAijeyZbsAW2F9kMKnMESKZABnh4T0KjZFUJdIHajJMCo0UbL95om7s+ldU/LUzaYMogvRj5OUhmx27AfBPotbv5GOa/SvJ11fuJT+rxF9r30OmvoqB/4lq/Wf7f612vLCAGPzv9UVpbRt50qK2PNaN/53yY32P+giUfrddv2Oz9z7+Zmx4CPYiauuSJCthlXMzt9mapqX3LoIZkCNIB5MGB0SnGW1q/dJXJ4COS2R6siunKs+xMcLth06gcVt5s9x7R/BxDBYUGNisQG1lUVkxjkY5FICio6/oK5AQgV2tJ+TIgPvg6vnogPz2wsE4NLzLt/7wVCCljOyhWLNHP18u7x8ff3p/eXzKngarMFoDlBUfmjSlm/VMgPPtEEgDk9sA/RJukTPV65hxx1TixqLctw3oB7V+VPGVB3N8KYbL1H3TAIdcvOSWNvPPqdnLSMPPa/ZfZIi8qtx3nXu3oVdqVN7Fse7bry17X2v/eOjkp+/ni/vfmR6fSw37s/s/IjOH/lWUzrfPYckkwGSV39v5mtGhYdeIAIKDAEA44jNj4OxTx2ejiZdPqkklQYSBnBon+7NZXy/nx6fl8bEv5+zjTD3dnTKVU7byzvLhYX06G+heLLKNW7kl0mlWpDv1+hdrJKRbTTYGazxXtcioKF8RQmavCkFudiLNHUxERyygY3AKajPegeJGNOK2IAG/Evfv68/5fcuhltVQZuP39eCxPpV32iAJUT0BUOrr2tcVkX1Zz4/vcVnXZVnPl/OH8/p07mfEGZGIBcYwc5KZwY50tMS6ogHFmZ0JFEMzUY7TodnJ1trpdJofHjBNrfRvha8W7TZ3oiRsU7JsUJ/e45DY0lZuBmF7r+PKqD/6E77dkxvy/bsfvjbsv3C5/7Nyvbo68ispzrXWuTs7STut69aWO9h1eDK+XKcfEYJvZbn/o8ivb9rftez3emjnqtQBlP1eeqyYc7eVTv1NARzYdVJ2sN9H/Mxm2t+ezyvXmredNHZqyTSCaThNzfFg9GbLUzufH6s435vmJCY7eVObM+a1T1Mu3cwsY2SwboSRJBH7EoUkxY1d7GaRf3tyGyFN/SwHcw45Ri8hsCz31ZBv4YYM2Iok2EGDiAw
4ACcOcTnfux2fe35/peVIkqK418a97xFrvyxPlzHjQgfPBAmYVFU3OEbiJoti29goZQbtB9zqft1pz2MSxnXwvmKNq2sS0Qua/R9L7obuJZPuRZv9q8GGv1/l/psg8s+3K36Z6YWQ7FzBmyXuVVd1j47c53myCvh5UUru+v0YH/n8uu4ypPDKo3RTIlMGJmiYT621Ns8+T2iup6dcFkS3Zkx6EqGYp2kKrb21ph7oq0l0g8OcJNUH9UeB7CQqdML3TMLnmn2Y7Tt/5i2NVAqZgrAaIPQV5DBNFUgDu0D6xCwUGRwK7rvU7MWrcbhJJlCQoiYzq9h/yiinZRnzl+VyAfvmsDDQhmaHgyY6zCHf4DersjgwFjObWXOb2jRNPk8+tSp3nPXAlDm/zYlF0vwRDXR3gz7VeLqhbPyH1+y7HNcxt6jANRoSx5X515PfnXI/rhY/Rb+/1r++fd7+Kf33r/ZVcFFFFq3Y4I8cHUq/g6OyNpgjhGBX5aXfgWJ2LaUVtkdMHsyr8f9Bvx8fpcFPuadRbz8sCF8hM8AbjLMR5jbP87rq8QMiIwIdGG7fYoul+oq1IcMgY1UahNfJH70sVw/qkR+tmC54HdirfsfISgeRskIFIrFckNFjaSTckTWMNVkKiq3cnutZMN93J0UJfWxgpQyI5F7EzMBT841VGyj166CPVQ6NsDSHN6DBG+AoVub9n7nRje6tNZ+aT81ao5mMA+5LIbVDQ8/lGDDz3NjHp+j3W26A475v+gDf6ZT8THRYH97a7HHXOIrPA7ev41fQ8r875V7ym5jt+1e4fdzHbebQ7yLISuupe74zP9avBo7DQfbOQxmKvImP5FD/22GeGcX7N0eNep0MbiCdhFB1umloU3swTvOcGc5c+7L0vnSsYXQx6S6JTrSwDFMqe4Yi4+TbCkPCqLk3klRFYB8uu1dtpd9BFtprmxlZBQRNQO/INZbFQU4N5sV0IjPIC/ypqYCsqPuXR+N7kSvBf2JQ5BDb8n1AUsx5nk8P0+k0XeZz3xf3xR5DmlEOc3BCm+ATrD4aUEUPne5u7jY1NofbKFlse4bv1StwfIT2jIFd++7DeTRodl/dq/IxMpfnmv0fSYbpdtg+fjU2XlbiX8d+/90p9+dq97m2/ZT+wMvm+cf738EyGOA5ZJRU+h3Dot/43zXym3a4mVv2gGmzrcqwvY2PHPvXgZnjVr+/AsjktfNBWmv1eBZCDndzp3Tiv8S6+nnp0xJLogd6sGfGSnNzQLCAYmS6WF79mZmCVbAdqhzEGLFhmXMzWIcZenuntr9lyWYipa7lfDGzqQcsQMi8csAKaVcRKvN6O1686b+h3NwibnRi+0dAGLlFEaEeNLVmp4f5dHIqqvaSNRA0Fl02OMEbfEKbQNJ9/DPTAGRa853AfYs0GtPm/lTvZ/FShurzl+dLXZovYe7/gJp9lzudfvNxe9d0dbTiB+b+i+S3MttfBN83Fb9zywRzj37ZddlgeafKBn22n8Lcn+VPbQd+1WbHrdmexPQ8GkoGWHJHX7G7Q42E0Vt7M01xmXPNuCz9vCTWNk8MQwdCZNKM5pa0KGWRmTmKf5fi3iaTF2gUhxbmbr9vVuX4GsOqDSjXfpnWaXroyAlmVKASX5WCmSAGcWAW/O7U+0tnxMwMy8wI9ci1r+sle1/X1ZZF0jRN89zWtYrWwRq8Uggc5jSHT/AZPsHUqpEGa3A3ulf963pmuhJKr/kD1+BI7PZ74WP7Iu/2fu0WzHEm/mRF/5IS/8fU7NqMCd7q97sOY2OLmfm68ntU7vhktf6z/T+3/SPfvraA2Npf3s/dF5JePuxH9ftRRp7hsTMTsiqjyF223BMsK9xBuk3uK1KxdhjbPOWCjA6mBCc5tSbwqe/G4Eb1yBeNwZuzehZVNoxxYWj2LQgve0QEItHGZESgmNdwwIK+1K789sLBHHcnxcNcBntEZI/el947iqiZhNtAtwyWqMrxZjADfUSHbgWJ6z6ChGzUTtBOa1NHUb7ATaTvHsv6nuROp7+q4r+ZtCNv+3NazueyT8V3G6+d5ZF467iHby2fO2p3YMvhJF9AzPGMwvTavjuX7n68AehDHe/qrA37x0FuCzQWtF2gQ72FWGtnlm+AHYIHYzd3gqoCTMkRJ0gQeVeH6GDz3p9h1Uayl/j+mJMfwR0hCiIA5cgEDJPQfGpYLYJrUMkU2XJuoGXG2iNi/VOjRKUBLhmD7JAsY2R2bOMDAjTYOk6B5Y24nsTYLncfWcUF/nDhyRNThxbMZidCqVgFhoF0wqlJcHaTBF9fuN7Pl4/Aes9atp8clkLYNpREEfqOlgqmUkqKnrFIAXYwUmv0y9z/0j98uLz7gGWZDWzIKFW+Fm97ITNmg7wnXXTCYUZKltKaGd3mE9G8zY1/ML51vgUekg+cZm+TNYdTporIET10tal3346Jx7Ka++V/ZDa90rEDYhIjmTnUx5M9ChXsHe90FLf/tg7a3MvVni8Hot8ubK+S15Wrbx2Pzm1d9QMDgG2Fa5J7vC6q3iO3POBWWKaAvBsHbkWH9rxhXP1n1UPL2KBDEK3eeY2EbWj8dtM/hzfk5y33LzNOfyv5Wmf1rffzvH285C9a7h+r6PQx/vdfA3MwAtzUK+A+TS0fThlrU1PCMhqMSgPDbKpglcRwFwwDPsecwTIPuVkFVxp33V7N/YDs3aTMtDwMpnRVnZshT/jziJTvR44Y9/OvsKG3kqIrIjJTySOiK13/XVsS3GqbF6q2WfEkj98c5GtXVvghv0Q+Sy/9jHJ/ESb+yPZvKJ+LpP9W+7lrv1sxXFcAqeQL7QOI/wT+901LfmPZEW8ARjSfTjPJXBcAqfSgAS5LCMymJoUyEUYEEplJ5VgI3di5hPJzo5yVXT1Gwi0r1L1nFlHCbkUCd5Wyv2MZs/hhYAb3S2Rm9p4RiijYZp8AKW2VDnOw7JSFZ2YjPamW3fuqfSzmtjxjG5VUK7sU2ySg6yn99q/870pu9cYn/eRV5X4HAe+a6A5mudNQv74cz/N4/V9tP6+M46sw1OfHxd+p+CskvXulqsbeeHE/g/995B99W6nIiiJ+NBgxT80wrzOMnbK1U1D0Ko/bJkcwKCgkJsJSqW5+rGWswxL3Y5b7sHCvsXpiKiKm3kdodqaYiEQLySyDbIeove+OREyHSlLYLvDGWi+y3D6oIdkjOqIzU2W8Q0ZaZoqFUdTCqBAF0yF6aiwK5dAox+EbPFulVL0gem4kzPuvfij2X0vu9Ak+R799EiyD56vg19t/E/kHNdtRiGK9ujf590QhrcdTKHBmIyr4RP73b22f9iIkQQTQlFZaYGoPb9/Kz23AMUmzzEAj0kF5p8wyNYzG43VuIOrHn9/NzLiqP24/y1j7urZYkadi15RtrFi1wbJjczNYvyMZqnyfrnYXtwRpd6hGxLquufbo7B3RER2VUVDpAJIpIdoosidV9RJFVjpaOsEkEBEGIB05cib9Foe5zqAcmn3XMj/k15HdWv95vXSwVz5mufOluOzn+d+/rZZ/ri6/7Hxe38+r/T9rP3
jNnN/jRUa44xbaLQFVZPfWqN/6fyr/+ze+LbHVAZaKvzGdgoQ3M00tOwYlQIacXbjktsaXmTErTF17Wg5wM7jX+8ifuZYaHDPFGrFcWg/EFuGDRIaaE2moukJBOpDfK+o+xEYOwxYtsxns67rWXy3ZV/WOviJT0QWMFNZ6nKR6KizDAsgM96SogDlU73Jmo+FQIGkvplrEvJY5Es7AH5b7rymv6JNPugUfs9zvNPh3C7j/45rtuFX9dshE3pEa21yPezu2H6Dgl1f430eHL76MTxOScAKwVPEXpmTYstUlIaoSsxvpBmzYMEmK7iNM0zb0YLu2gPAq6d7Vcr/CMgAAB3uu6A29IxPoMLOKx8xRuHsLbv8eYffrs7GjcwBSiJtQyIjItffesdq6aF1yXZWhTCjDTCTNKdFcVpFNksRkZJJpMlTdlUx3B6zTW0S0GLiPpJpMSGZWIdXCcX7L8fkdyo1a/xy99Elx7q8pu+9Bs5d8rTP51vt53r7Huuym+pV1b/9JYe6j/fP43z8fVv7My69UJoCMzEoETUjoHcu5x6K+eKQpUcFhSlCjWgSravUWzbVdMUYgC7cCe3Wc2/i1Q3jM8YwbtGQoB04BDEoWk4QcUZh1BPh3WLiDA6Y7xvYA27pEBS7F0PHZo/foXUVln+VI1mAHcwGSJbwB2xooTZRisI9FLQwDsPCsLF9pEA5Hwq/18DKz6oz/kF9fdljms+RGuY/aKIUFf0LM+yee1ovtr+nQT/QVPN8Ph4p52Tdwh418xnE/8/xfO+7dD/efGzcqrA1PZyKJyjXk5sgiyUoyaVX0QCSNbTuJRFQxUjEMwEjqB3JT7nfH/dxxfvUyU8VBYxArlDcyo+tytlh9FFlMxJprV6y+rshAdihGBGQt8/f4aG4ckBzFHLZbUNDxy6N6FWMD17VrWdg7/nDiNG185YIpMxOLsZkJ9jMBBc/ls5+f158THWSPgN72T2njRJMkmVkGmCLZWstpKoP68d35w4enx0dFRyYyIMEszdDC2gTfJgUylU5HGKiAIVkMdYlMuqN3W1csS5qJE63BbSukmmaHN4jMY5QlsM1GoH1b9f/6rdezDQBfH5a8ewK3UdkmZOzPKglmDk+JkMqkrj/UhnK/dAnXbbutN7D9/CMm/HUn30uG6vezCPh15Pn1llovWpgbF+PtWuzI9S8F2bYOV/53wl6zsb76ONt2oDKsFYk1kF0R2Vf0zlitr+orekes6qsQzCqj0Vm5S/sZXSerG6ciNpyXt9ntGNrw8Fb0hNJS6it7oAc8EEFjKgpzV5UVVfK+At93JHZ4742EGWlhdnTYSMpEBKOr960QoQCWu6GquO1FOOSe1uAOCHA4EJXiBFwuF9A5rVgWuXNdvXf2/itf9d067Kb996Qfvor89sr9ayHd/yhyd733S4rNrWrgzsnOww8H3WNW+dHS73tIe45KTKyQhgHabMfdPbrPFOKNfJ6yI9LrHIXsqb6qL1w71gVrx7pwXbGu7GuuPaMrV6ayzHaFKYkqwzcslKxT2+NYyqI/Tm/DXbgXAirqmO1yslsomNG7LWdMDe4wQyO9QQE0ScbcHM/fl2Sm4+bu1NjuTRSG5zOG7lYiAlGVOnLko0rIYrPPo7EoAxTANDwOKrdDQ65afWXvFitjHvuPm/JqBQ58I4fqDih/iqfqh3yK/GbKXa/Eg39vt+9z4YuP93/hemtbh1CZrYAZNTKVar/HKBoyC1ev8HbS93a7FlktJ5q9eNzPuqiPiJezjokUo3ONvKzqi/VA71hDfWVf1TvXbrEiQ6WEFEBKsc9dwMihuDHedt3NYbpf/z4fWwKCFf3CumrtjI7siEYXcpi7xr2k0WenaHwuXPO5YqP+9AiC1LZWiwht2HfmULtGujUzK5gkE0qYIRNb4pFB2oh3AECBKjubuJZHB0YEk4Mk2zWPyWJHBQ/8P+Xu+azxeV1e1gM1CNf2fedfaZy/4ivwfcnBx/YbRwvcIVD/9PLi9e6o6xWB3cSO9COHzpKEOPwqjp63ohH+2eN+FWGKAnqid/TQ2rWuuXbLsAyPsJ6IZIyPwE09EG4CpI6+39Im9uoyYncO3ftaKDN3QpkZ66ACVpaTkKmxUT5DvUSk891Izf0l5dW8xrwDlUQ6TdM0TW1bn9Q8+DouPfIQzNA4qMQO/67Sto/PT+kbXemL+/+96YevK7+l5f6iOfm9rby+ouX+4vXuV12Y+6i/sRXikLTHz4xXujxso+JeEG074rDfK0Onuh2L7X2rcd44xtG71o410NMykMleFasDtcDvHZG0EdxHVkhiJUzmxp9DbH9ltKoFh4OpfgvCcDfq62/1cngylLl2z2ECQ5BCisqiksly6M6P3Kzn8q0t992hWuuM0utW7H6bZt+dnNhqT7tZa5WGCgBmNSncrEtqx83hDc3NLZ0wVlkPT6OZkWy0u1nzuXNIrzv0vsByv4vz0+Zp5PbY3zy3XwkS+j3MFr+l5f57m5Y/YrYfv9J4L++7HTd221yIveehf15ngl9hnFPooR7Re/ag5AnuOj0S0UuzI/q+zhiXuTtON7yFW/p7bQyr8iDKVL4ewphbIm8V/4sVGoBGDQGwqUneVw//HuSoTPeHITOzODXXdVmW8/n8+Ph4fnp6enoaFj1phtawmfDlgt3RFbbW3L211pr5qII92SAFcwCttclbazskY7RfCeP+YbZ/I/mNHar6nTlJPnK9Ouq4j/5Kz5KHX+r/qcf9pSJAquRJRNHMgMBQ6xWdl0IORZ9FyLpZpdqt7yKlJfd/5GbLa+OJPCx37i/n6CJO4UCAbpm6myaliu3+JgPyy+QaCrldb2aa1HtXX9d1vVwu58enx8fHDx8+PD4+PvTpqN/LSWPmElprpeu9yZtNE93dLB12HeNtFGdv+wTg7sUa9rXioX9WXns+d3P+h3yBNKj0+9UbjyvLMA4lQvgsGfAT5bdZ9u77ucdkbzvcdfsax/3Z/jz8RaV37tmk5QUFIA2kYjPldk7dsRPLnVcsAKRvjlNWIui4X4gTbhDqUYSPoA4lzfb7bVpL61E21N/BJQtmqEuRzDLvYE+IrjwjVo+VfeXasa5YA9GxBtZAhIIyg51c7/a90RxW0Z+DCYBjZljBXhHTgawkgO1yB35FyQCqambvRZyFFKYZkIWWZZ2eup26YcHIsExMkdZkDk+z1fuWTHAkvt9DUWV397Nt0UdpEBHEkcgbGMNKjWWTr2+2n+b4Wy+XAhCZUpdEE81gtv5d0zzTkblaBqnAZTl/0PIhL494/35++qDHS/zt8fzfH07vH//yZJkZE7LK1k4yA9mN6S3dNbXB6t4avCFPEJGGKFDMwAnmrgfEDJuQzTh5uPfKDkjLcGVTNrCRDjqt9dDtqz18tvw5N8bd+yj10Ywirx6KyI1V8z3HG7qlE9+919fgroPKOvI0vXI++129O5++xSCX/3g/zWdHGdKvvh5isxcoZIYhTQFFKqgAAkzblyM3tHglL1YGvz3iwOZqVDZidwAcQauMh73vl/O5/5BfIi8a48/b8TpE/
rOW/muH+HLhHhh3MPkikKt6KJM7DiMhK0xlmM/KBMQU7PA61b+at2SH5/iYmMrjxnU0Ps3K2Jhwb14j6Vo97jhA2FDe10JBAGw+gNq+TtRbgJNV4L/t8M91zP1a3BpA72PVI0pZatGk9jDBKMUSS4+zSX1dItbpYda6LJHvHh+fPrx7//7D+6fH5RLbFVSyWJUQN5rc3Cy90Uxmw91qNgqeHAcem8vnxcvlQb6dEf38ydxXMIdvfn5R+0OO8oV87j/ki+VOj+/a6m5jl6NSq9+/2G4pGZ/v3ICjW3Vb6RK4Mh8AaQfj/SD37CtSEDlKdhVB2HqJtWcE+prZbXMAKoIRUFcGUlIwJcDo2yKAFS6DwRkWkGErmDe2AWPugaHlQa5K23plRYjDZGBCRGREOST3eCQegPcxVviYPr8VO7AkACPjzGpvpqvlzt1Eq7Ni3sRxNocSBG2z/BUBXfLsdG9094a3mR3nLsz/63/+z/NPP737z/98/O+/XN596E9PsfZkcfmAm8E7aumR3uhurbFZmssM3uTuyTSDg4akVeRMOz5LyePi8AWHKo5P49eQ4/z96fv9iD30Q0p+ls/9+nHTOz/GEfjI8/3R5/7ucSzo+TXNztsUp90l+GJ7Adjcom6qWhkr6UkgfA+54YbnAqHrAraCKW3Erhx1OreKejnUbb2DmSGpL6t6ZKzMpKQMKpCB6AWVlAfVtNVLo28qQ1VKsMjOirVY3OmLr6m4I0CiQCnhSKP20hDfjF7vfVo7I8pULwxnpM7rmph/d+8+BvXKC7QpX8MxlofKcXwBm3IPJm0L8yiS/UoL2Ox2AKwsfykzcYIs0ywyzo8f/vrXv/7lP/793V//+v4vf7m8f3f+60/LT++49NbRUP6Iil0BiWJgNzNS7vT2PNJx6HNSpDlzQ94dAA7LGL4k2jwWwlfTA3crs33jNcs9DwvZfeH1Q78f5PpqfDmf+w/5AnnR3Hiuqu6t9UP7a/zv2iIpr23lLhmVICt5dZh41K4wUze2+dV8F5OH3VGQAsjKtYKSkRkro2cGozPTMi2hojCUij2mGN23s0FWWiQF2BXd2JLtx7SBQjwOOTbAp+oT1qWRpDKz93VdPbJmGg61u5nwx4SAm1C8jxzJxskAZI482m35ROSOto/TudakvVkvSJnZK9+4YhuLxbc/ZF+W3vvlcnn37t1//F///u//6/98/5e/nd99yKeneH/RBXPgRDTAAnMOI4wjqJG0JEmLwmd4M1cmScNm6as+1QkVG5GTTmuwRtKscYtc+oSh/3J5ruI/Lj/M9k+Rz+Zz/+G8LvkCy/0lsx1XR9xL9vvz7b3PHf/7nX4/WvTbfUwDVSVMoes8cAM8P7+9uxWfQ7MPB2agr7Gcra/MZAhV+C1W9hV9ZVEU5sYEicEiMM4nN1OTRCaG9siqkVyHGyapbRWaOCzi0vvaR25Ev+yDWUxkNDNExNpj7YpkymqvqWOc+1b8BPz4gmCTwKasr8DQXsu4TjOxnw5g8z5Banh0ASDpRoQ2zsV1Xc+Xx2VZ/vL3v/700/u//e1v79+//+mnn/7633/5+1//vjwu55/gCzzwIPyxFRU7mGh51ezbP5IqKpoyuPfqeABZsMs2p+/w0YBftmh3M2PzvYDqEZzRt7HccYBljuvU2+l9zEh49h79UEvP5Wf53I/bP8bvF8lHzPbXkBk8s+KPuv6O/71Up4kb/cCNft+6hejVX1sADIE9LGS3pXF1EuZ2YqiUfSgRPdcllsUjkJkRVpHsfY2+sK+VkopYkUL2q13sh6dIm9ou1pybuu/73LNzV3FL1xVwYx1v39ca4HAZqVQqomhYEEITU0W9gi3X/xVM+bWn/cqldV1XMIEjnHTggokVV9wsxs4pAL0vvfceS+/9fD6/f//+6enpfz7+X//1n3/5j//4j7///d379+u7v+PxPfKCf3mDqWMGTj6uj2NCuiL55jDmFqKeZjQXIV6V/vYsEdxyuEoO4e1mZvRRRlW3mv06Pl9bGXy68X7zHn3ls/inkl/E5/5DPldeG8mPLDPvYLGP8L/joN95p5rhG887i3oER/2OTcUDBw1b4Ssm1k5o2Bh6KwE1An1ldNV2dkair+ir1tWUiK7olDY63/rbrouJpCpTJgUf0TImyyMPQSEnRhQPDxC3b/WhJmhdyI6ZEKg4tKRU577Ht5igO0TqE8XW+4ZNxfP6X+6ftSzAPinFpkvj6elpXddlOS/LsizLhw8ffnr39w8fPvx///P/9+7du8t5bW363//Hv/wf/+ZOzmr//e//4avaihPxlq1JjMQq8TpFkqIVLKONW0a2reSGjk5RRglmLAaLCkclYT46eSMJerXU738d2/geJ3hmuW98qD800ivyKWX2NrfG1b9xdHQcP358lF8xiz4bxH9tyfzF+zlezvMO126vVNm8ewr3/vZK+2v9abgBWzZ57XpzhLWS3Eog6RAPIwmo6jkIkExu/O+i2agNbcjci7DRxI8M4xbWrW5GwLAuujzG5Rx9ccCFjFDvGavWyHVh78iuTGYwhQwpqEElY5VXoS22JyEItnnreGvhAuZ1nldlPiKFuQUaFut5bk7MqWEJZJA8TZMgRD59ePzD6S28IZVr71WE2hRr9/nmjm7Kd1tVPL8NXKqvmbHmu1q4TJO26ncRsTMETNR+Q5flXLKuK5jruj49PX348OH9+/ePjx8ul0tE/Nuf/+3Pf/hXiesS6jLY7POpnf6f//r/mMTWZcuSHx4v7z6c379f+oXMwmRQBJoGM/Mm58hW0WBnUO8iU/Qsf3BkEt5Edicu6zpPM2CkDYCGNNtiqKR9jUDSaM/oi75Qckz826pif012V+m17xUWvuqf7Ye3DzBvf/MxuXsfsdEZvfb+3p//Ns0Q1xW2Ko4A+5/t7+un85p+OPzkikQeIMHnauLa8EmW+68wSf6sSfsL239leU01vygfOeE7YBG3T8cVdteWDIXhTXxxt1KY2uZHG/Y7IIp36AQBHWNmqmBP8fRmZwYzLIMCorPKM6+B3i0TRS6egcxS8UAM3y2QGIF3dcB6SzMFr3JTukbRVLfoIEYFwRee5GctOZYX2kGcCCwrdp/qnZt2NzU+EiFzHHOW9Q1WMcFkrD0ilg/ve+/r2tfet/eTAfm61A8j4nJ5KlmWRdJINz0/9t4zMbWHeeKHaA5UsZx0OKzZNLE1o3UxV3VTGGXNGmepn5/Z1onhwZY2LbCpeFRaWFSZKxZJpCSNlFR3eGH2hN0U8dqhmK+CyXwn7+k/t3xMuZcpc9j+JvIaIvG12n99uSriT1DxvPVa36ny4342NXQNXdDBg+pCYjciDgZOhd/tJVUVrHjCTb8XNjPCSHiHcWTSbPgABQmxIrpiZXTLRAp9RYVCRig6+6royK7IzGRG0XURhAEkMlTWH2wrF5WG4nFP7IrjUCz76r0jIPG1W1yDFoFR/kaSlBmrUpdYVp86phkmporMMnm3g0+YkhmAEOoJidmjr9l79h7Lul4u66VHBfTXKuTNlvgaia45K7PWm5vZpPlN/Jnk
gU7nXdDMjA2wSlx1WhMf//YTljUfH9f+U7dlA1lsBEFauRo2ha7rpeyLkH2QzGAADVukDCHubDNmtpnPdviVtrBLbC7oL3zFvp/39J9efhaW4V3L15IjOPPcPv1a7b++PNfsn3syx5F5PuajxiZx5H9HQRyHEBhuC91tb0ntBuvQ7wCMu34fmMS+C426GaGxHAxlMALrgjUUXX1lCtFZ+UoRzICi9Oaw2VWNwji5LPudLkimPSySo0AcYh+v0l0IoTZyMMxspuimGjb8ZPxVIJFQlZswoUf00HpZbO6MpBltJDTh4HPGLdOhXoFllIlQRERIHb3nuva+pmjLRR/O/WlZ11BoKFpbY8cQMrN39e6Z/Ptf303TdDqd5nk2WvTovffegw7jIP9KZIDR0XPSpIgI9NC5995j7Wv03gqbggFZj8xRs+NQvrBuao2gbKBZ2MFAFLxuShae3zMh2N3u7sbnuOuX1lF38tJ7+rJW2eaR+98+X11tZ3I7g11/9jOn9E8nn4C577I/mt9C/vnM9v187jZ+tuf+sVoqCfQjRn3Z7FX2Ftrq396GuuPmOS+dPraLSkXPUlhvvKwAKmWxfpaZ65rrBX2pksxKeSaUFhKSozh2ItMq/FEJFbVvBSwSo5IUaVtRChFIsFR/IhPSSHRKXism2bDbt7whblEnBfiMSh3KCnUcBOgAKfTofVna2tvgzlUWA8E2WdxdNF9PlVRHRPZVsWbv0ddcV/U1lrWf1/7h3J8u/RKZosiQmFfDHAAwyyjqX/7v/7dS7tM0Aei9L8vSex/mM5uZZ2asGZezlv7v/+t/+nrh5RJL7z3WWNfMjPXtRvBbhnaFWupA6c5bencbWUx7ACWdZnatlZqZzERmRDDCpu0p1bZXHRdTnyef+57qAPQf228Wr9/B+/59ys9Y7newTOmRryLPb/PXNdt/q1t+VNafouKHdj78pDqXo4mbYL+iqz+1FN/Y4EYcZv9/9t62TXIbVxaMAChlVdvzcu59dv//P9uvu3v3nnPGdlelRAKxH0Aps97a9ozHY3uaT7usVCqVSokEwUAgIJO/SHC6+/Ywtpnjl1GxIyFMnncs79MROlf2WTHYHBF7jmGjWyQiFQORioQCkVX2mmXWWeXzyqzH4aHDvU1LLUMOiMfyXyUhmTnmo7TSOXSE4CfbfVaqevt8dUNs7LDsaOYhMLJvexujVe0Om6mzJsZhr966C+8+uRgsilDfEWGjc/QYsYxUZgMXtmhmQSaNxPLw7f1DPG2ipACeyeewuUZ5gJGe5ekbSASkfewawKe//E/v13hqgxHjmtsS/hSG6JMSY2ZKwqdl16H4N8PPNw9BzmninbcOdlr2UEKHItDR3vbbv8Nz/2Ccvn/wL+W5/zsb/h/x3P95sAz+uG57tZ/ovL8yK6fLWYyXanaXSxI9cBh3M8NBXl6w3EweJ51DUuHad7coMGt63O087PvHLTFZIaFR5Y0GciAToysiI3jIhClfeO6KTMVttcgBgCKSLO8xBsi8RiBOo0yHFzZRoHIUEYQ8ahC+eOiY47miDjwwkCplsQBbZPahUXD8cc/x4jz4af1H2ZDIgDIYhEiJYjPLhgWZBgMDHpTETctpdo5J2gEsyzINZ5IoyNtBem4BYcpjpozhERYP3/4lny3GNZ59QKMmUsPoKLGBzOTkyd7s7GkPz0xUe+mdneZ7WnbIzrtj/GKX+NnttzlO/8DtJ8Ey/7yv/+gx/1L7/+Xtp8My5F1KidR7P836LcGEjDHuvS0cZt/dK3/nxeq5VuV4YcrPdr719qI+9HhSWeqPp25MWfPSas+04pVHwgAJqdTIyZkBANpcekS5jUcy//P187QygDmaWv0Y8+PhTjCgsKMjrHcA8LcrtBeLy0JDmlnRE8utZalt6T263Y+2UkJWAo03cMOfn/s+dA3toZ7WlR3KwPXIUD2eppWw1/fb1diKo8Jajuxd0jfYDpfWJfWtb9fr/rxv3/9nPn+3f/6v/fv/HZ//O65PiA4egvmZJTRx51LcW/ZjXsEhvSmWlWcV77vLG7hbZ/zyqgO/2XH6h2zNMFCuXpEsakOz1s/pB93WlR+gke8uk4E7RuaPHX//qbftrBtwXs9xotMlfLFqexNSmfvvGACvj3v3/PwgCe52nfcBPeAuoPHiCz4yIBctMGbk0AAEZt+fn58/mxMZzbC4QTFikGzm2RaSZk0CZDJ3a9aWfU/3xbwRpgqOZTMz25/gZtYIE5WgO0A3esUuxUwmLApnvq6PnGzCoErvXy1D+4bnJ3v63J43z7Ah9KE+OK6OykTdMZ4id2lLRh9j+u6iklArs+j5PWAymrmMPcbnp6frdQ+odEyc5r4sQ8OzmbUH0Ag0yBAmktboFlFmVSSToVAoUvmghkyK67IASA1Ay2LhCew5rgw3M4ayC5H2+AQSaqlGenIpNRYJlUdAwJBICcksCfeQiY4OQZaujhELujEGN8SGMeBBouHS/H4EHdu4LOuxrcP4krTOC5UueMJ6F0ZipI2+SGotVsSfNCI2xbPyenUnSSVjJCDjkVT1IoTgRCNLeKabwV0sYMzRGQa01txX2UK7gBfjo+PS9NBtDTPTwaERkSCizQFyyNNDUxMo1/txBLtdx/0IuKXMvYo5HX/meBdfYe6v7cPt5RsA+d3jb/fk/XH9cbDl/Te8za/moTyKim5PYaWQ0jJUeu5IHfryuvPAPj79nX0TcMuYOxdgUUJ7J8vtnqb6wnP/HU2qfxjXPqFXtOtCP1JQjKTkZsiMYaC5elSmeNTqntZUin4mkSITYhrZ4AkYm+y2KFebNmQC9EQeQMZRGaTvJIueaJBLpiz4hRGIoRw5BsaoxKVlf4YCOVJd2SM7ckjp7oFQZgwojRrEYqZtdJIQc8c2+nXftn3vGb13s1b1gC5LcF3NV7gdYdgXTa/GRL6PC1crg3piyl79nLQ3KaoTX+btJStPP++zgHV38Ikds7W2GNOYLksFW5ZLzluxlHu7c6pxvXorLKdxD5m7AaZuhuY5Nu/q174P95goWcEoAFgO+B0E9GKjFGbIWYNp7r8d82J1+OFIOZlU96NJhheE+K/tt9Ia/snA+i/e/hiI/Nky08ySh9OiotlF7kOjG6VmDmkECfkYZIl+kG5mtLAWygUWykENWgMpOtNglmawYQOyJF1AwcQZFFMkD7HIVABso+gdstIbKP2viLw+5f6c+2f13XuP0bWPGMP2qxTCEDo0MoeQxkxIip6RoUwrKrZgvXe6aeh52z4/P123PpRm1iOcI1tbc+oTtNYkIXPqvb98dDzxmcPanty+e0NPzjubmWOMHOEp8mBbVq+oCfYkzKTu/c1qqkXOfHWrK3J+10EzYWVuBaJyF0ds+MC4337F3bvpuhn33rfrdd+2bbvG2KP3MYYkM1uWxR8eElDvypL9mYCc7uz7gRpNhgwgO6t2lOCMG905y6ueMFHpEPjd7PW1/f5ae41y/Fbt+wFEvjDT0hzQ7+3Hu/s/6qg/Fyb6+67/bRtKPzAxcSYu54joPcbODHhzSjFMkLv
aFACkmczdXdnkg+5IVy6aDmMd1rqamZk3MxPN2OAN7pmZBN1mRbgy78Y1E0gjgURkRtfoGdF/+CGv13h+xr7HvqOH+q7MkQNIIaRp30sZ+GhVTQ5MKbZM3zMUo8e47tvW96EUIea6VtHOJlCKHnsPt2QWKnMsR08pegdlLCPLBMGcSpDCIR1feVFWysWSRu9jX4u6o0QtSiSwIUUIFDNEm6WRqrxITSsUhLiBbMdsPF8X+RJjxN5jC3QhSMDAZV75G/v+ruf+2rhv275t27b913/+r9if8+m7ePrMH77D5894etY23GBmmTDdGO4FuN/9u0nNtBlsL4lf+azosR6lUxf4C41fSYX+11ir7bv+XI9AR5zmqwv/G2o3WOZLy7HfTPuDue046Qq8rfwpIx1ZVYwyohuhyFCQgQVn5YVSFWFrcD9qITtsFsWQWZKxLumLu8sa6LDG5qKrxALDZKJNdiQAj0MYK0eMEfuWvSv6/sMPGF3brr6rD4yOkZm5zIho+exDCqjXb3KQbs0aEhEl/B7b2PfRe+89AkZflnourbWJCaQyU4jQyPREQ9LKI51BH0GsIpsTWSAJ+CnyqNJOKNWdiUMYmBHRR46wHALJYJtCN7fEekgMpKFC0IdgyD0sc1q3Aw+RpCK7eNITjum3A7ZeHvBjlv1+Qw2ncac7JeYO5H/8x3+M7dJdmzK25zAromkG0qQ0zaSCF877vEPMSnEi6QXSU9XLzL21xrr5zUkaG2mQaQru271fcvx2kvaeKf8t1hz/t23tlcl768j/RtpbM330uZ/Hl/+lPPefe4s+Os9cetw8I5pZo8ldtCEhDYSlFEjKOWgGHmEsA9zTiGVRI81FVk2GcuHHuLg7fIEvJNMX9wb3KS1jBhNMh2BhttElITKi995H36PvGD36hhgciTEwujItg9KQVxU9IaUuTJkwdwdh1kwWQ+XjR8Tn/dp734dItOZtcZKhhIFOZznrmjrCiimC5nmr7XGi4ZiAO6Zw2C2sRLoUJB2U6EAjI5V9V3REAkNJKJmUlY7xkTCVykNrcpYQwVG8aertxHHrCwqphcFNtLlykKYC13vuOb4Ay/CIIb6M9X/69Gk49nHlvsXDQ+x79CFAzyNzVoo1USlVODwBuyHvYBxZy/UtYOn1kJw0HofIQyyhNs7MroJ95mXdRRru3PZ5wb85w/Fv3Kbn/tuHZfBHdNsBwJiIIl8bUUh6a41YMYKRLlmJZhVEMBKszZCyaiOZAYvDHM1IgUlVyE79uqZ7a010enN32qLmmQlTsgqZKnNkjoDYoxL3x0yF7MihHExRWVg6FYCMycqNtKrjUePcTnqDEpmjD/Ye+963ax8jtxEqc2sUEcpSEIfRmrs5o2eXFEOjhSXXG9ZwxFaJI61+OquJPIuWzqUEZ0VWlJPZWouMjMgRjI6EWSvrfYe5T+AahJRHGPrGg7qPpgKA7HQWxhhT8DhypAYsaCRjz3ft+2ncX3nu6UKGCzaS+749P2/Pz9v2/MP3/zm2p/HDf+/ff4fv/1awDHZ8Eiwrmpq6I7nP7LGbF6+SbxuJqgNQLJ3MTIhSQo6TkWFJOA03aQpApvdtQ2lIfAVkfnPtBSzzL7yOn9g+MtM/d/9vqc1UQkkyllF2d8slfJeZJ0mRXkktigQ5He0xhACRphZLNGFM9VeDsqDV+Jvc1VaZyZy+WGtsnmOwKuCZpMgcocyMMXgGdRVDEVAgg2ZOGWWCao1vNLMRWct88uarEijhwxGx77ltfbvu+64xMBzutiwNZGaOTCebuy/LsngzH5RGpDIidhulWfhiwTVdxsOVjsm4RInDnH7k4f6WLI2bMYIZyrCMQSJb+bcEeWDrRJaYrei4952LGlxvJTElDI4rEq3qkrIS0NSzDxAyX1bdpYxxYiIfdsvMpJQ5UfyznVSWKew1/91yslCZBccC4jTuOFerrFn8Rgq2zKF0KWcPpEp/GQbZZPQD9+74dNsrEjLrq5xm/WUc4mv7V7cfFw57s/H6yC+HHD/iub8CT86+/vNN8fvnP3nxr77ujkvwPgD1Ml70U+Ga87CPbsZ5Pa/O7z4jVGZzmd+WZfU/f/e3/2qtJb3HbsKCGtuQmJkSDKJZcVLGvud+bQZ3NTdS0Mz2dNsDnq25L601eStFQW82ImBaFoeib09jdDPrfJCEzBi7IsFczN3NKFTdpwwCtBKJldDMOIVQIiS11sxdYI94ft6u173vKq1GEKCBLhhBOJqZL601v6wXSj0GMiukLOfia92rzLRMKCoySBEsbORgoGtyX+btLgvNSSBvplQ+LIvMY9+yX9o3Fzixb/ALjWBYeaBGo8F83HuinAWvxMzAaXU1teWdhJKFaF/MbbVVlk7ISuPhrfN+dptXcE1YIsNSNrIWKMwHUtCejlDvUEbvvffn6wh5m+KONWG4V5JUuoums/ZepTuR8FpZkaz4OqmkJGNjif3OhgBNNoMKySyVHE5xH82FlCYsI94R3u/HyW28HHNu4V3HeMfrYXgc8GKg3Yab8f6w98evzm+DNcd77WOY9OfBsIcePUzFN/+Fm+48iPvdH2y/uP6fpOf+B26/smv/tkudAbqJKhiRkHFZFkTa0qv+JwDJRVBOGmKkzJAAQgIYMaogaGQ6VbmkUGQ8ERhmck+7S4lcmhTejGEZXdsTM7w12hwbFiEFE0Y0pR0BNFUJCBIQGO5rGTscE1VEbmM8PV33fWzX2HdNNTBABL3R3cxh5kyauTV3i6hUS2WMMUZGqHsskYfqCQ6JNJjJoEp5zeCEwnkwaYSIORJosCy9+gItlIORykAMuMNaoTpMgoIl0mSCZFCqeEcZ5dNiqv3UgyP9fizPawMzNWL0ZAwD0NYL3jPuJ0D/yrjPYIJu65U68i9/+cv+7Nf+HGYiW2v+8ADbsY2CcwAAfnxO0hThuWdG4raqufkuU6CCBbwYaYIJBtkd5i7SP4iX3hdHfKfD/+aXzn/Y9u9r3H99RP41YgvgmPmrjClJ00ygb8slM1u/IIDRI2VVt16LMmSEQqIyS/g7o2KZSIskpHKV0QYEhHIwyX64clWYwbC6LPvY+y4Avu57MwqlzWuAmTWEwf3I6k+QplKAlChEZGQm4WbWM7dr3/fx9HTtU12rfjNIpBnUlDNwTPjEkJK94pwqdbKuoeDIsYYFSSthAzdAyjRYRuk7hoCqt12iBIohSQqnwSb2TFW2VihdOdA7RqAFzFA/Qqo1kTwpq/gp7AbFzESpk/8ne+ExycYYI22ERmgMDSFogD1v3+M9415ikG9XxgrcPPd937dt3/dt2/7X//v/nZg7f/jOnp/5fGXXn7w88epXIVmmGArCavYxSFNL/0VvPMOppO4uLKfe+/2hdpvFXiVs6670yctspt9HxOsP3f6wxv3DZddcr79PsPkVrufVxvzLSc9IwuFgcmktL+MyGBGZhfAG0FrTYGJAZOm80AVldgFSRhQyLgAUGqDTl9VBf/fMzMsF0gKO3jUC7ojAwE5WIQeiKIFkMzMiK/LLLFuUkjL3ftVkjGgkeo/rdd/2MY
ZGR4+JBhi9ZC0iZvmog85DBUZmjjjVJYsrnwHFrBbFiR2LnNByZppQMjGGAzIHCoyaK2TO8CsThgQdElMZ4VM+vmZFEAbmVIu0SAnmL+ZglPP+6vG9iN7Xhpm1RoBuDthDe8R7xj0iXu2vjcEgYVVc1m/tcrlcGm0hPj3at5/y++/j+x/i+dn2az1Tzkp7aSWvmSotEZ0FtcRZM7LuylFu6UDzLYmTLQOYCrFJgmfM4Bwy1b9OHOYIdBy2/jasfq3x9VH7F371v7z9YY37l9u/yq14x8TPEXIj0lWODYxo3pbLWBKh0BbTV/U0SU0aBMSWSCkTTVnaiZgkZgpA0FMZ0g2rIDggQUY5JeSAhNLiHcoGk1GFvRyqk1LV0ZBEuak8duW+VawPPXLb+rb1vRcw7Vn8GRJ0sB1RPYNMSQsDWVLwphyRGEmlYmTAEg1sbM38hJJoRcyAnVlNKSvvegL6ULFmaEfIEbXLaTP7XqHRNTpjRWuZaccvRVQ9K2jW44amFlscIElKdmjhVvBjJuO2tqQsqCgtHXiaA7CX2jI4DN+6rq92vsLcPcTWmlljtGZLk8amZwYR16cExhh937+tFLg71qOEvCO8H//IKtdbq8Q7sd/KHSN5JqbWDYZMp2zFlG4TUVXKp4m/Q2Ne0N6/uu2/hfaHNe5f8Nz/qW77Rz35/lvuTfwc20fJtym2Doqgm62LZ0QEMqKEzsugKalSY5fgsAhZQpYIySQjSjEyYRIjIsaJ4gLlv4/SvvLM0RaEmWTCCOIofWHFvhilkRAKTdw5oT4iIjKt9NPHUO/R95RotvRIVL1WlJjV1CxsVhg0SHeQohSRGnsokgpPaXKyzQ/f1Soj99WtRlWikJQ8JAOol11a80bXciSAiNAYtve2DrSZrAumRYKhIgRm5ZTZwTYRgGTOBKsZUC0zerJaGKkx0ANbRJcNEMB5x14Z8YeHh3eNvhqKCukh7rf26dOn2Nn7c1Yi0rou336breH6XzfuzIysgqWhmcXVpFKyY6lhJ6H0YGTaZOAYmYRPFjz8rq+SeAW4H324fHYehHccdv/1+PpXmfiPRve/w5TzhzXuX2j/WrfilfN+PwBOTDcl0ujWii84BmKUCekqhQIYWUmmSQM84EiVGC+FZnQayb24hLJkRhz1iYRlAZO9U5CZE43wEUEfKgiCQKpDADzRmkUoJJmKPNIz+hiND2OMCG09+l4SWy2E0YuHX1aaR6qnTcJcUjbZ2RGZMfo2mMFayWiu7x03LfvzpqlqTh3bSCnDhKg63+7ASdLT1JRXkk4LhFIWNOy77xvds7UpfF9TX2RahbWFaRATR4QzIeVJUETOaC8yOYZGcgyMwIgYh3FfHy5vMRkA23bTnLnfKMz9NO79MO7/9Z//T/ZrPn2Xz0/+9ENh7tj3b44RfK5VcPNgTrtmlZcl3eq4nN/7UsLMSd701O5CC5KIs3rWnXG8h93PI1/CMl/bv6T9Oxp3/AZWi6eJ5431fAfBa6aqGq21aK0N92GDYkRVkoMko+WUNrQy2kwo6wXhMHIc1RcET8uo3E8lkhxICVRrHmgAI2Mt1gitYrUlJyaTpYWK/0cjE4pQBJbGMfL5aexRPhwlRWKMTDEzQdgsCIqCk6QDooVLGTFi9DEGc1oJllXBXZpPWZ9UViE9I6YI4hSUL4kaYOI187/MKt2XGWwmoRCq5M7RxrgsEXW3iyJk0q0SyAe2aQJTx1xw/iux4gYuRrTVwROWwXtG/HK5nB3gNeauLFhm6sNhmAH6xnTxTyv3zT5/yu+/73/7bjw9cTzVLTst+/0GUP773aLtaEm089cd4Ntd41tuzE8fNf/y8fW1AWjJ95eNb1/Whp+z9H0Pertxc7V+mQv9qK8UTfDt5dxbzy98/GwfwzhHlYmfdp77INuLvW8goNoeY9wf0I6fYTNLRPS2Pn6S+Q788P33n8YVkZO0IIxQDsWA7CL1gIaiQqiOsORjfouyt7FFlU8imlcOZjdHczgz944BEutT6XHnoEiZc3DA+Nx3M/NlJTmCY0R0KDCy9230DQDML9LDdcvrNvpoCRMxDWeGOUT9zZ9bWxweO2znwovG0p9jPHsjmmGPIPDpkdKadkEDTKlhTBBe1JZBwIrKnxEaZ8VUW6KKmRh2HjFVdzgGIro5Hy/LlqPvf7tQeFyz/5lzDdRIGuCGwMjoMgIuYwoJBSjgan/eNTpHt0wyjL0x5c999KFd6JOz6WYOwPEZh+0tqmHSSKriEHTBSc+JZZtfWBnCBvon5uPePz/k09Pj+o31zfZn2ufU0nNJtnz8Nv7Xf56YjM9cisnchNEqtn50OzNkwhv8YuYmYzeYO73h4VHLks3gLOWL1IYM6PEICEXF0l/187tx8Q6j/GYtTiyo/p4q5ZWvgMqQRUVFgEO3E/KCKMlSfYh8ZWfuRpNORYczuxiv6T23q/9gIHPctu/qEfP2617Mecv5Q+qKD4IVccvpyntK0Xt8ii80IY7NuPvqeF0r+fg5p0YVyX9Tz/233yop8X6KOossV5J5+azSDQVWZkSMMZADyAi52coydjOPFJXCqfn3vpWfOnpRMxNMEpZiA4XWTOLoSTJTY4wxsgfGPkavXmekR2REjJ5DVR4bSiSTVEnHd4TSEsghk0u7hsY+CEgcY1iiOdy9dAox3cDXzmD93jEiIjRO84VgWgWlK/x6OhlQZASIwaQG1Xtft018lolRmppkIxLm9OZQFQGxgCyrdJGFnjGC0RkRCaaVwE7b9hzCnkxRCMzkoJHPJMGcvBGbMjbLchEMbDInWi2/AGwWxY41sImjb/3zUzw/9c/f++jer60/a3uK6+fx+Wn0bfXqHkd+6nTXy5OoeIDIGWWR4KX+X8bfzHwpWEYvG84V5OwbP+KJf3TAT9//5a/4/S4FftR8//PaV+P+m2j3mHJtn5lvmpmH3lpbloU7SQ8EdGAUKrrMDQxW5R8hg1wqJtnKpNz8lXPxjhIwF0AItvXUzNYBCXMwYSY3C0gamYiIfUx65dggwh00H8HrHtetbz1HIjgzO7NUhbtIhKAWIhWMiMiq8NEbmApLGLEaWmu2mLufk9w0QACk+oklW1mIv81aI+gMB5MUdRA6ASBtToc1LYgae8/n54fHjaBxaoehOOwpdAAycSghJmYS1+XJLIbtg5GZaIkeMAEDS2AZGUIKmRiJzPy0TrZM0XrmnQX69Vk0WiNdbIQXNWU7wsaNRmFE7/G555O1MHXFRmyh5xFPyCfEdW1+b5QpiHIy80hKLfX2EmgA3eZMCZhba8tyzqBTkP4w7vX3Rw3qPcJ+b39/1n7dfdHb4/HeeX77Tbcl023Pr3kBX437v7i97a/VAzLzfKvUZsq4J70UIXNadxUU/vIEykwIpLZ4bostaGXf5TBNuIJ8sWaVLJPZb5IpdLjgggyjMbOyTxUDEZNv1wPNQbMctvf+fI3rNfaBLW5r0vJKK+KXHRXcQ0Ij1UMjMBSQCQ2lXwlfnGTPvqJVgPdm3ycBMUMZsylJY1HTpenGZrur8VHVyAwsT9YF7aM/b
/b8nwJEwWgGeEmf8dr3lEbGkJRMVIom8nmMjNEjIgKMqbWrhsaEySSCHmUuA+3JNTGIzOlPQ8CnZRWdbuQieuWFAuiXI7lJQGrs+7V/jnFdSfgmu8o3ee9r9IhgotdzUenrJNUMgsYoqowWRwJm4MxdG0LJ48zXhAvMzCM4czPu53x6RobuTfPZe1/t/Fn79XIUvJjFz7DBi/P8Mvb9n21n9XJelF5Xm/kV2lfj/htt99P+vec+3IckGqSKFdaRpY97aJ6oYpYSthAY3tLh5tTBhCgo2oyEqYowyQBEJ6Ajmb0ONZBVZW8b6h1RGUZCObsDGB0xxnWL5w17x0jsHVnoKcFWcpEgETuSGRxIxh6xgwkXDFgNWLAsWJbFzEb2fehxMQDhh8Cujlmt7tKMUBym4SDziciqWTk9ZmiMgAQiCtGV+tifnrfn/0tSIKZqMQCmyJoyMrOXiC6sEvR9PEVEBgLl2ZuSQyI8qvgHjfCCREKp0Q7IOLOygZgAxrqAThqMhMu8UNR9vdwMX8S+7/t1y30bEKMzu0uW/ZLRsIePPaY28ZHvhWL6K1E1s/OIRps1YwNOTLki0jem0BEolt447PeW/SOz/ne77edb9Szffu+7x//227HOrIHyL8Bnvhr3f3H7KND69phifIe7Fa/lLpQkaYxBZaqGbtqhFtAabIE73Y0267VWRbry4AaJUCZKh11hKh59yZIYGUb4NTMii8cdY1JESPBiEdgj9w3bXjU8oFPnmwAn7JPFrhxVKymRGBvGFSasCx4arOFy4XpZbKmSSCNCmZcyKHEQqDOLLXSL8/NIyQFQFazLwYuiwQAA9r6DhJcqjWgce1fkFn9LjZER0TNH5qTsjOIKpaSJnhcm3vSUWT8BpQQJ0RNtucRAhA4pYpa+o0YDUHllRtFUq4Q+hirwO5NCLWGS4uHb87FWBCXHyBy2NkZQCSSVyKDSA40tCTN4Q/E2zx51ENWhWpuIlGXWZG2SIsQx3JzW7q3PcVdfm/L79tbs/h1uOyr2OOuivOj/uh8a9+f5hSz8R6b2l51AvsIyXxvOUfRqJJwbhxqMm7skujE8KDESgBFGJc2dBF1QMRPzsVVw0ppVHQqTkodi7iRVDow+oR0fymNAOcBBmIuMQIg3nz1nqkumjxj7hm1gBAIIAmBbTZxJrSgQSQDg9JiETIyOPrAQJNaVDw/t8tjapbRrEiS93a9gNIEoVZjhRJFPJobqt0EABlCivcXWGRHuXqcwUiWNkwN6rkvRGNJQjnnPR2LqPZoETCyadtQlxGRqKQMmNDyjS+PmGTvQhB++33jUgrJK/CRoeKjqe4difBxWOI7Pn040CDY9rE2hzFCOKieigQDS5MaimGZKyVritBKOqCKzQg6lYSDcShjSlYxQ7vtii3vaHSnzvp3W6N5tP3vsL+K23798s/165++lvYVlPtKb/Oe1r8b9N9puruirnu0NIbCihrd8k9YaklWCzgDISRH5uNCsWHlAZEAI5JHBGARSY2QOxLQqM72ehiqyygDNe3RJGRZKgaIq2XEfGMO2iFEgu4Ggkm1ZAkJm1KIAUgLEZVmBSJWySpBwR2tYLmu7tHVt7gn0BESy+ZF8c2ffD3zmHC08yB6AKmW2ljWcmVgCIKOMbiYCZBUll3R9ek5VVHaUxtkhoAsSnCsBalYoyVxmsAFTQB0xkIntB42BMaaWTjuI598+HJb9rjJ1MRcPtS8kboIBvT8DVcN8ElrcnY5+/V6z2hTM4A4ZPKEoCG0W1lCyxHXKMBfWBCETESoGpnl1LYsI0CyCiTfUulsPPNu7WMq7Zvfn7j/f/QiW+V23tzDXr9PaqTNeN/GVw3jfjuXSB8uZf/Iz+NFl1Eee7088z+2EL+/DR4e/+qJ7d+Pt2b7Q7s3Wq4/rSGVSMRnIh4eHeGqZqcHKvJTRmlc1VagjQJiXN6vIHO0ioNL0bxThTEzUuLJdc2IsFSAFMALZcQG0sg/98PQZVciHJQ5O0Avg30ahtaUIyyqWV6V8SuGSsiwXuOxFmjtyqF/7vpdrXV+tMcbWk2bt0tYVbBzZE83MVKouJx90sevnq6TIUjY2cgqxTKFJCamAOGt6YIxBslKKRu8pVYGQ0lHY96GsaYYkK3kixxTkIdjYjCboaWw6yoRkzsAyE9crnDBDW8FK5UyksC4wAxvawiKyFqQ0BlRnOAAuMxjx7fpQE1Ue0eLeow6reG+Js8+SLISjxdAYo/cYHbfI+hkVZ1lzz8wxAEdCoe6LfLl4W1tbeNzYs9V8mTmSy7l2/IJxeNvhX42Ln9uODxbodje+Pjhex0ru9e//mfZKM970U6//jsvwQs99XvfJXeDcvoWL3oz399sH33/3wZdnuENrv3ruv3a7X9v+He/SGhiQJUrXqcql0kTS3JubNS5E1Lo7YpsfzpCkVFUu6gkTJhf9cEUleBESyrU0AiZlHtUv6j2js/AKS8iUSSads+bmhGtyer4ES/wFABSzbEceK270gecrWtvdGktywZFE1OLiruVtaVtVq2hmRcs/Vzl5iBMAFVadUcKExLsY17HRlkuKnsjoIFNEZbwGdNA9DbmrF0XnyV8k20/2huGbPx9xCKEyaOqCoqnqIEqqHKDi/tfAp2Nh1YIijJDpGmXZqySLqqyJoXLpvMLbdqM5uQX8ViVv9Lsy2bV8ARQq2X9JpUtHr/zlQ3OyXU4UqIquED1FyvgmM+kXcdV/rkv+S53n36p9Ne6/anuLWr5969X+0xLN/QWa2p0jY26lPXD7TEZGjD7GTvSj96ciM0u9ZJqtsuzArVCdHRah/I04tETKuNvkghMknZZHoTo4MklN5WKkN6DwbqLiifVN+5YAzFhozEhIGIF9x2UdsqVccx2pducFzl9W4QKIZBoLmZ8almXDabW6t+M2lihP+f5FJDroRADQ2oPETARdxTeKHAMZUOIQBoakCjvH4+GJlbcL9zLv7sW6L8qhu09Jg7UDEPJIxJxP7eEBU4URlnMREJlhqtLgU5is+JtmuDjosIZaISUsIUktkwZbplKmCaNj1PxR8/F9mWwgBgQwZEPWik8KFmoTwQh4mkpGMg+dyBftVZf+pZD3j9qJ1fyD5/k3bF+N+y/cvtzPXln2Y7GGV2adb+Kr1czM2+o9YQ3GROmkp5l5RQkzNaJrZGy9byO6Ox2YQt5H3eQUXrX7uFkyaXQ3NpOUyEpT0oRQSBPEQOQs+Tl9vnkeJslmlnd3okRsJJknABDesMx6nXDCHKX+mCyUPIDGQzVsEhylgtp5WOc8Am719SQQMLNaQKBWJkUPJZKTnZkESs3czeWZS/FLiyATeaMDTTlmI5LFaDc2zOWLHVroRrL3rlRKZuattdZQabQeVdXkvMdVRARygGSrAtMxYgxETOC8ClcVWL86zLA+sqQx6ZZgKHvlJ+/DedxGq0UAbGBoUnqOubhkH6kjkB4R6EO+w5uBS1tm7ixphkqsMG/5sU0/e/u9nb0fBT9p/xfh01cB1fn8X5znxbD62u7bV+P+a7dX3f3c+dZtfzU2aru11pub
zWx1EQaWbLiqqHWVtJbM7MIlIFE2i1GUuAj8ELtgLeEDIpSgEJkEzNgWM0Miyu31U32QxRyJIyu2Uk4mlkhAFKCsMtKcwVugLgrmqaQ0Q6lOONEMDxewMZER6KaLg27WnAflsQQ6dJT0q6hvRF3geQ/T3DklM48SH1W4sCyfEQKPFB53r2Rfp1W5ISWNMD/Cyzr89MoIMMbpyWfKKthhJJ2pBm/t0ChmRAczbtpBE1inQGLfw4wOkA4ZEyYH+LwPGpxww9KwLLgsbob10khm/Ryxp4GRqfRjxeaIgBm8YzTYDmkujJQYAplkLpf5u46I6dQ1W9fVlqW11lo7Z6xXNv2tDf0V3PZf5Dz/nu2rcf+F2xe6mt6Qyer/b912vBdvqf2+NN+9ssjFWUbVaL3vOfYcnbEb041tcZNTeQxigalIKZjK8uUr0ZTKnAyNwnbNYY1miJ4piJNGfcjx1lVLUjHvoBni4yFiNWJuEJP0rqN0hxiF6TvBhBscsGZA9t5pMG86I3v3eDo0EZKKaGZmZmTwdC/JRiOJQx//vHXuzuZws5RcJH1pvrQHOuQZVKm/GFWERUcGjhT+nCJk1MXKqa8VTnCmIUkTqwEyMtQjUxiJZhXShFXIVDPK9nBZJGrY6DnGPnqOoUwMgxPLioeLP67Lw+pLgxkUIyk7MCVXxVqkZf7GVKWhVnoa3RlDTI2OMRBHWGW51BxjlRO3Xi6Xx2/88rCuK5fFW7uLpmZq8E6L5uzGeI/N9fe47fhQwOutd6+D0PHR+b+2V+2rcf/1Gj8A3N91289275uQdGtncZ9zJ4E+RkbwRilJmpmwsJGkwWtUtJDEVLdeaUWZyBnkK+66RMDkTkAVACSOGB1gpmJP00loYTGsSzP9hao4DzqgfFJKCpuxtEakpwQkFcoqv5dQJA0Pl+buuNNNO1sc0NLN4mdKXOhmVlACShB/6kSioB5Ol9qr7DNJNm+tteXiBsawHAEMiHJDmKEDCgwgA2SUGbrUVZ9sSFMRL2MADTREYN+LJQ/3CZW4oZUCz/HcVvPoufcxttH3SZ6RsPwVl8UfL+unh/aweiMcQxqJYTnlexusNc5iLNwBZCACgyi5mwFaSI4FxHJUUgfmasnovrgvzdezjF9E0IKZyAF68Shp+eoRvIcr/lPd9tdD4M15fvQ0/77tq3H/F7S3Rvxds447//10lEjyTug8BT+W1RBNzeVuIkYqEANWZYzgIDnx6qoodHrfMJ4SrHnEVM3KTUR5TplnIjtI4aDQNqs8o5BsCnbNK82SrKqP5AFkDzUAzJkcFH0MjQj0DgK5YI357Zzh3Bc0U2lmctfPn9c/gRPzSsOVTsXYmZF7x/A7T25mcFuWlcroTdE2hAMyyM2dVeh0xpRVfFLEVph17UEe7HVraASmXi1c8BWXCzzhXv9mtoGSlH74r2sEqoA4E6ujXZrR41u7tOXxoT0sbXFgbDFGjOvS7Fy2uJfvvZjZQGRolCpZnMYucz44a81sNVpzd5I9frCjbKEObozGGBmeTLbFG03FpLHm/Yt+8Udm+ufu/6XO/7Xdtw+NewGZrzzE+z2vXn7cfoRnenp5X24fwn8/9sHX+MZRiuZ4/fry+PKs/sH3fnQf4oPfO0l8hyrAGS2iXi8tP7LyUwnWk9xova3RLB1oRIMU1CAGTM2NxgWRyVzZSZpVTDVTIzMT4/GR+z46hrnlyJKIN5NVnTe0bbjEPThSzWD0PYLEeiHdQkOEGS8wSWcFPNqMuaINAFIglcoZQQSeNc0KUhHhmd4gIgJLw8OKTwsaOwdp2ZqFDYUSaWgOb7IULBChfYsxsq2Xdmk0Ezk8WQIMEjMtourgAWgezemOzEymma0tF4+9Lfqm+fLIHz7377/f9z2V7r7vO1KGHSNKrX5tWFfDJ0E6CnajJDQlMNGjFDSxfjoigYAvAJAAZchVaVOzhiMwBhSELWgPWB/HssTlTw50kkkfdCwIw1hWLUsWQbJSzAhDUNlH1IJJK6yhDWgFB5ZHRGD0zMBMVx4RKaXYBgkZBjKhUHIf3/6P/2m+ul+IC3SJXDRWixZe1CZHaaxz5l5EJG+Fs1l0VADMM2/m9vfFeHk5vrKCGzwlz2utZ5WDB0Tlyp1csHZwM2shxYMTehuYL8//0Xj8sMXB/XxtmM7zxP3h8gDMBGlUHhlQOcxRQhFSWIbQp89xulF3luALTfmOSj4qgeSMZdWFlrtzRy776rn/a9pPd95ftwyU2DdN7g1ywpRISwOdBmuEV6GMgNNJ2VR1rXSQ6fW4u5K9DwCtHeRISZONF5kRdzkXzJnGSMphNJpVRUACzkkNPJbMUXOqQVFFpesko0q/iorKsA1nwRxwx8OKdWVrXqixOWvxkFVTSWDJLmresVMv09xJ0tnydOdfzOun8477lRDZWiPpyLiM5+fna163bVOo954DlbP68FBohhnbNvZJODnns7sqSNOLv8OmjHPpUzR55WSq9N4zpyZMa1gvWBa21rKe71H5b16nMMaQlMccf/5Gv9kiA9IdENywH+kNBWJJeejMYQ/4Pgzp0NKWBnd/oRsccyL+sB0G/Qa+H5aQJ4b+Zefvl3K9f6cu/K8TJ/hq3H/t9i7C/qPb554cochiUxDulgtBMLU7CLorvRB2mBoXRXG+wdKGRUU6JbW2ODL2IWFZlqCen0eUqORAYpcyArQCIlJHknuj0WVmrXnoMOuTX6Gib4xxUiQb7gxHYpGkyHRHpBqpRoVB5rwsXBeuq7fGI0sekiKiqJRHYibLxlWW6bquE2pw2pDuLGDdtIof+gFH4IwHzvKubIuv6/qwrs/uEdG3OKnui+OyttaapIxwm5Z90o0Oy154+hlyuPNYAUAVzEgUChJDU6WgoS1oC1ozbwdjFbcbdv6K3jtJ3YzpYTqpu+NJyp1mNnrk4WJWTlbmKNWwkly2kWjRKgu6+WQ+ZbLqr2eCqRb3mfO6i3aQPMI97/TwVxv3Tfp5yPtH7Zc6z6/fzm75zzbxX437L9y+3M9emfXT03llze9R5lef1RjIoNBo9GxuLlkV2rNZ56e40zRZHUZOovkUCRchypZlkUony5uvysgco5aVkXEs79pEiktxEM1taeZu7t4W44FfV8LLcZ00PyzOjQgvAJEmKUwYJZRoJB2tOUktrgm+VKFmDPIyk3HmKWY4NCJwGPdlWeb00qwigSddEndq+AU6HwX5yjJSGWSD1A4CyWVdoQ1IGTLghgr3ltDj5dP68oEWE1M2s79EHsHn6eCfpHxBKTEDUplgeIP73C5+SsUwcAfElXL9vOaKK9zFD7a9GPq1wKkuZISbpZlO93reO02xBxYpkw6Udg/6SLNgRGUzJaIxEWBrb/vh206ugwl2TjLvHvlqv/SzVc5vz67Oc7fzd9HeWvb7X/TLtq/G/Vdt77rt51v4Cd00es8RVBjlpEMOAelMlo44Kl9n6gIuM5fVhIgorruhwnqzOQ/tjkxEgpQwkzPXNrNgJpHOsbo9rMtyGHe7HDGMWbX7sGWO0jIo+3660s2UKSfShJRZsXm0NBp
gRmOYERiZLE/2/qZB829E6MgF9XJWzeA2qw9FWET90jLZJao5p4WI84bP2pgT4vB1Wfq6AqB2FNuyq3dJ4Y7W7MSCSEoxRkFYxxC9A2QAkOglfpMiRSZkeXD/BSkhK5EGuYOG1tpU5D/ms8ysMABZdPt6UkehruTU19HNLSBqrhVxhrgLRxGgSnkVS6FN3EegN1sUYpQ4XDhzaFg2f2mGXtFXTpteFwvcMORX/vs9IIYPZoif3n6/bnu1X8d5/2rcf+H2ha721m0HUAjJKyjmPP4cyac3p+hl2Uk0g1tVrhNNDTLSShWM9RWGdkaOCRBxKM+SPdQjRYdxCPuIkRiSm5kyVZRpmE33rxlaw2VdHi9r82njfJm5nYmK3bHMb5FpUpQoivRiMS4aMkTICVnRwNNp5iomPllV6kQlAlUQG3eW/XBCxRNwN3thUM+/RR51b8syoROAkpmdtxStlZ5BlYry21wxJ8VEzQVoDe7svZsh0wsWH2OMoYibO+zt3CaAsengTaZxGjhJ+y5zeJuolzmWxb3dIJd7zUuCy7JIihkLyVN5Zl0vFX04FK9mSfKcGv3F4DFSdUlDYQkYEJGD2Xt6N9i3j9+E5NK9PKTe8BFe9uH7tw69LbzuvWf3fsdt//nto/P8Xkz827U4/mlT1Ffj/uu1j9z2HwXcq9X2tEE0a81NppRhusBmzkAwAUPQjHa6qJM9WeCFJML23vsu0kkbPXqPQpOdJjIznKglPjEZGq35uq6XdT3il6oCrsqIkETI6K3ImoKZKGFkaaiDdGJIOBTVC6cW1UnDrchHqqpOSBFhh9+HaRlTpTHgvixLa41TGwEApuTuy1DqRMTPPKu7ezoDo6SZLd4Wt2Y+KlMgMzOkA09nISTKvBVBlGRT1fL4hhnArJIoIJ0soYAiVcZ0xnOuGm6EUSZwB2FxPi/VuQ7a4hhRWjT1i90m3nLsqXuIMaIUK8vE15RhRrPQFKsHIlvCJJQQsDdYgxu9/hYf9H0rfBr3c8fRm2+9Gi9t7j/Fbf+7z/Kvbr+C8/7VuP+q7S0a89G77x6pylFiSbbDzEyppEhzd6ZR0KSOlZyIDphAokBN4ACkjxgjVfImI0cfR4Ejo0BYCJAR9CzdmJnZuHhb3VAYfObzOGQPJHpB8LSEqMNicZbNAOCFSJTIiSVVOoowBFgqYTzoYhKLal5m4kSihZAdiAzdUaoKExa4JTTqzqrVPX11MyFh72W5Savk+/LbW2v7vhf40do03/uu9jgtafnC08knbzjJYZxzFK5lB2hRQvQTRZluvlexQwBVxUltycO4FwxTszJ67weFCefHSYxRx8cd0B9SRKD+ZeKY+QjAl6kzPBIukeZtXZYLvU33wM3M6Afv573eevRMvdwnAGxfYsv8Ui7q7xSNedv+ubDMR2f/6N793OM/aj/3V71CLe6xi3f3/9zznO31/h/73p/YPrqwL6Rfn38PLzIzc4UEDqBkE0nQTDLCpByhGF0RBqFxEUcWTmqZGcExbIzMxOijh43wiJQi5fCWY1weP/Xex94BrAvK33Tz1Fgv6+XxW1rrI2LC9NpRRiRJa22lNcFGqsoux+GlVh4pgMTuNDcDmSqRGTp1MKmNyEyetWDXw23vvSdy8bW2zdraWmtNETShNQDIgdZQ5vMoTOit4XD8IcGstTbGyMx921a28skjgtLj5TL2fez72OOy+NosTkBMArCPKOOOqjpNuXNyaY6LBlCrHAAjAiDdyJJ+zAgJWFeYz5IdCYSgBA3jupuhHTIAOKxmufAk3TUhr0QEYoxzUrv/W8eU5ENEKf0YDdsOGczxsPrlmz9/8+c/P/7pr/7wqT18g/XBLxe2ByyLeaM73fyN/3eGqW84OzBlK17mx7xCY16Ngrn9pvtP0O1wyyfKf3zQrN1//B4Fencl8aP1j16N63wrqnd/tW/anNSPbzsPmtdzYoiYGx9970f25CMz88qO4fRmZl0e6Kue+z/Y/m734a1j/gWs5v676hFmpiJYgHQOpIwwVEhyFZk5MJBUxyCPdNBQ39V7xMjeo/foe4wRkmKoBGvFMTJmMiotSzBdNPOER2I/8oQMQfIpK0+oBB2baUZrM4fEAojP1TPJxQyztFJWJQ3zQvcPo5BWmoupBKo2bMnF2P2S/9bsZRCzZpKjkQd55QgF3r8LIPuRqxkKjd77GGOMcQYAwbLXLDs0K2Mfxn1C9AcJRxVjmBZ2xqyLvjOvwiakfaSIIgLm8OLIO3AGEprfPfHXGYVn1zgtxv0GMDXia3nBee0isT5U2Hbxy8P66bFdHugL6QFaIkSfT0eGMDGPr56/5VDYr8wDvmzAzcC+tebn/i+Mmp/rkv9hXPh/Rvtq3P/+dvaqn9W9+B7y/qPb9/NzZsZQRgZlqCjbrhhuMAzFVHIv9zJcpBePZozoe+x7FsJeWMoJzk7TsM8kHTckkbVCoBLcpece8Tya8xzEuwAcHJIkiYkwF2IdcVYlLQ7fCikhltFPIE3llqIMIQpBUgUDHUBmmhlUSjhZGZ52VsOTIOBgDVrcvv2cCO3eFk7kepRxX8jCrSMiYkQfOeraQgeGDeDM/JoXcqDwPJL4y7IfOaTndGMH0HR7oMXZmUHTO6NMFqjl98a9LuyeL/vKrL+17PNshkpe01m4QwTQVrottizr5bEti9siciQsoCZLMI+ys1IicEQX5gUc4Fhr7X6SnSrLpMYLj/KVg3lvi1/ZZf1MJF2/EIL/h2x1278a97+zvbXst/H68fGvzDrJj3ju93/vPxjgUEbEiIGUKTNGZo7enSXU3ff9GmNEdKSwNnKQrvSI3PexXfsYGmNAhdUgDj/UDPsAiTadMIaAIAmDtIcw2igIJQlIClvrwhwyA5hV8qmK/NW2Qe6OxdAyihOJyByZiZKy8qqfNAkqgCBLQMrH1spzp5mkPvoYGfuounFlWJmsixkxPHAa9zkr4AbE4zD3cRAiJ8cQyBTndCAKvfd6Ijmd3xNbr8c0zxoaEWX0q+ZGzsp8ky0ja5huOyfHvMifqUGSDm8wF81LoL+9tHdzjpxzRh6A0/z2G/xyZ+hnR6pC3lZSOdBRoTxSifAwja6xavQ2FnB1ZYFoNfdapoyu16vJc6NgtOOOKmJSXR3+kaeClz7KfEu3/bqLl3xkr+99nXMa+AjT+Dds97fiq3H/h9rPdd7fddvvz/CulcfhI2EOePSM6CkEUohQilhCkSP6ntsWlXcKcO9jrqcVMbDvo+85RvaOUgs7QQsjHKic+DSQHMrCdsBJjx97tAHzOK4khy5g5SmFIYF0ozOIVPZKV2qOZUknKWJxTJiCE5Y0VJjxxGBFHrXiLFsiRdJZjBEV1PP2zk8cJm82UUebt1gzv6nOUMa9b7taM7OIiJBGJSUlcgIQMac9STHr5/Fm3FWZSUJrGaEZwLx7pmTW8eZQgrTK24ohM7pba2yNRiojErBR8LrpyGCK6Hnj2JzzVF2Jt1uCUi1gztKEZd+rNshZfGofAIIjPJnwiz9whUkRSces1Z0lv6ahXF/8lluEaN6lW4+dwQ
Y3f2tqX3noeDNe9IEA5Eftq9v+U9pX4/53tvsu9dLQfBiQeeu2Ax/y3F/tPNssFJcYAuJYp09zpBjcN1039W1iyhtz1qYTY2iMHCMzsG2TcWE+SzNP7fI745WJTVEvWyMSjHAHBzNzjDFGPPcFisxADiia42G1xa2ZiCDKmMNMYwyg2eJAkkt90aSyJ4UsjDopTZUbSnruz0i6+2WZpBRJZymJGXW8Bb81QwgRZcErFu151BWdRadjjFHkzW1EZpbmbY8xxkCkJHefZVwxb5S7w7gsJyumMJ75vCOKInmjshjdzWFR6FImMOeGqc1Zxrn6TGY5+JKrwudVLmT6xaXePp3c+e9Aqg5ZGEw4SIZbcPEE3AlwluQGAEERHjEyF5H0SDACSqVMCmXq5rnfddp5q/d9LxDteBa3NKX749/2/BcgDzCz0l7aa3zRc3/n+H8g+vUHa/d346tx/0fbT3feP3Lbv7CMxZ2tv21jZpeIxpwIRuagPKVEU9oI9B6ZeMoTikAW8TnKykGCNyxFyFs8MzJxmRq2lDTihIORitOOSRpD1yv2HZ+v30slbwgjHi/45lN++5CPD+aGxaYZapx58zGKxylMCTCGJBVaMytgSywPVKIjFXB3Q4Nh9BgjLXl633Qr5ucYo/eOPp3cUtoiGRFLxIEexBkyLc89Es6bUGXd5KJCxlTUmbAMDGY3YsxpnWjAYdZPz5qYSV7l+Gui8ygxhoK6KuDMYZlJy/r5bjVPBOz29I+/k/54r4CWYcIk8Nx3wDOIgjuLPEVJCXrR/1v98B4lbVi9JJXpMsxuMxdJB8B1+4qX/sdJm/kQc39l2d9un7DMl9sL+/6jR/9btrpFX437L9N+Lizzo+9++UjN2FrFyQyA20LSrBn8KHeszLEd+uNl2WvNfmK1VUFpWS5tYYT13skbjJuHo0dHjnIOEcoIbBuenrBteN4QgZLZaob9E5xojm8+NeNwQ2tYWluWWYGub7tZrfoLShaAsKm/iMpfSuUhUfm4zlpImTly7HvvPSy5LJfW+xijwq4Ceu999AqoxgRZdOAtcQL0h+8+94CNpOE0mVPPYBo1lkrAQWq7M2f31hxvIe9DTO0EaWjIJMrEH48vQkKYyfyFWVMxIwmgalIRQGYSOhUX6sgtx6u14uEQv9xzp3jDqXRWxRpReu5uLXSX/iuxqqbPH3twZk6vsLUTijkPwMt0M3wwLt5iKScs8xPbVzTmp7T29hl8+a793OM/aq8c3ttJDr3jicDerbjx3hu3wfPB8a/P/zOv59bpXn6B8O7uH+Gt484uH+dxHMbhfsOsiBZnjM+KDd2bhxblAk+GrHL7YZZRBjOWdEMzc+e+MaXRce3YhZGgwVc0932PRjxc8O2ny7qQGOZqD21PcXrBgLAuWFpVcrPn5y02oB7RBu5ogRV42qGOdcU3j/jTN/jrX/E//rr+9a+fmql5rEu7LARSkZm7P3gP9Z59zwhTOnlj5maOnFpmaAu88TnodLLtw2L0vnUAl3YJQ4+xj74aJfUxeu8RgesCqvSvhFDAQKTWyzJ6H70Ekx3pYw9J+U3rtJGRSFmI0X0f7MN2oUrUTt4izchGROWLCoq8rYTG4cW7T5p42BjqS7Y5R1aBK7Gkd5+3QWpqhy1wzoD2c9FMCy4jzQCbGjvutrAdRG/WImDwCiIDWTVCSkZBLXLUJDfGqDKuhb99ep4UHsPutmPZoNIVXYgN6YwgaPKWdPdso3p1isaSvSCAqg3Lo4EHK/+IOB9v3Ijo74+7I6B6K+pbRb6PWOsU0jgTFW566DzetTkepdv+2xcYvthejcc620eHvddWTun5yQRAShkgbFakzGQVFZCY5+W/+d73m+WHb7347HG/5acF/ArL/Ivau/DLR0/6XNKSLHS40RIOipRRDTA3EyALGrMillqsh66xgDvM0aKqoZp7W9yAvKy2NHOTIpUZwFKVsA1hELE2LouTjFA2N4WZS1osV0cEvnsGiTHwsOLbb/Hnv+A//uOb//iPb/78p0szLQ1LYzNE9LFvvWvfxxi572PfMwJQlCJC/caqEKWCMgIBNXkoM0dHZh9jD6eZeu/d3acQLjmiF9Ji6cJEbIQgNcYEMQ76400qHbJixbhTrIz/qbnYWivx9NN7jaHkIIq+glo14QASmt9c44iUSFMCq1+m5EDUU+EZFz0eLoDzgxPiL18bEGmSUBpqJRqTR93DvIVSJ5oSJR+ZJayGCdnPrzgrRuWMm2Yc8V8zO7meAPxjB+UVuoKf4KG/Pf4P5XQzi7D70RxyN6g/ttN/3ze/9LDf3tWvxv3Xbu8i7z8dkHFrjdFZZaVlVayDam6mNFmKtko5TNndttHHFEHEqLI0BjIvazPk0nhZrZnSyERV+phoT4LEZV3WdSXt+fk5XFAx/EiJkIQ0u6wJ4vEBf/rz+h9//eb/+J9/+stfHj89uDGaoxkyR98yRyWaxhhj37VtM37rnlYq7IepurGzEz2UmXsGAtGhjmaJ6OuSO4fbqNvSIyJ7ZlJrJkaMMu5A7vtNmEUS6UXwzwB5Uh6dTvdy05u7J5IzBwpSxlAgAbQJXbCEfqe3CkIokOlc2pVgjKy+OmNMKoruofnDsy3LjqPO9Xz+DWQCRmZxXkgH4nTbAau7dMwrJQLhylF414mWlL9NmLcMIVXrpGxHQuZ0nJG8db0EvKhKkFi/9Xh59OaJ1VRsd/7/jUzYq5fzBv1hrPxZxQYF482mA6qzY+cvGCR4dRtP435vOL4a91+pvYqLvvJ3Poo74aXbTtLZgkE0oAN2DKqobE9BJM2wNsfaaHgca0S6j2XJkYw0iQLcEsil+WWxpYFaDWrOnpLUmVS68/FhWVcnXeHMjFnRSSV+AEDNSK6Lf/Ptw1///Okvf/30lz9/+uaxGQMT/R7R933f+7aNfR9jREinCZte5wtSJjEVV0ju0WLv0ZUD0YGBywJjfgpFKMYYLOWFVAhCJcUzhSgP1bOPkcVxrJGQo2ffe6bMLK2bWWtWqsEkvRQfBzRL6GlkWcMzVuF1p1GyASp5yOkH25T9qUUAeu8RGgPjCCpISB7BjwMLVNa6HVay7CUtGTCaCICHlU4Ad5Z9HHHXG+6fmRkgC9AhENW/KmS9GCU4IHc6ShYYGUJUpY6KeriyyrJWfZRXDsfZJ996JEcE/n3KwCuTJOkjGPN31u7s+7Gjfn7OFx+oGvz9X3g3Wd5Dyve3/atx/1Xbu277+RbeQPP3bx3mfSGHszk8qsuEEgrKCCKJqHDZ4mbwv/zpMUJbj4iMRKb1yIjIPmhYl3xcsC5GWDO6s6iBJjrgzk8PPjVSFjoYYcaWmZc2MkHyQb6u66dP67fffPr2Tw9/+ubhspq7YuyVTjX23nsf+7bv+xhReoRthS+MUeA4lHDnTFk1wJAHsSR323f2HdkROzxhVa07mKkYItIcVVgUN3Gxw2c60kdLeD0TVVlw33fAWmvt4caQqTgHyZLiEUFQdDMAVb1EMSvQmVDaxLN01Z0EI8yQPsGZ2JWJkTN2TSINfswT5PncMQQJCwEgA
0bIWDe5BCalglxmvu0Ei+b2/PYKNmRgXeeigGQh4dP0N5JwM7TG1tyMGchABhVCQIFsMkli5A3DfdPeri+/DC3ef+oP4rMfze5+NA9DXn4684geSPylkZkvt6/G/Vdqb836Wy/mPBJvjPsNc7fWbHUf7guSGVtISFgDkeWqOUknV/ekR44RZp7pgPXI3tF7WDNQi9tlybWREJnuJFjEvDA0bxeXW0aEaWuM5uaegGldKgCw0x4fH7/59tPDujw8tmUBsY/Rx3Yd0ffr1nvPPqmHmSgdQjMjzJo4Ur2PLl8XjdAIAeOk7Qewc9sQG2JAOxZiJFLeU9azcwBkAIAQJEfmiDGKzA+VW02yLiCGKk13jOG2XC5sD/bqWYyRPUblWAWAGRpzElL2mV5PSkVGLzlMzvp1qHSB0ZGRIHTQSUsDEg5TqfhW0Yz5rM+iy0qg6q0kGLWGk+kIMU6mjQAjAbGyi3XHxTw5TqV8cPfTpnKZDBQxy5pLEmIwh3IoAj5EV5pyiMu7XfHdl3fO+zsHvMXfj8N+31bebrWksqhV51sl0lMGnZPj+4utVF55gafRuL+Ar8b912sfue0/BXA/nfdiYLe2pF+QVKRyyygUIWAiwiAgi4yhCvcAcAJqTkthxLKYlIvr0rRYCGFU8xZUppAKorVcWihT0S8LwkCysvWVViXucvWHh3VdYdalnh2j9A/6OHOFJqWubJyv5MECEWiD1nzRPsYYOQaqUjY4GZbjqr4rdqiDARiUSFnfwwQKGSghvFqeRuxjjD72zCTlbgxKOujtMUb2HZm4XHJZ6KAdgFYMpbL3vl17jw5YEhUC7TkLn2avPK8krRJrI6ipZgkJyJlGFHNmAAAzNAcbzbySZ63VBSuKYTU7xmQ0Wc56WhlpIiV3PyBcFdY/K8DWuv8kkxyh2syoCh4HTlIXw5FSABYQ0ojFuSwGZ4RsmIciiSG2tCQHs563zFh+6D3mzjvMfUaGX0KL947LHxNwxwyozqCFSts6U8HJnwEAIe5m8F+g3VuJFxbj7qZ+Ne6/avtysPQjrOZ+290jmttiy2AEvWUfyjEyk5kmZ9n3TCWlpdAZz1IQiJDlcJhiiNHABnjZfUNrGD2TCVMITjVkKJjjL99+KkilmCeZcLdlseG5+nBmjOj7GKYxxrY9T5nfQodppEFQJpcGIGWSUhCNBiKfPo8xJma9TIXblLB3ju5jJAKWcFamkfWjVJ4UFsCRKpmJ3vfehxRm5ke1oK1AoY4IFMvTF4p+3G8r3H/E6Hvs+z5SyaiiUD0xDnl0jco+lVkCxlnfDlufzJl2VGKaj05glc9uOIrzTToQoJxBSAEo1mUqkTU5z+eumrXtjDjbwb5lzbVQJWExM8lgLR2GWKpAB5uXMJbIgaABKdJ6G0GlIZFhGsrd5JAJgZziwm+d7vv2Fmb58vH4Q1n2ahWFfom5H28BOFdS1C+5UHkXFrt/2c6kg9M3/PIZf+7xP/3Kznf+vhO+avdBBrx3nT/x+l8d8Ao8edteHfBmEfr6tG/HRm1XEo1OjxczySZ7mJkvrcWKBUhpSTANzbRLPTKICIYTItzZ2iIRKScBLMyGuF43o9xJDaa1StNEKIPAujgW5KS5iMB2/by09XK5zLJ2R1svntnHHr1vY4zIUaFNM2YihiatRhRNtH3Mwqcgpdj6fr3ufUfvM5oKg9hCiGQoMnxE3zsYuDiWFWy2j2zmQ7FP2UsaWDqOdOu97/sVQGtmaVNzYGgM1L++g0BbcgTL9yyhRhCwZmbuy8hdiRRCAGGGytBdL4XjY6QOxcRScxwgjOdDxOrNHDr4PAAyBwDRvFltWJGfM8cUB0NGMeVx0GhAqoBxWvUN3vcQWMG6VoEWAFKonwzLmfSUmaXq3BZiKAU3OBnRo+++LKGrETTPaKR7jDRjMDhm+dajN9pRpursrm/jQ++OizOx9jyypvDzkOPIua37/bcXb+3Dl0D8H40BfHn/Rwb0PCwlzGB4UCbJkLV8oSIllLwoS+dHGS/u1Uf24UfbvdjD2Sqf+Dz5T/Lc/1jT7G+6veu8v9qjqm7f3LJZLBaRuQIDuQApLKQAF4O0UG90MwPDAEVq9BjXKvDhSBbGa0azkm7V2aranWDAZfFlsXWZirXnJW37dsgVhu7iRZWtIyGSEpWMUIzsTFJAZOboue/7vmOMyZwpRzMHMnPbsnfEUx8jYsAPgEnJSIxUZmn/xsvFPiJijDQHK3IphphkKEdiJEIgkWwyz+xTP1JWki8J1ZEJjIJHVHX/oFmwtuiRxUGRgQm0ZgCMcpc3W93c3Xzqv5/DODGxlSFAAVFiigfXIr3B3awZDXQdf6d15a1N3hLohwdQSweRMEc7CDlHyNbkUUy8Ix4LKTSh9m5tIRJKxaCNDCM9GoDLW7vzChP46R7679ed/3CSQHEAYKSQJJBTmvqsYmWVteHlvtz66um61cz3s773vsj760s6dn7JuL91J7+2f157F5F/uy0TJGt0zWDXTKwPiUk6VGVQeXhbQyx5rcYileTIEdMO88iEnH6hGi2VkVUtFMTUFFubN7eqDwRkaqae931T+cwzljN9ywyEkLLKnIzMPnIE9ixB9Zk5WVBMJtxRnqN5y0Af2nZsV4ynqwQmvFVM0ugGoEdQQZ293w7rGRJoaEaDqkZg0gIKIIAhDBUXxWRW1WEPxKOGHCGLmAY9USOYRXfokXc5kvM2G9jcgKzidN6suXmjmcXYy0WlG5jIIm0yD5qLaJri9SCttRKlqayiQ1HSDMhTeOD02iSBNwo/GMSs0K0jUQAofiwkk4cwtWikmukj+57NnKQ5YtCbIolMGx6sK7vNCSh4nffb5wFv0Zj7Lv0KiL/v/LojgOtnShH8Ou3V2Lz9BOKeB2nltINMqWimJJE6FP2P1ZU0FYpwnvDd7/0oAHsKQuDlCuB++0Pj/ha1OLZ/Gdjka6t2dne8tO/3+8+3SIZJpMEFgGsSLZPkvqWsYOw0gOwyJ6X0RGWNJ3VGZdF7FevBnTNYZllAKetOLaqinGvmAY2jBt407jEKLy6vEkYoCVrEgJAwwSLV0/eRY+TziDFG7znGDDkWIzAE0MwWYQ1FH9j22DriChJLm2n9rTV3MyqzZwo5FdCgEm6ZSUCtVSqmEVYG+lg8ICYHBqLDVun5cKHOwQoAmeia5bp1AiRQHHruZlZLopoXU+EgCTM2gqbTOSMpgrxRWArYwUQfKjJhRaAhz25QU01pI8uM5vDG1m6qYUrSGBHAnOOrlrccUqkJ1fqpgnl5P3g5JYIjRs/h1hZEowJKImmoeevUTb7vh+eel2um95OVXr18vQx9cx58Ea//pdpHxvQj+/b2eo4rBw7/vaZQnjXF6j9VwnjVOch+BIHu3XYcRv8nX86LC7sbvzPAUts/DsvcTwVf2z+pveu2n2/hvi9aQESjWSVLMi8gGdGVSCTSBYELLEUYFtBCaQkqSRbLpURacECfpJ8Boep15VCf2ZKZIjULXGTeQa4r77pIQmEA6O4JKTUCfdfTtl+fc99xHXuFNCMAojUsRHO4
O7GkfIy8XuPp8/b0hH2HDzSHoY6pgtEZSDciMSrUKRw1HxBEq8LOzdIJQznAIQUZ1AAGIGCAAzcxyFub7OTD+WXJcDLJZJYQjoEkk6CUmorFxWCkWSYREwcPQUq/q72Xh4Jj4tRxK7fOABYuPxPTyjU3kWlu1TuOf5Qg5qxlCrIANmTlWMVQUBhKQDE0rxQ0r6Dr4aqJmmC/Ial00Axmbs3c7frSuL/toq+sM+4M/Xnw/ZRw9udj5fHheX5T7e3ArHab7RH3bPeCYnRoKimi1sLXfX/3/B8a94+vBy8t+9tb9yOe+9tzffXcf9n21qyft/p+kJz7S2FbtV5z+GKL1jCLCKTHQAaJDQhY0sFIkJYxRbIFslSqym0HyXJND+oGIVMp7pbI7wuskLyrqQYAfKjtJFJFTYcSdKciFfsY1y2envP5CfuOPWauDcpnR1ViWr759KdMjK6+j+06tiv6jgysRJvVmtjMz+p6NNKtMjsVOMw7zEGHNYAm8ADGEUJkIUUzNLr32PaeLedclUniLPFR4VXNLCHOOJVK57FoLULMbFsIa5FkAFOkSlAxrSaGQzoYM1v0EIEQplhjLXdwgPKI8wLmwzHSQbeSHSMpqN6mqdx+skx/1coolYJaIpQBnWY0Ac1iu3DOacqgKsvn5WqSpPyI3L41yvdd961L/pHP/u7Om8t/9y7wm/PcX33q9vEptT+jvpzxDVVWaiozZrWDew/9zpz+yPXYB8Jn52dfmfV7o/Ejnvv9x36zk+rvvX3ktr+y7HPboEwBYqMRTl9oIB4/ZWzWEUNKGYa5DHKDgVTQDBGIqXV+gLkkvWz+wcilxKmKVTtmao7fYzi3chl4BADmFMcCI8cQFDmCvRImQRhsQQMUU+6qknoul8u6rq2t7ot6hMbex94jBG9oCz453PFw4eO6rqu3Zu6BRDJllMFmOmutMGhN1kBn0RSR0173GCM1Dmcfga33676NdfLxvYIJmpWPrOKkdyNOSbFc7TTNIiFpB8pzN1myhAzLo54q6FLe2I0kzJBZodq7h65Z+OJ4OnJHa94qymo3a3u/WCJ1gPJVrqo0xQzITN2UajCjqyThMoOZweBEM1vMaSiUSTq08mmkjEc+bpmSVM3ubwF3lG/wgS9/bp8b5bm/fvc3aWfi0KS+x1IAGGf9dz/SowtWyqwKxT01ModTZiStrev9ec52lnV83X7sZryCyHTHyPxJsMyPHvO1/YPto0XfO+9WVB6Cour40KgGz8UtDUGMjADlJlIWMoenEybs0XsM9R52LMxPezHPr9fdTmdipF44HbMKEhxAwDjrV0SlQYbQK3sTZs0f2LxlJnpf5u8wLMuyLL4sl9baf/7nf4+u56f+9HS9XhWj0uPXx6bmXFe7XJZ1tXUxs4G0PjbUatjI0usS8hDOrYu8r7Q3xk0hoGD63vu+76e2OzNJO6uVkhVHuNn3mu/GUBWuImlu7k4veRadbJapKzClm98+TZRFNDsILeTt8RpK795M5pxLljYrvZLCGQoHDz+93G3zxlKRTKblLLpCE/OceOr5OTiKmGickpDuxZlnRX11GHe01577K4cdL233l3f+Ix/517YTh7xfu0iCTTpsHjNuHRAROUZGpMIk+KQy62646a599L1fvofvXKHbaevbvSL63d/zk7zbP43BT78j9xf3aiXy4e854s7HQme2O6rQy+vE+3lfH92Uj67n1QF3+3/0+vnysBfyEW9/5pfvw6vLO485Xy7xKGkpDmMegHDDuiw51Pdsi6TFphXIy/49mAZSjI6Ijotd9ACNi896eMY0AQqSPf9KhfK69+u+ywwPj2WFL1VZz93WtbGVY5h2+WGMMXqENGAyhCkST3smPJdLUPueV42RyEw+fG+EOc0fZZdnLd/vGJ/ju++0X7e+XZnwBUYsrrVtvqMRa7NlAd2DDElMu1wgcYilyBUqweBLuZk9x9iBVpLrkdZ3Jig6LDv3McBx8XG5Po1ml8vyCbQYmTG0wzra2pwZhqGELKQq70EuGZmKkhszMzFTKSINMMFVFWinj7wlCTrc4TOaiiKNZmBUWRUJSLPJ9Vw81xWPl2VZ/KhFnmPv7tZaW3xq4EipjD42M3NbjDZr2UqQG7JZoiXUi35JQxOMV61TRIyWrYEOKfZ+ZbGswgqQcgyoJR9AI4WSkpclMAMRVaPK0myCOQCIhS9bjdwzCHy/7APQNMOAd+OCAEuZrqqwo8K/BCqteQ6Qc9gJZTZOm8tzHQrGLbZ0P6xeMI7uwI1t+35x99YgoHqPAuT4/LmOyRmOgbvDzWOZtVMypFlxC5L2z4uZXxZgBQ4NChHWq6fqLHZ+EFTtaJy+f2amt7ybCc75gM3X+ujd7EAAtL1+jtF+rxmqv+VJ/hdpfANo8vC062UJhNRYEODeDGm8aG3UgCKzslYdwZE9R8/RE2ptxar9+nlUvEd0kkgv3VpgZG6j934skWkw3/cdSHPImIQDpLtzVKlrsPh8gcnu+Pbbb0eyJ8VIpcybF3ayN1+9Xdy+GWFPn+O7v/3w9Hnfn/exJwMNs1S3HQmf3uBOmngUdCA000eKt4O5wKdNDQDNeg8j0qrk9TaK0lNs0LKwo2qTYirZkEZf2oLLhVFFq6Tiv5skeUztSh5jGAASTAjLYqTsiPqeYQnTXrj5fHol+HKAXTqEMM3gS5X3i6kvlhkBqRYKfNvba8+xfrp3A6kqR8tZ6k+aaatnHa6bZbQXp9V8XY7UrabSzcH8EFjHy5df8ut/C277OIqN4BhrdQEPDw/z1qeQyjG27blv+/V6hdHMkllVVNydzR3D3VtrxSQm6QaSl2+/xUlLiNTBLtu2p9ujuVvWtXU9OcLnKgHA0+fvSRZ2OhdaJM37fi1dvJvaMElaHTknzl/tbv5S7dUK8V99Of+sdi7xzpdz+/zJs1+izLwk0s1CEpKRU00FCmWlnE+Ed4AJ9pjRvKSas00Ilgn1GHuPHgDQDABG5uh7a7Z6c5VUYbq7WWut0UTTSDClodJzrBoyVTaapB/M7db+CjWhxbDrs56+23747+vT533sYICANzSHEY1YgMuK1rC4msnLrAtCQNCQIhWpAQoOgocsweEjl2UfFRyeCUMV9wSAzDGy99jHaGxszmZt5SrmdX92UmGVOGDJWaRUCRzhVYVuS+yc9zMruGsAaKJNI44sPLq8UZTsjxWUVHyhxd29tYn0Z+YYWZIyuHN1j2ZFbzS2czjg8O8wcbMmRUXwzHJyImuhkTOuW57yYSPuz1+/0PIoMl4G8A6/xyuDjjuP5C1689bi4+X69Xzz1VTxT2qFvN0WWDP4nFb1GntHikL0/sMP31+fnoHiOjKgwrmOYoetjHuzioqouZvhcb1E9hxFkqnsFEnqOWaGto7QBSDp+Yend+/MwaKpjkYhqJn0gGOtcqyZnGTKzif7uzHu97/53r7/zkXlflJI497K6/zI/GAlqLiZkEqYNCIxeuw9+tiRA6xInok2UnvPfQtyGciMbO5I0lpVMNtG7JF7KAR3JNBTo/dMFKkiMfOtJ5mDzR0
wQpWFFy416IfrPgIjCJhZazA1SGh2uW796Wl/+tyvT7o+x9jgWpRD0mp4cLs4jdGMF/p6Ge4zg2kWWD2Q9AyMQzQYKBy82DqQkEAMDKGcsGWZha4hwaFEIHr2533YsrVsDoMv1szNmKhpxI4swynKJmQcCd4EqNPHEipcUUZTqSChROP0lIuGrxJwI0hzaCSadFArz459Iv0ldSBO/Z/q83ed4S6z8ehI8+WZiEDSrdlk0iNCZlUgsPAVophON+0EK/99/n3ZXnnuumO54B0b/dqyvzXxOkmsxx78Kjz3dzGQzHz+/BQRRXAkMvY+9j7GeHx8lBTKzDwqEgwzg1r5Lt5oYGaaobn/F1DiG+ZYvN1ylxBkAuMVTIQ5TlprCw62cUSY37x4zpqCCWBdLpKkoxQyynfCiVIL+t0Y92r/Jm772e6dsnsT/7a5exKODDSHcFllaqNZjKLHSBGpAU8uaLmslxyhKGYLxywfnbFn7z0AOOBIcg9p4HIhzFOMtJSoGUCMPjNvAozMEdxHjqDRzc3NDC1pSvaIiPjhh/H0tH33t6cfvt+uz8gBpZnMwpHRYCvX1Vgl+i5tWfw7MzqBjJxjcNrQGIeYV6ENJtICMx1/5BRSz0RgFvFAMUANCfQA+/Xa0HJ/xAMWtmWxRmgLZCgkBVV8RVK0kgd5HXUqPkzphZmfKu3z8bXS1T0STcWKW/rIAGwBJIYyhoYyc3gkZ7HuSeovUbPTOtyv2Wsa4CFGX+v3euN63ecMcUd8woGW10cSxaLHFNGXJaeFL9JF8iZgMoGLevfYc/RJ3c1Mr/0wvPTJXvn4twt7w7H5p7bTrFc7y6Zfr09FGYLUx4jeASzLUjnAPcY2eu99RBRKs7THiJgCPhElarQsS0WwSC7mR35Gtclzr0fQWquU42+//bZ++xjTrFecP/W5FmF3s5GTfL4+4U6NogpVkgSW8zf+boz7W7N+9IZ/8YX9g+2j3nw/ou731P8lQVl+Ur3eR0cqFQbQvR51eDT0fd81YovIBK21B2+XB+2DGCP2jKAiIvu29969FukHs0IlLs6ErQXmhNIcBkNajtJIMrcF4EElVwLmF0uNoVSGuI++bdu2j6fvcL1uT5/79Rn7FbkjRyLyocGASkxy88VspV+8FUkEWVHLLK5L1aSexTGOknVKJLNjKQsYypRGTi9+RHFpRvH/YAhhJAIIKC1FqUGmAY2MQFRgb1bSIxyQYO1m349a0DijgG8fYp4HOGAwmz/IYDK6TASCocEZAnm3J+jw4sEKDxxxxaIGldjZncHiaURwTAnVf2IAQM4sSi+Sqyr+TrgIWJIGS5odZZjedtRX4Mm7XteXYZlXFv/e5cevwnM/h9jpJmfmsixmRiHGQGRpVkv6/vvvM7PH2LZtlq40mtmnxywWY2ttjH59fpa0ruvDw8qpZ6fMrGIDZnbx83n5sixKUzMz/e//77/P1UMtBUqhr62Ou7RBkufijKTRa4YoIqwZx7g9r9+Ncce/mdt+76Tfd8QZPDwV0o/VcuxRrJg0MyEUe48x9o6IrgGHrUDCpEhE/rBtuY+xR2ZSyIHrNfddDSoGuhmdbGZrW7xR7j2jK820cFncBU9hyFou7gsAkalMuKgRuu79usUeGYkx4vm67/se47GE0Q1oRBjKPW0GSzBlGZZgmBVPBFPHvLr3lKM5sBdg1no97IF3sEZTJY3EIZ/rqPghrNFtoiXmGMQuXMe+9J2juXMbfRu96taTRxrAgUTx5LQpI7JOYoYxXvoZx3Ym4DBOae8JsigDQljWRoEnS2vr4rnVg86AVBYBnMuRJHlQ1Q//l4Q5vVkrPeFifIa5ZyZU9TzmDCjpkEoGyYTororbVAIWLWmQCUYVm/9m2evbX3tXLzvtK1jm1V+8nCTOXv0KlvmHR89Pbecl3eLSHhnR+9T/jz6q2szzD597xr7vz9v+dL2OnFDgw6Xv+06qvPvn52cAl8vlYb1UzkAh+CQvl8vlcvnTZanuY6btGqf6xd/+9jczW5ZlWZZ1Xdd1LePu6+fDO3fetXVdUQLRwBijphAzy/zdBlT/Hcz6fTuXjTjN/cQ9eWfcCcgPloYxM0ds+9bHtu/b09/cffXWLg9IjN737Xm/7n/77jmroh7ZaEobyiH0MdzRCpUJ47JweVgeloheouru7s1hS5oD2DblkkEA2Lu2nnvXSI1UH7j20feImlAiMrEsiwbMOghvWA12sSbjSEQuhDMKD47hYam86aONHlX8YyaUTDtAcFbrkKxTEiXLrKxcZNWWMMDgTlu4gjQl4I4e2IWnvtnuuMpbu/bt2vfJ+TadxfDq/ptPAU3JKmOoQtFmh8iXv7BfRB0A8wWlGCgFECMiY0gS6NZ8XYrNPoqs0SMSAW8kHTMzaE4LtJNSnYYFd0y+jBI4DjMDWJNief3VegI4JOnnNZqMBziDYh0JRniBMzfLTh5JmK988Ned9l0n/d2XX/jgP7WdVEi8/IHb03UfY3u+7vuOKKCyb9sGAIExctu25+fnfd8rffsH2/Z9N7OHh4fMvF6vAB4f47/Hd5WbUMZ9WZa//OUv3i5bH8ez077vz8/PdbZ1XR8eHv70pz+tD43e9hFP122M8fn5/3b3erfauq5mnnPsD40XP+HhgnMCmMb97Bznxs/df+9p/qz2wnJ9DFO8CRzNjapY/+pseG/d9/ZX3B//kbbDG1Tk9fW8uv6jCOfrS33Vje7fffcr+BKUvG3r5UeMDgfcFpgZkWP0vcfWR0Lelk/f/LkwxN77dn1+ftpG7wqgXdzRUhHaqszpUMjaYnDnsixrW5bWWkviuitCRDNrabgObN9vtYxd//QfXemxi55J2sOywmnPf/uebXl8aOC2P133fRd9fViev38i7dM360OL/Rq5JUcSuV68wVrpjLvcQGbk/vSZnLWkmOmZzJCi2A5+3J9zoIqrk0gMERmIwku8uIZYVtoCwHyJuoWNSMee+GF73hWtmTsvnx6rrgZNwMR4HKLB4ZIyxzHnFu6fJ+YOIEI4AJMeWB1tuZjZyFBh+WSKtvijL740wqua+MjEzFQ0sykZOMYI4cHWadNx/AdMMtx52JjwC2mzquAYYxyakQCA1qqURymsjbba+uDeHmhLTJtubgvbYr7AbRweRs3N57Mws9exhzs7cP+XB7T9dkwBmCD/nT0pkGue4ezxt7jBWztT33Us5V5+yj4Y77fZ2oxmmLp4cb0+ZaY7L20JjeAo3OPp6almTad9unwya9d92/e99yvJZH63/dBzAuV/+/5zPYjyppdleXwkf7he9/w//+Pbbds+f/789PS0bVsdQPJ//I9HsY3k95+v23/+rd6NiMdv4s9//vO3f/52fXgYmX/7/hl4Lpi+XPuImYtXe9b2XWut6i782p77aaReGdnz5Znm+4V2f4bseb/zbPXT3v2un9XuIiE/qb36xo82zpf38bF3Zya8hGXu7s8xUQkAluXS+7bve0QXuF4el2XJzHX5FHs8Pz9v+/bD0/709Bx7KPLh8ikzNUIZcjBgzWDpjtbasq6+uoTnfR
/7dYw9cjjhjau3ZfG2uHOhMdSMC9vD0loKY2TPgPjwzbeff3h+uj7vPdb1wZfLvu/7PtaLjb1nT1APq8FMQ+hyZCs4wIpMAlkkgHjE9NyrKDZKxv1cdJK3gCHJMQaYKn1MwQ1w0LFe0FZfL26LHWh5AvBSUlxqjdLYGp2tUHWmMSQV0FXqMDoM5Wmg6iFULigwM2BzJoXKDKND2Mys6KEFHy0PjXS6EZ4EZMWLuettBiRR+Qw4uoEJouxIKJyJp5XNFsrImHwTSAToNNitm1mMLoEGB+hGt5As05qbmcxhrQR6RCf9MJ3v0BN/ukv+U9qvvCh/oYd+B75/88032/P1ae/X7Wl72se+995z6NPj4+ZDEmRgH8o2xjALAELB9mPECY5DFsoIaMTec+/59Ly31r77z/89xijQR1Jh6+7+fB3b/vlv3z1FRO89Iopq+deHP6Xa3777/N33TwXHl2IHAGDPjH3fxxjHfkZEi0igRfx6xv2VK30XH3jhpX5kTD/y3At1etcj/rmX9+7+v/ts9z/27VtfWOi8euuVX190KJIsoAE4EtwUoYggua6XmtJ779K+R3/e+7aNlC3tobmYbOZVL9q8M2EL4elABZRE7pGx923ft+etjx0Jb3hYmJdGb6bmbTH3HmatrbZ4WwkTR0Zm6NtvHsEWsNZDZCbMGrkxBjRiyA1Lc18dPdO7BZEwiMVinFrkyFEDp6g8laNUDtcsvVT3hKR5lbspEBtGoLzpBd7w+Lj46pdL5XImKnaItOiTp9yWtq5tscWcFBRkspR4Aze/MOZ0wlNY8faoJnPxLFFtVvU0ETvo2ZpZc4MnbWlNMEkB5iF6IKloqzVPkD5x/4OXnUgWL0XGmQfrmajiJyfDovqt8sYGOZCZ0/LAis90TDn1rRQzoVRVQefQR8BtmfuJCf7DzPT7M/zjZ/uJ34g7y3PGVLfr5957jkFyWX3xx8uySsxMY5eUMBgzNVoskWbToMcoRvuh0FYJDtDIkNBjcAszY7+evpqZLQtT6cn/5//93/cWY13XT5/Wtjz88EOX9syMnLydMu6Z4y5/Ldw9kiMg7MuyrHv/NTz3ezji3uS9Mu7nXf6CMX3XCy4xe75sX/jUR1b174aVvnyed+34/Z6T4nb/kbfHnzuPmmdeaoGSivh87buI5eGxgIVU7DGGsn/etm3ve7otjw+Nj1V3OT9//gwYXbY0J9gdEZLYvulj771v23P0PkZXiml95AWI5QF2ka3gIljKEi3kIR9pMBchprtS/PTNny4P335+fv78+fl6vVrzT8s3/fP3GYZWBQ6i0emWyyKNQ9cE0k3Fd1y36bYfpZ+r1qhzxlmlkpYUDSAvS/WlrFwmOpYFXPBwae3C9aEtq8Fxiptnrma2mLfW2rI0NzeniRlECB2ZARms8jyZk2p58lWqlSQyUKA1qmqH2UxcEtHcl/WhtRYgAwIlDikmTwORiXM9oklnJmeif40XA0ibFl/GqRN2Y/JN+px04CcojcIyPNKU/qzOlNDBCaREBeBggqFAmhJG93v36yNn5f3O/1Pa/MjLgOov6MX/FKetzHp509fnZ5LL0h7WCwAExt7HyM+fP7em1tbWsix7ay0zL7mMMaCe7GZNROkE9RDMjL761MSf37usR9gMAvZU74Mjnp+fTy/ezHJEXrct8vP/+u/ThLrzKNWSkh4fH//0528fHx+X5TGAp12ftyuxtzbWNX49WOatJ/uRr/2SxntrH8IdH6Am91b+p3SUX7Az3f/S+7kN73W1t7DMW3Tyzv9CJTwGiwDBKS6RZGVENwK576P3bUTPhNKN62V1tNmDt/2673vfRrn/dPoBOEZG3/oYsW19e+6994xuSjC3Z+gR64XgKiwhV5jJ1k+f3F3wSEZG5eOY8fn56u4Ja21dltFHaMxqos1XrkxEbLnH3uQOC9ihkYpMHHWKkF3HWABZ1ZpA0tpxSwiyJApgpoeLz0RQIQl3tBVtwbJyXX1duVzcvPzaJDnUvHjibFZ5tGZuhILZUYWiIdBJpZI5qWm6H7EkEGUiyZwcFjN3HwwCS2vr5XK5PLp7D3VmRGTRTI8I5atuo4mHVAhHEQW21M+fCm711PPWjo6SPCr/oIqUnj2oRCIlhFTVwY0NvrA5aIfmmQPM4wuPq7r56bgPI9111H8QlvkHz/Cz2r22jO6SmP7ylz9VSESR2WPrW8GJ5/21aWe9+ZqOdQVgEVUmJY/VmDh5slYBhMRRLHbO9kcBtfnM0pdHkiGMrsyen7fjhsTp5hfBudYJrbVPn0YPftu5XmYuyxhjMbbWlqW7+6/hub912/FeALPu8pcx7rf2+l5b/L5nvBl7c+OjyeOjGf6j4z9qlRT+5Xnlrfn+wsb5Q+b+3MpxjImqlqTsEazf+77vfWy1amutbVsJCRmriOgY27Zdr1e/rXiU0BF8G0/blpmR0YMxNHqMoQw0ImSwi7eHpa3rurbFmrl/+lRGJqQMJlSojtuy9V7hpsdP31weHp+enp6enpB0gLZ0qEo5B5I0iUqPRISGlIlZBaHmb01H2L1qBtLKzCM4uSizYlRrBJCTyA13tAXtgofV22oPl7ZcmjfCCldht2+AaQkbGykvLowIVolUkyoJxQ1EuI6F/PFoDIBPNb4Jx5+zUWWpt2VZlotZq3J9BGAwGB0MhCXzxmWej7tsAlCE0ImuC1PP/SiqxeYadecP3RGzE6nTKSxVxRJVCsCa2Uje3N2aW2u+XGQX+EVtRWuyFeZ5iBx8sJp8Z/vntnMye+n+/xqe+9sZ2swunz5h79frvm3b2Hrf9t5HVkEWVUmW4qS1xSLcGSCPKr6VbJc4zjvrqNT3DEHSkfJcSXGgeQ3jtizHGIwjKk6S6/Kp9sfesdeDSAArFU/7tf/Xf333g/kdHDJ048j/InfwR+/vK/uODyyXpB+tSPLKbo6Id/vBPexzv/GRsf65gdOPOs39YLi/sHt85tVPfrsT792fOYGjCjpDCZmbNXc3tjHGPvq+72PsQphZUfS2z8+LlQgGeu8KubeHh4eyncoRkWOMHjPI83Tdphff94hUTKWry2N7eHy8XB7X5XG9XB4fLsXKunLU5YVi5MiEkCR9XSwDmM4MjwpQO71QBMDcFrk01HsArZybigrGkYa6tmnWSzPpxBnNRab5pJmfqaFL1cQobqShrVwefF2Xy4Ova1sfSmDYzMq+0+1REnLWi7DTK40zCchIM7RSMkuedrPcLs4SdiW0mKecDiZvaqZ82hjZS6GQpZLYAAiGRkVIEUKklqOf3D36WdJEh4yBHVdZ31vK4QXKl2U/zxBx+qZiCR+w6BludLbF2+KtodROvIW7uctakkAhADcdc/LWP+8H8gnX/H1WXvoX8NyLQZR5U1hblqW11q/fZx9jdElmXJaF9Bx6jq0inJ4ylbCSubuZbM6prZY1Q6mhEtvUdKE8qQmhnpbwXKwAJHvVaxJg7t6OfGNmAmZ0ELOeItnMMNL20T8/b6l7BTT1z3PJyF8tienerN/f3Fft/+fu35ocSZLsYPCoqrkDEZGZV
dU9PT2zHC5HuLLCfdj//3O4srKyJGe6py4ZEQDczFTP96BmDkRkRlZldfX0kCbdWQgHYO6wi5pejh4FkEDRL7RXcjPjDLc3yhf7IfFKvn/Z7fPL21vCPaJ/xnf0yZH22R7eEvq3In6xqeFRqLYsACBmP/74Y/PeexfhshrJWltr7X7W0U6+uYzCl1LgHhGtR7Q2VIZoHr3HobXeLhlEpZkc12Utdjws63ostlIFUIiWsqzr4Vx98A5TIlBbyxFe5WhWDgc5Xy4fP37MgL4uRaAevfcQh9kii/TozZupUJIzt4OpAJFgKbvOPpb7OOSUojJoZ4xZyRRAWiQmRhG1WI/L8biWoy2LLUtZFjssxRabh4FARriMHnRo0iNnztKUVMNvgmux4l1gxsz8vFz2+QIJm4mIWbVVPEuOdwBqxWyBEHLVY7yPZEhA9yJKnGQ1csMLz+GwGrug92GPJ6guA6RT1Eoe07MTFxEZBbhLsUWXVbQkbP52Eead09F/izTmpB+4/vnyi3+huv2X9/DL2+5mSbVjF6anHy55Zb0/wKPXfrnUFr2U4hFmpp7/UlUNmfSgqsXMCURAoiUBcZrLmUagGChSonHGSPZnwFQuB20/EBGJmQnXUoqVYrZEhHsjCbA1jxGGn6dv0l5vJTOkRCal3Kshxuc02SluUljMGEiWjgCS7wYj7Dy0ZhE5lAQJhQcY0slMUEztE0BiHhxZvp3SPj+7pZTPyr63VoPqG8KXff/i7TnxaefjFl+n0MNn/QS8dA29dRj4NKOYUGV3AKraWsvoCgD3rJwgqrqq994JXY93d+tdI/78/dPj4/P9/f3z48eIeP/u3nRpvfatkayxHY6LmarX1bejQcKDnUIn1TsQFHOUGkt0uTvL2rRtgl4KeFiXO1+K6kEOC8u9HN4d3h3eP5S7JY7rtlqP95fLpfeuugYOW2wpx3/407MVWc0IFbGIWtvmz03EsUBFWnOvLQpEBXf29HSOBk/yFoMQDERg29laEEKYoqwwQ20oBVKgBipQRBP8azVnTQQw2GHDccNa9HjEGrGwL+YmqkXUVPWI1b31iGBIJiKRpC8rotNDwMUp4eah7n40emdWYWAYqQwhRdV3f2jSmQECqL+vpUgsIrYgOXZEKng4rPS0kDbvvaOBXRCnWEiVcIQoQgUioYLaYAW2AAdwkWq9iiNQGgApUvLQjTBQKTArEb3zkjShMCRs/9CLmFopWo5Yji6lSWmwZbkXPUIXtYPKIiigGAEesgxfBq+VA6SxZKAVVFJCpiADV83dlbgukaxiYZGnEUSgfEHIqECiPedJmi+y8oRMh8c8Wl0yO1mSwFl2QveM0kBHbUPx5MJVvXKt5EbM/yzLEtFnALpGH3GUxdZJ4SJh0clobNqqXBq6a0XpJlyYLpSqSymCEvCt99Yioqgud1kJC2nfTVHQAGybpxdeSINALa2qXQoFGBFCFhUTs7JEhPfx84qULGUmENMimhDaiCnp5bj06cr/yzX3oQin9Mm21wkUSd2BvYd3tojeovfIvAoAzO3D1OIdQJH1s7eRNoTyW2rv68d60/3ysttPQp2v+5ev87m3+b1PFfDPP6ffzOuNr01VL5crakpmhvT3T/92//D+3bt37v6vf/6356dz7S0C5/OZHqVorVUJRm+twuO4FtxUgGZGi4Ik85y7OSZNxM1ynxkNi2BZtKxlWU0Vgai+9bi/X3Q9HsLYWqMsPfxyqapdtaT+ToaIefdT31Lr1aWUnOVM+4t9fJwBej4AkpzLZrFuEsd1+JqBZLyCCRQoB1jBuiArZphJKcUUDwswvNqAwRY5HBZblmVJfH4xM+jV7E2SFzMTJUIjnMmDj8HBzeju3qO2rfbeReHO1jx8PFEKd2By3Yyc1WliZglYW7SsM1CpIuYReXqBhXTQ0uUzy0I5nTpSSRE2yACLgJSeVONgBO7FMKhmLAZ5L4PMFBi6ExSVTJQVwbIsUFMYASWoUkqR9IlFkN69CgLwGRpYdjN/3zKqWpsLhiPIboqT9NZkEplNtmEdIzFkQkr+zzrxPx+g+oI6z1/r7m+tceSTidpgbiC5Fo3Waq3baHXbtkSmt+qttfTF7VtVIQopJlEKhthJuTcKGwSvVgIzZ2KPmaf1J0J5IR9uh3pU95qjl+PIa2zy6ll69UX8hvQD+8PdVnqMiK223nvdeq2tVa/dW9IDTrEVYIJ9889VDl/u/00p/MvaQCJ8rufP3sXxRm3DN5rHi8zVn31CVfU5HvKy7atBbxjsom0Q68Hm8fjx+fl8AnRZlm3bHh4eSjnW2r02USLo3uVgksMsMXmMmKylu5AloGqqrqpcXSNEyYIiWIouBylFoUyOXE+imiK199PpJOWb1vx8qSRLWUXEIwtiSGve2ibKZVEZfvFIh37vHt2RRIlBCrP0jU7hXgYEG8e1qeqgUmEyqaBoRkp1WayYJkQs/z1m4RAy4CLUpSxrsbWsh4Mtqy2LlpIlREVEYKrqlhzYGoisuerewj2iMzyiw6MPn5afGpONci5epBvEdEm5TMKKiBlMkkAGpUgUQxFdCFU1UNqlTuKzABdQkloxeAHQySycUgJqYECWdPVg+lvCiQj0UcdaxgMF8+Q+bxcRAUKUmudletFDkJwE7l16oIhE1mbzcM9vRwfKKJuefjyTlO9mtpBmI6psQ9veqS/Ys6bfzBm+0U5Sgd13R4KLRob5rQT/Ja9x450f8vGrdul0GMxzCPvTXp6f9ySjiDDTpHkxM1NX1TSvI3Twufug6s3ePPmGUixT3D0DEiRbOMlFrx6IfXCmD4NpAe3XAcAzjnLz80kAOQXAaxmYpn9e+dXCPXYzKv++CnRP5qboXt39cjm31ratbpe2dW9ZxbN30fRAZWmDfCAHoDdsdi/u9zlN/Aty863TPu+Cl7HKL/b5dZr7V4JrUIru3piM9OWh2HtPCiFMHG6e/7/77v3lUv/847/UrTORIkDfWu/xwcyWtbdava1Fk0W6mKpR9+pks35RrVsEvMtw+o0FKvBzRKiF9BARWIS5K7SImNpqYWzeuV2eLuePT4/3dw+towd789ouQ2VTbc7Wo3YnvXUxzeUhgOaiZ2Zmm8KEHrEEHHQCGLLaTFWPpZlRVdWgoJqUoqoQhJmWRYtk6Dg9ziGbZ6aP5rbQMJPkUyjFrBSxwiz0JCJipANBTd8CIJFMAM1rdA/vEV2CGSXWYnHuKZRjls8mIxyy0j3cmTxfqpqA9HCJhigSnoWmLUJB7S2jFIIoWeA7qIrQYowuWVeOgEIMJihFRUhouDBTaRgqYBiSvBOMYHLLRERrkQEJuUm20lH4SYWaQHZ4uHhED9UerdE8nGGEp4TqrZldp8MUKemW4bgwM7mSiKsuIzMcWZ0kzQFcvck+I4gyR+9amBe7pCYB+ez13J3TdTMqR5FXXuOsf7I3idvtfDXHF7uWn0VgR4vV06OImC53d3e5GXufb9V6OS+pz/feMdzRmvkJKcBJpXt4UPboSMJjsaf7zh/4Gvy6e6D2DwDIxIS9ND1JmeBU3JwQu3HQ
+1Wm/Qrh/nnpZWZ7lCDx1JfLpbV2rufW/FK3uvXWvPfozXuMX+Lk9HuMeVNun+3/s3L8C/baWzQGpditTP9Z9V/e8N2/desrHdMve87ea05PHsUxqZx325Yz/KKq67p66PlSn57PvUdZj4dSIOruh7ujLYe0B3vvRYutaraUglJStksABL31yUfB5hIuQSNVrBjNj2dxmAsXUSoEoQGFrcf1cCh3Bxd8PD+j1cu2PZ3OW38EAF0cUbeqUtZ1PSzSOoMKlt7btjUgrKiZHZclIrz1HiQ58XtW1Ehm+UkVURUzKSLHFaoshaqi0IQ8LioeNAu1mNiymJ7MmLsckDBRUykqxQbPqtgCCkVBFbE+ikWMuoU56F2llNKDrUWWPZN0PMtwDqfKlIHKQTbg3WNyHoCEzt9XRBawgBYhHnAGye4QSNblTnglIE6lCmiQEBs6GkWhIDSCdGdGIIqoGlTDFaPKj8w8JkZQkvpLRBM8N8s7C0whUgrKGloaNTora6N3WmPmKyioVAU1zFTdzIoMh0wq8vl6yT9TsTUTkeO79db4vHmdMcaxrndPwtj+s4Ki7GfRNaT38jpeavrBT6NiOguuvmyf2chkRpu5a+v39/fzvbH1Wqut9SHceAW8A1DF/Xp0927dhBKMqO4gwp1JxA9IyE0I8w3l7yoiXmWK3ch9uVHq8xkSxLxrfjFJ4vKrv6Vbxn0QHWRd+WQ7O/XeWtu2Vreeqk3bHQJTZwciZPzuwi9BIV85lfC23HxLXne2z37m9vVtn2/Ze3uJrFft9ulv9I43n7P1IccpGqOKqVD04d07EfEp2UVkXQ/ruv7Ln/4MwJY7sQiitl5KKaXc392LSEbY5xPquq5l6aaEBKMLIug+GE1b77E1dkdwATIbHlqEpomAVCZ7loVoFI1ladTHp3N/uti6hGBr8XR5PB6PZktQugvgIg5oD2Sp6qC1dono2lBKebi3w7IKUWuNViO6AKJqxTTpqKZ2syhVeVhgBishCYYfSUwolvyLUIJpMAkhtEEQCSrMpBQtRUspU6iUokYpGeYVUXBi0CJoUKoWKSgoKiIDRhT96jOlBsKJmPf1gDvQryuoeVAz8MiDmUpRNdUSUKQbJkBKZKZLVtemRXShtOjD48CpgI5KgTHVWUBhQSlU9cExEOKecicPSKyrZK6yCAhKMH9wOCjQAD08eg2voVvApTQUD20UD3UKCVCxHlW1WK9TSC86j0mRdNfkCixZAIRVZgm6FPqDRTNSGqbVMLiERaTFS731JVbi0+syr8SNfM+ApNzI710lm7kCt9sywKTJvep/IjQTQNfFUklPe7q3YTRfLluqTXnSp5EdEZWLu1Ywovjii5urk9pb43DI5N7XAfGYDhbZf5EIJ67/U4nEebjdXt8PmMA1/SqvXEs+/ZqA6h5glBm0AgDkiGzbli71WvsoT+lRG7fmtXfv7MHwwf0U+6ylcgHB20JzTPP+U3dx+abw/fwh4dFvf8w+ajFrJed17EbTG0ftW/X9smToZ/p52ym4G1zpesqdY2ZpDAIY+NqI8/n8dLok57Oa0umAipqVw+HgvXl0E0lCRyuyrGbaRCnpOY7O7q3XpJFrNS6XVht7NNFVRCHGNTL+arKEJn95EbGtaz23Uz399PjokPv37w4P7wA7X84UXRftREAAbLVdtnp3PGSlXwZEiwIR/XKp50mWJMIOdq8gFOFOLapGG4hGmpkqisEMxSDTJhpZS5NIPR2WnBiNYsgQ7sAHF9lRj3NgC3QVLSKqUlR7dJ+wM7rRuKAgOjillLt5RG90R4nYuRAyIpqae2/jsCGTFdKtDGtsmEyICb2KyC1KAAiIM4O3ImSL4cVXNSGCTkIDGtPBohCiB1ADgGHW0AqSyBis6CA7NFOAHg5TmBRbRA8iEmIMcYxwH2nMlLBc26R79IhwePMJ6B4ZUmkDLctSBF21FHbn4t5URWSroVOdT+R4clHHhOFPhPhMWZDySoL/wtd6I99lHnv7i7c32zRhBohjKOAyAZHb9pyCcr/i3SKitZbad0QEoZFQHK01mElL3ulBuHLW2L36rl9vdrz0ue9+CwAzM3m/8sJ1s7fU4QLXTvIDaetnV3+x5p4cIbOqbK0DcrvfL5jqmwYti8V6vpm4/N00mzQCn7GycpwYe8iY81+8raG7f/76rkSPf/c+Z4e7AZh3QbzVz5uH0Aur6ufargRxplTkzsnxjIjD4XB3dwfg6enp6enp7u6+tzhfqojoUq6k/mZ1uyDa4e5wXJeiNJNDMTAkSDgii0l7uhenyenbFi2RTlZE4AwRMZirmhTTYlgEVpvXc//hp4/f//BEw+94+LaglFLbpdQuaBEhpgg27+2y3T/csY3jqpQVlG2L3utjPd3d3R2Px0NZhC5ViSgqrVLFlxIiakIRqkGVCCSvoSlURS2B2shjTBVSJBjKIftSK/J05eo068emMhFDpn3ZolJUixbr1ilgS9ctI0JZLv0SPthVtBTpHozu8Da8MUw/NxGE5y4mTI0ZTs5i5Ar3zV0ilqC5hEPzvMsy48EAgoIACSeYBA7QJRVRodDDEVnS1KyoCZOzP3oE8t8YwEGIAQpTowo1U6jSzyNqJmV5uP82gp3h6dCiKM1Et64OBYyODmc4yU4+n867z11G2r2YWXeq0Mysu5ktOiKux6IiYTYszpSPu3Dfe7p2uF5F0Fsb59Pru3zfN6dMV4y89Mm87G667BBFLSI8PL0mKU8IVG+qend3l9QsvffL5VK3fjweU56mHlDblpuoR+k9equ91d6btx5BISXhTSIQIWP3v5sOzVpvhLXKNWECN3KfpHBURnv1LkZI+cXgyMsMuJ/hc/9c23fL8AgHPRzbtu1pSZkv03uerKZKLRAXREeL5LLOh5iSc0YfMDn9Pmm7Jj5l+j7Nn3/KN3+FGm9/xrW7OUB4YRaEf4YMJzt6476fdv6l5yGInGmAST+tCtXnp6dlWdbjESKPz8+pa0O1VRfTtaxk1sDTUsrdevj4+FNROayrMOhVraymQCB6i6B3d4/etm3LujDTqPLTxbeK7o0hDqzvF4GZiSlUQmRYr//2/Y+XrZ0rPCAF+P4kdv/737+r9ceIHvfvHh4eLHA5nd39cDh8/Pjx7rA+PDxs27nVcwBmslgRSq0V3tdDWcuyLtq3S+teFojCg0XdFinFipoa7ieeYejdExp2dzwirWkI5JpH3ukRAQ8KTE2XUpZFy6rFKHBQyeSzpVpAO9nn7CcFfG3uvfc20G6BwugeaB1bhU0NmpDuyU8GMTgRDuegQ4hAr4FaHxZQiIKGCDXCICalZIKwWSEJdliWD3SVo7v33lTkkDy8EtF9ax2AtF4KlsWgYIyKVMmPRg5hYqayFBGKKa2UUg5F17KkHhCxqNoKEMXIJbQGCu1oa3WeW7CFq7bOSz1fLrXLIVGV6Ys4lLIrE6bDplwXa+mBEUGXGyOpDyGuzOyHXazvpWVsHUTkO3pyoFBeCiVMNS7JjmPX6tJrngFbCEdwdWw2SkK0X+DQgBCxAdCQxG6NZxORd+/
eYQhH731kEmV9M3dPqp9kS08F/+PjRqEJzayYu4m7d49SSoZ2wpG0bwQAPS7rHhgZEmBqdbdyeVeOx41w5aLJsFwahamKpcJeBlvOMCnwl2nuzAqR+7jPs0UHFXW6xmQEr69WlWhWx5kHbIA6AYTxRvzyb9ZuT7tP5Ptv0FQLid5jX9sJIlyWg6pGIMITlzXWNhQxVmsxW0wMEtEVYkJBL1qWoodixWBJYsXwCHpPa6Btbav9cq6XLbbN64Za0TqaMwKPWxWBzZpegCYX0k9PrTV0BxXrAd3VWXrYeigTqhiqtqwFQoWIw4QiWIohNEIze2Vlyc2GgVyiGhbCShIMpAlH0DNpxmxRw+7YTYfvCPVOT6XiSsnS2nPWEgpQVSAWoqoSFIEKERQnjBKUa5YG6EjlmSQ82EPobC1rIqnY4XDQUrxdhGQ43aP3UeRPVSLG9Mj0jSR+tXZAG1oxiKM46TQXNSsQ0+SVikJKMAgNFJMCXYXRw6NTRUwtkxI0k3Xc4YhAc5jc4GEEEECFErIstq6H4/F4PKZ7ZNR2wEHEwMF63F1KSKO6GDq6dqp79WXhshx68HwO9+jdSVqPZi2F+Drz70sptej0ukvMwPVwxC80UlWcTZRTfF+TQnXjDsXJls+5rqtMTMEMwI6VgR0towKgQFPCMKGBUyOW4bx95eW44emDY6SyD4tZRKI3TlxD74kB6RGRQeuIIMdbKaMXU2GeprKolbKU0luP3iOTk6fOlv/KDjnZSd5eNaR7/upxSihk3Lq2ruHZG7mEGwROtl+PlplRgTcAJxk1gQEuMJGfL8EBALi1tP5DtHIzjrcD97aAfwsL+flvTI2AmFDIdPDd3d3tuKM2c/p3h5oEdcn9Y0CEhwkFIRDTOCzluFoxUXSMfCUfQZHzdjnXrfXLpW2VdUNrqA2Xit7QHeeev3TMV0yGxVrRCSiWBQUSKOEarg8Pd+6uFoKwkl7uYBq8MBURoy4WQYVp0YLhhqaHg0UpqrZGBph235wIEzAzEXiD4AoGlUT9+9QZSKjEIOtyCGXsdshCtZlhb0i2Y1EByVAGKCOwSQlHc3pn6+FOJ3pn27y1PpIMpWhJax6M3j1aBzlYX4bkcIgmh0u6/jUkXBBg4q46pQU9urHAaIl7E6UwpKQHTUzMJLy5bxFeVKyYlQ4OvslcZLnOPCb3jkIVZTE7rFb0cHd3PB4fHt7f398v69EsgTPStQgMEIbk8eDEEtodqOFiwV6i7dLWyR558Ad6lJmclYAZM1tv0DKq2paS+uay2DxUEuNkMpgyZRDbGkSkJJzyxqefr7N23R6Y3UU8hr6lIiIBMQWkiPQ9ZsYr/h1T0x87ULkb3O5NR/oSk4VlKAe9RoQ7M+LlnkzMyDTsLKwa4RPU5iYmJkARETOiqYiYxSYBj/AYJ8tUE73vmKarG/2FmEh9RWeBQ796j4fQD9mFewaSeSPWbzv7SzT3mOw3t4+XES7dZVxmkt8+usSkbL7xjqXCHm+LzNeHx79Xe9Od8sbzvPUL+Mm5Ol+PAPfuec+26w4zNWbgnwyFQShMsJgWy2z5JirCKNBF5VBsLTAEk8e3da+9tdbrUN57De8MR+IGGWCgO3pHn2Ai5/CWxYQ0QVAM60EOh8OymCpIv7tfe+90QNzUipUm7LWGAuwMMXE1OAgNLaoowR4dHo0QoVgRVe3VU/HkwC4jLc30R6cPImqoapWwCfsVufpXx7jBSDpUkldMDJIFhoxiEIvMW89S1QJ3htO7dJfeUR29ozWGq3epHa0OvMTwIXKJCKdFHgpAQAGVZFuE58NSwYAqheghBitU2krC4Y2yVYeiFCbLcJZ79XABnRqgcKFAtUEB0eUoBkJchJCRqhQR7tNKKLYsdjwej/eHw+FwfLg/rHfH+4fj4c6WVVUHUbAZEtvtGgF0Fxd1ja2bhYgnxmM3/5kMDxokhJFEQopoFBHpzq7det+Fb7PMaB26fCma83s8run6KIuV4nugdcHV/37jz7HLZUst/vbkEJGSp4oMO1fJLNSnREwyoDwB8r/eB7GgKDHdM8Bg/soaAEht3N3dnx4fd1ThzZ7VET1aVPXAmYkSEbH1HlT1bqY90u1HePfW+xqcKgAA1hhJREFUQyaIZYeffJYseiD/5wJ+8RY+kbCfSqRbzf32k18t3LM8AK6ZZuPWnzuChrWeBpVNtJJM0As+iX4oPxNf/ts2+RRK9cXGL/qVeOPbyTMvgKtJG6PcpRa71G3sMVBs8LQGaRKqYoqiWhQGCYZ7FzWTKIKlyGJUCYTTW7Q+MLy1Z5lNd456da5kQvOEwZTvwnVkC+cN0xzI+LfheIeHh+Ph/u7+/rAerCyyrqoijS7okvSTFPRwcUYPUg2KCDaBqwQkJJDVBtjDs9Lc3J85IgkeZ6eTG1tJvuJoqiohQojIspjGC+xBLu4OIUGIihIWWlyUoqaJmzGqukKhLgToLdxZe/furXurfWs9koPNE0GDcOl9Mi/O3S5atORAKYXLukZ4BNIW9wCUEWgVJUBBgUhIp7Yum0d3MVMnSuJNqaQFQqG9dwSLyLKsC1ZBl/B1OYp6UY5kY0GOY5GSHrpSdD0uDw8P9+/fHe6Oh+O9FFvKUW2hGmiqiYaCuJBUR+9RGtHgSvRIDYsvSn94sOy+gnQsCANQUdUkHwY84BHT3T4mxaybiQ5UDO/vj6pqRaZGP2lpZz25W819ivswYyk0C7ORZL8Wigjs2lQcky405t11ou6IUSwXV99BADgcDlnGjRPDnTvk+fkZ2Dspu7sm8e8kJbWHmfR0wam49y7SnQln7tc1GRGdqQRrCsv98fbXJFMq7nrdrYtmB8TgNcDmRqrcXIybLMpfq7lLAEY6Kbe3m3I/ZAKrRCTrGOTu3VV6mf6XtDI5B13ecsu8pSm/oVm/eUT8nLAe+JnPdfBLrIev1dwFomIqxvTk9qy7Ia32/SsqSjAYZFjqL5oVcsDwBLCbaDE5rMuxmIloePgW7tHaYMYY5BjuTne0itZia+gNvaM7vIMBugaE9EhnZQbrkhj9gPv74/37u/uH4+FQDkdZVqqEmUQho8FNVE28I0yDvTvdRhWOhnCV1GKyKmkMnTyo1MOhSGaED40LDe7usiLS4YBCcBFLiioRoWRiAKf6RQC9CZHlTEWgQXEaKKGTV4hkQMQTzNrrcK3W2r15EmNGj21r0dldGBaAakmQa9361edJnd7JhG5TxMAhULxDFFB1t9osihlKpyWvU2vRQkuUFlJ0MLdQCHq4IchipmpFjBa+SYa4C0oGFCyp7fXh+N4scVNlPR6OD8f7dw/lcIf1MOqCJ5WYmugBplKcJDujU2sP9gHlGODxqT9OcGXqp57o2IFON9wkAyqGWZl5Ej3dCBIeEOewrvK4l2vaQVmG++Ve7m6k+R5uTbCs3Mjwodo3zbwQK6VIkgJIwcTqqBbYUOqT+mY9rJhyluSeteTupKcCnqXH8jRf1zUtBhs1j0atq9777tLJDIREy4AxfZ6tVa+t95rvkJPXXbIKysione02YvwyaPxCgt9I9l
fy5HUb16+f+fVuGb5Ent4eL7tyKiLCTC7BfsUAT6oQgBN2es0V+o+luO8MViM0j/nnq5jG3vgmy0UOwfyB84Wq5dLhxLmn/i4vwZFy08wsnZAMd0DgBimqx6XcHdf1UIqCdLhLRE/fTu+teq21tVa33lKg93RBoHZm8SMS4SBBFUKSW1EUUNy9s3Ut797fvftwdzgsZTErAWnBLgpTcUawBZXhRF/Naq+MHg4FGV3hAmn0/AmiYG8TEMz1uCqCdEEAtB3hYElBg0SrUtNmZItNRCQgIp4zkps2Bk+6iQgkoByVQDI/DB4O0R3jWltLnHvv4S16C+/sSXXXojWPPmmQmLmgEcHwmLNjmaOThblvI0Yk6JD14DS4RoWFuEjtaF3PW08CsWRHMLM0UAoS91AsFWVQbVmKIpqZLIuaDQ6GdS1l0fv1YV3X9W49HA7rcZHDgnWBLTCDFkgBCmCiC7TACuwiIZRORHR0SKcza6RMT+AeLXTPek07+EvyGAXgHPXkPFMI9lDTsMSFlAHfUZCxXZooEyy+Q2VE2c1vHet2U2puLvVy63NfrKdwNzNdRjEDkUQzitkiZVfpFxGxcpVRuJGnffjWvffu0VNqlVKOxcxGtIBka21wYbkDY9cngUd+F8Bk96xp3pGEhOxOf1VVk2R/DF4F+i3aff43I7e41QJf+m1UVam7xLhtccWzjPYbZKj+En0WL4W2TjfuK7fMf8B2y0Xz6t832tf9Hpu0DZhu91w0x+NxOPVmKDU/r8KEoEniZ73r9CqXosuyrMUM3XtyzkYE6AhH7tjWvDW0Cg4/9kRtj0MEwED6i4hoMQNKiPHdu3frag8Px7u7w7Iq6YpO3yLcpIhQGfSA+uBJMQBBekpdCQegsyCnmpjD7cafL5KRUhWKJNdr7rfzfBwAzBCVAglfS+0BEy1DosvDtHUt+WhJEtIZwhIRmtBJABBmVfEYtcX7sIizSLR4li5rgx4yDw+KBJn+swiI9GUpBsmiMaIxE+aHJsAaFKCLuUrRDmudl87aPABVpuehFHSDqhbTYmUpycLZI1zMlkW9pRDEsogVXRY5HJd1XQvKeih3d3fL/QHrAs00qo4gSmYHCLRAFFaSBR/CSFb9QLKh9c4WQ6zvzd2jO3XkN2FX16brBoBOwFKIJkHdLA8tUIja1Ow04IOwbNQ3HCKsN+4SfJfyuzQvNy2FOw++C3cJT7eMiMTIOeu6zK40VLX1TW5YKhNTK3I1UDiRlFkaPupzLsgU35fL5Xzetm1Lqpl9kzAk5bg5AWT+CMVhRS2MbE+XXUVXVRmo8dc1Nce6xfVJ+MrnPoU7buT77rh/pbjfKtwAyuRquPaQN73RTOXmX2ShXQAimjF3DqbtEoxQCaNT3MQpnYAK1KCeugiiu3vzMMtCwxp0QCCTKvMrgYb6lhvnTSH7M5Wepnkz/oydD/plf/7Gcx59gjpfvj9nbmyR0CGyeHjfa918I7mEiUhohHmLQllhwe7NOxxmZsWWDMGBEj28meDOynFZ3x8P747loegqXERY1uq+bRV86r2dL9vp1LYq54ttG2q15lobWvfOEHEr6eiGLJGwFRKh0APu3uF4xLtv+rJwWTcFJcpqZRXTQN/usRiCHhuEPcREl2Xp9bKsRl0YnXSzRRUKWY41kmqRYQoTWEFRSn+2gmIzvgCooAiimZlJsawgSdIDnWwomZFAjITn3GxyEIGYmMoaskYcoq3iZeVBxCgmYgkXyUmN6N7QG1rl1qJ3eij16It19q1aRaUIBj2ItsfvzUzNs0iRCHTpZemDc0ZBaIvSOqsjIj7Ci5nANNZFHsr6sCmf2nZhC4coC0zdebkcjsu7+wergoOKLMVCIa4SBb3g/rvfKToQLkIVWRYrR7Hl+N5hay2L09APaqssx6wbS1NdVhaFZYKsk3HXH3pvbI3uEk6PS90ul9pav/R+8e1U21Ntz5XnLpWL+GbAzDfkAJ/Irh9O6xxDZ1cZlcmFAt/TaqFSsB8PN6prUzRCOsW7aoh0kSozvz9l7i2U83CeFZGSvr+UZVlSQyqlrKsoJJyBdvFnkoUNGMSN67qmNzMigv3+/v6gejqfEP1wPK62em/CO1b2Gtv2/Pj4eN4u9+/u/u673219U6NoRPStb7XXrs4D7+KosoAmssi2+VbJ8E4Ax2Vdyto8tua1n5OR7uJpo+8npRKIENEseQ+7hjNTzjxylkfP44HeEU7fMuZrIstk1iRZb+ThL9XcX50Jn237nO2fvH1xO6P/x7dPYT9yk6QgIgaRhI6dTgIcd0YIuCX1NnwZbIeZ4B5JIUoDs7QbaWbrInfH492x/P7vvjsWORQVNvGalmL1AdTtPfMYg7TMOO893GcZScGECUJ1UGKpwg64u8fDu/Xubj0el1JsJpRf2b2DTurgLQl3d93fmU3SAApNRjsRUQ45MQpx3DAXzuHCVFwEaeFd3ZEv7KeZQD/WtGf6BAUT7wF1pfQeHHE2ingAycoyyHZ8WEneR/E8SLJ3qVhhBKiEiMhyuKM3DxDIIn8QdIcOilwNlEE4IEEIw8JMabX2Szup9R661egtXQFASERnVO+F0dZ2jn4Q3JdjUYvV1AqtoLWtaJRSlqUsy3I8Hu/u7pZlOdyzlFXLYuUoukBXLIvYGmqZ1EswIJDIxG5GwHtPL/N0NLv7tm21eq21zlhyHplvJ+tdt7P8gn3NK93jdTu89S2Sl8tFXsZXU3G+s7a7TVKyZ3LWuq6paG/bdvt4q2QkFr33bdsyXpL28ePHZytyd3d3OBxb859++rdtO4fnGWBA3L97+PZ33wX8+fn5+HDMxFSSCbaJCI9o4a1v27Y9n57Pl3qu7bIl6bsQJcZaDYWEeMJ1SM4s+z3CGFl5G4m+n4U+vjCeOTjptsXN+Kt8jVtmn4NfLt9fXXl18dMrv6TdLouv/e6/ZxsZtp/Oy8sxTPluorV+zIUbEeyD+vG4ZqFPmBkieQKydGqsBytqZdFFpSz6cHf3zcPx/u7wuw/viga99stz22oSlG7bdr5s29Zq7c3hXYYN7lobu7/OBxaBFbaAErbi7kHef3v/7sP98a6YycC3mcnI4GWa50UNWaLXwxlaxDJHLXkRg2BQGAxRKEIzz08UiPTAWiId0mQ2aObnD0fjXC2R9sQQ7kp4hpN3vyUwSgAiywahOyFEp1tEa6qjGl+GFgbA0cJnpbrWowd7MIIRbM7m7DH4gkgIcFyWyqi9gSgLksQmHMUQDogE2cNqoHc4ce4sQUrvncEu2gI6iquQIrQiRVmKHhe7W/Tduj7cHd+9W++OS1mwGIp52jSl6HFdDofD4bAcj/dZw5YLxQzLCl0gBl3T264ylGyKgMmPQArEe3iLuvVta7XWWr3V6J4va+211lY9ebPibfDam8L9TTzEzwMibj+Tr3OO0rWdn6zadr98yveUxR8+fBjwnslzkBp9H8GbqzYgM9v5fD6rojufns8//fTT8/MjyQ/vvru/vz8clqfT8/njx/fvHw53x2VZ2L0ld5PZuqwJ994iLpdLrbW2S8bJlmVRW
3wgiLB1j9oAuLfWPSLKpG4dv0gSTpj+kok1GcUtXmTY8GW9hzxd8t1bR5PdTNibwn36Xl6IJPItvqwvzZl8rn35vp+9fn2Gm/6/fL79Bu0ru3/rcV7NDScg4aAAnb0ruZo8PBy/+eabd+/eHZZU3BXBqXoHSRy0lHJYyt3hcFzXd/frw/3xeCjoG3rdLt4vI/Vp27bz+fmy1br1raJuqJ2t04dcw/DICTyp8wQUSMFCiOBwJ+++uX//4eHu4VgWDPhZERNByNBjBd5bmBYtIozozV1QSjEwew1m+udwFmZxaOiw8c2KqMKUZkUNpsnbnoCYAKAUyixsRESybUz+poiI4UjMtFcJGCBBEZrTEBJwDXGlKs0C0tMESv2UhhkcY/fwoHeSfL5srSb/XR9eiXxm1SbqYpTwGBSmFPQGQgPSIR7SHc01Ak+1ARVAprKKNBEBtV6eFtPlsNwdlncP999+9/73v//um/cPf9RYFsvD3SSBdaEWH94/lGLrui6LlVJ0OaAUlCJFRvgUBljWBcI45BwRIq4kkoOVQDT05r163drWettGMNnZEwLavCemk3yLC+R2VQPI0oS/lfI+TFuz2yv7v410RvMm0oFNJqLm+bztASozW9f1cDgsy9ImmCeBmBzFLAcLmJk9ny7n8/lyuazr+v79++P9wdk/Pl1++OGH3ruz/34px+Ox1suiNpLOOr33drm0besdeZys62pLrNBECZ3Ol2g9ood3eiM8TeLFkk5g/9UaE/QFAHtkJ4H7N20ful243761S5WbxI+f09x/udq+S+xddt/I8VfcDr9G+/7fRXN/q8WsX7wfsxnvfjAXkbKUu7vDtx+++fbbb7/99tuHu8P+M8O739Rpwv1xWcphXQ+H5VhkMS0qi8bHny4MBmv31trWvda+ba1utdeG1rB1eIM7esAJaNY1Ekl6OYICNdgBIiirHR+O7z/c390frEzMqoQiPacUgVIF4tHp6kKECz28O8KlGEBhjJIQ1OQvk8ykoUjSWyVwQoqiFBWlychpEhlAMp0zvq/03TFDSmSm5dRmRISyEEkQlMBLpjMoi2VYJOYm+ixIzTLCg90TJNO9MyDP59a7t+bNc5g48JfhQNHFIlrvFzjMREyjMUSD1qlOaQ4PC0elttZ6bXm66aRWP6z48HD8/d99+/vfffj977/5/d/97vfffnP/cPzg29gl0kWYqVdm8u79vSY9ZnqyBt+xwg7QAlMkjTynk0tmSDccGTvO2LW7uKP3aD1ajdajjXycSY8Tyc3jEIbIGyGqq+459Z8hQ+Xzbpyfba+0tF0tfbXfxZIbKkuO5zN0VT1fNs7KSqWUtfWtNjMrEhn7PRwOiW0/nU7n87n3fnd3MLNaK4AP37z78M03x+Nxa/V//K//+fj4eDwe//jHP757947k+XwGwt1brdu21TpYwwAcjx/cvSyLuzuxNa89g+3h7tGrexfBui5yGJHYSFbOXaBRZQh3mZR0JEPk9ZjcStFEyu8263WUbmK2X9LcP6O242fAIJ9K3jfU9jc1gi9o4p898/+jae5vkEjutB+ZNy1TlUL0dnd397vfffeHP/zh93/33bfvPxwOh1F1chRfDHqIMs1M/eb9qIajMHr06vWybY2+9b5FaxGd9B1j6tQeUTtahzuaIwAKqGmmI2KI9RQah3u1Ug7H9XB/ON4vpQACwkciGkIGKS8wCqqFtypM/gPSexCuYRKC0JkiBYnByJavZzkOMy2a8DiFhKhMyLJQPWJwDQ33+swbJNCHraHMdBUZDJAxyX/dRUJVQaGMEs4q0TN1JSKqR7qV04pqkTV3mNWyLz2qszvcM/UsGZqkIks7GRktSkRISGHZ2gB2B81DmmddDqm6bI11a3SAYcBacFzx3Ye7f/j7b//zP/3DH//h737/uw/v379b1wKJOx5IUqlWbCnrWpbDAhMIYGnCTHGb+NDyIDlzWfuHo9hSkmtBApIcCx7RSaJn+Ni9t6vvvXrr0VqrHr1HmxSAeIN/+7o5bpTxn9W6PtH5PvOVT8XZ/pW5nkNEBMnZAACiCtWt1hR5wnD25iGXTURsVhk9HPrx2EmeTqfz+Xld1zg3shL+zTffPLz/zin/8qcf2uWny+VSSnn3zYf79+9acHt8dm+n0ymd67Vd6HE4HN69e3d3d6e2lsCSLkrKnXNrvfZuS1kvm5ZSttbDw9GzPl3LX5O1ZXKUI+gCmyIhzZRRLY9jvb8YEEwqgtto0xyo6+h9SXP/Km/7q7Z/Xl7dcF75WqH8CxfQ37y99bvKDJmmCm+Tu+Ob4/Ldd9/94z/+4x/+8If3798vJYU/lMAoO0lhaDJol8J3y8AlsKE7o/V+ibqBHeyEi3BQaa+LLUUuToQT3geRSAADzycZ94MYVKALSrHj/bquZTke1kOxotDUEUtWMxgH0yjQDIOoENGju5opqAhGRPdlKUxtFyQCs+J9JvCpSgr0xWxi2JPSDxRALEFf2Fd5iNMjsnAFnJKkmBQTNRHhLEDTqKBycHkSMuWyUMSlBwY+YYRhN3SGTOEerfbm4Z0V2kOqs/bhwvKsKC+uqiZNRBjJ7BbB5s6gOaOxZzGjHiBxVmmNbBrh4nh3wN/9bv2Hv/v2P/2nv/vj77/54z/8/ttv3j28OxyPiwjduazH4YYqtqyrHhO3ruh1EtjnMA02mcZl2IMAPYQ+4IitDVqJZN0JMhXy2rz1dEJltfpa+9b8dLqctrict6323qMTtyQin7YXG/zllbc+/7PC/Vbg6E6u8vJjbWflJCa9lQIKLbt55UQW+gKg4Tk8wXrZGpmUHlilXGqLiOPxSJQff3o8nU6Pj4+HAz98+PDdh2/Kcvjhx4/hzd3h8fj4mGTlRe14t67r8VAORcpPT88Zdg5AxAJsTidULOMBUCvuW/N2ubTWwGWWaXRM9c3dVZm1n3jNt0rY4osxvPXufnbo9Jf43F9952fbro9/duY+PYG/tr3lc/+P1m5xMrdMBPtJKyIGKUUP66GU8n//r//w+9///o9//OOHDx9UEREqLKW0ttnw9E4XZFGUIrqBgHeyg120FQ0aPEsha1gRX3WJJUU8spInPMV6DOGOCIZk4HJo7mW1dV2Pd2ZrORyKFhOBKEe6X6ZjiKoI2InIhWZAT7S8ABKCEHa66JqhHcaAyzLrKCkGYrBoZipRZAzObZkCikbmfJDOrMoLDzrZQ5neFTGRkhTmTKuAdLdp06YczJz5WX9aBktBKrghsnmeuHRn61K71C7u9LAWfna0ju6cdY7CdJS9XyaFZ61eK50Ao5M96CGdntQzT+HCWFQMKIK7O/ynf/yH//b/+L/983/+h2/er+8eDmWR9SDHI5ZlJanHb5ja6GJYCkoZxI/HB4hgnJWBrLanGi5QZA7zoFdhQKK7l72iDAFSne4R3aNnVTZGoDtr99p4urRT9UttW/PujFEcyN6S76+E+3U/vmW5/gK1HTfbfHfLvG7TmHglB2990LedB810UROPaK26uxUpy/p0OrfWlsUOR3k+nc/nc2vVzD4c79TKT0+Plz//
qZTy/v7BW//pp58+PLwTs2J2d3f3cHdvZqdL+/jT6Ydz7ZFJcL0HIqIHPaiqPcAs/2XLrtEmAGBY5GS6BOePjdtflGgueWk8vWXr7HL/NrmyvKVpviU9d/DN9Qnw4gC4njk3937V4aubkrsxwi8/z60ZOH7Vy/5/Vui/9YG37vuz19+6b0qUPXitqrVWM7tbD4nK+t3vfvdf/+t//c//z3+6Ww/39/frcQCwECS8LKZZjEcVml5UAsAh0CoQ3lv0c9/OfTuxVUZjb6Z6OBxU1Tu1rGU93r8rzR/98ak5ggiBB7bGAJgMt4pScDiUw/3d4bAc76yUYkvRvO+IxbPWulixQh28tkEQgR41qxJ460QAjqyQuijCBU3QTWmimfCyLNwTSYAgIZIVr0OSJkWkdyd7hJPMiqOEZs45IersQg8lNMvUEQzRlOZuy/WQyLRFGAktq0dyH4930zFaEaS4Rw+4ozq26lvz1lPcY+tSq88w44h7qfJQdFmEIdsm28aWlTOhBNMh46RTnpq/v1vP20ka/vM/lv/Xf/vn//d/++f/+p//8PtvD+/uyvFOLQkrl4K1iAgOR0myHS0oCltGRcFbto6xBAHgsJSsAijhogJH94reJZVbj0xgC+/RPCK89tNpe3w8nc7bpfWnc318ujye29Pztjm3Gq15dwQUmhl2nxeytz73L1cs+HTXfOrM2WX6pzb96/2VQRtCtdxef+s51YqDWbpHymJWSK89CNVSnPjhp8fxYBKsld9v5A/ZaSnlpx+fDmUxK6dLS9il6dodtbXtfKm1XkJq7c/n8+VyabVnxCKyWJBKDMVbfIhssUFAYKajZjJFIOHdd0t9/C6lAH5D+357KsTkpcSs1JZivfp1Xn6zGqp4eTjfXnn1AbycwltJ/cvbWybFr/Ag/bZtD1bnf0OG/u5JkATpva/r+vvvfvdf/st/+Ye//+PdN98dl3U5LGVdoVOHhEopyb8HA0RuuHg2mMw4nsBARQhDBhM6mYUMI62/2hgJRSkRjnCMKnGT1KaY2GqHw3I8rsuhXOmhJr1OAAZdlqWoqUIgFJdJwbNkbQCCCQVXgFA1hAsbo4OhCFCoVIpKWDoRdLoIJYhB5CvTsh7BQDIzwyN1bkVQesBDCAuWGAQDQmYVAXho3NQIHv4tmIV0ymDRDiGRqngjMv8p81S3xs1RXVqLFmw9Y9HSunhoRGQgTQXbEqWFErXKpaG3NIyigz3SQhJQG+R566z8cMDvvv3uj3/3d7/77uHDw/JwtOPCRXwUY7MCI6A4HACMhFLJ64UqU3YpMAI7KTZ6PUcEvKc9JHQJEhG9SkCTnk5MGbXXtlU4PNRDm8u58nyJj6eakr07W44DgMmQ9VfcKr+qfaoX/qxWx7geIUmdCxDwG26R4foDZNvarmIq2vCgwo7rWkrZNn8+tFIKPJLM4/tLrbWet8HLPcl5UL2T9PT+Td+KiJTBjXMlFEmoWlmXW1skoueTxUsuLHlJOjZcMao2vb675s5fU0P1i+0LQ/ypk2gfxDnov6jxjUjAW9c//fovvdNf1jhV7WwRsSxLIgg/vHv/z//8z//8z//8zTffyId3pZR1WVEEEfA2EuYBZt0H4VVtTzBsRskUapmaEmQInR6TI6y35q1Hd7ROwrQcbaktvDmrw7NL02Ut5VCWxZZDORyTT8OHuxPGyBpRQnBZDgqaQujJSItR/GIgV8CseBEiUNIknb+SeGtLclBBBlH3NCjKzcYbcm4oOwBEEGBHkEooQwJwWCNES6AENUYRRyTHU7dyY/5pZP8UF0lFZ7LCDO7DxnR6snmq6r61XjsuW4+Q2qM2ry0SWeNgb6OmXalRrAPoPXrH1hBEJzyYFlKChURL9E061vf4w+9/9/d/+O6bh7uDcTU3kO4ARZcpzbMC7HScZsJ2IKCYxctGvDSGCVJIH+VAXYTGrBtHHI5oFd2T+r23qJd+Pm/bj9vpcnl8Pp0u/bTVp0s9Vb/0ONdwonfvRDhEKOH4j+f8vKlqLbcv3t7yL9A7jD2/X0X2aKTuAcxLTc6WhBdN5i+OjNmllKUcUrx6bb33n1x2fD0ATg3rdN5SuR5FcWc77MqfXAl2RORwOKRsn1JxD2R8xguCSVXySl0maTu72W+rud+2l8o7X/55fVByDwf/Ipn7SnwPsfLZ67/ZD/m6z++FHPP1/u2UmMuyvLt/+Id/+Ic//OEP337zzcPDQ12WUnSq5xEZGowYPFnZmyRH+ES2hafqQTrhwmD0aL21rW912/q2tcuWLL/RXAIqYgGpztaQ2ZViYqUsx8N6XJbFbBEtAqWyQFKUIGRk0IXorLhCRaIgkwRGNSwiqESEQIViQoWuiyBE6CAMqjJWc7FqJrYMPGg4JmtSdldytQSynqnQiiSYPcCR/CmhCilEcagHnDYq5Yg2l3HWyLWcDUED3Vm9p1mzE2P1YES05JZxtB6teXNsW++hrXttrD16p5MMdCICMUogucBIjQgP9kDLDwymSoiIUprjILi7O7x/d7xbTeLSe2NfyIAQKhICBChgoLbBsKz0YoDQhBRblJCszyqkhpNEUNEV4aiYRUEQnXTJOrPbFp0S7DW2C7cLH0/t+VIfn9vzaXve6unSzltcmly27pBwOAf3HX8BOu5v1T7V1t90t8YLcP24+Dn5MBVnmTgWeIh7svkjzpWkUveQQEJp+/KOk/JPsu4KGRG11ph4mPQNjgg4p8+dIzNruD1bHlqxX9RJr/bqmTlh+1c//o2nXidvM/Dbae77KbKfJ7cv9s/s198S7l92qvyFavu/W7sV6+mZUVWDPDw8/NN/+qd/+qd/+vabb5ZlQSmQzrQTSTAUneJqIZw4tiQ4J7NeO9BBjgItN83dI3lHL3WrXmvbGnsP99J61BbbpW0XtA4IdMHheCzH5Xg8Lge1RVWF6iEhketPY24MmUXfRSUTR9UshX1RBUcFU1CFNKEJsoazhoOiVAFV02uuagkWUhGZnEtpWtq4mSoFGkKQyhCVUXFPHBJUFyXFaUFxqoc40QMBZYhLcEStdrsbAajGTnw4s5Z6RHinu7d8QbQetbO7XGp4sDtrZ+/sLu5w0qFZpbURE+OjpHowMnGL8BHHRSjoTQXHAz68f7i7OzBqb118USbnpncHm9h5yzpaiGdAaUYryiV9X1COCY8OD0aXCPEAHWj0prETagY8EIxt8+q9ejjgOD9ffvrx6fHx8fmpPp8uPz4+PT6dny7bVuNcvXoEh1ATycSA/3AOmWzlqphiFzgA3gr8EoOWGSkcrgohgAxOE9j588jUd1MlUEcoIczq8h7N+9QMRhKcm94KdwAtBm5CBvupqaqGxNgpo4cUxzLiT/S0zGT0YyYWJiIH2x0wMt8aoWOZDAScCq7OSm0p6P/qbhm5QdG8Hvcbb/v+fF9Quj+vnr+Fx/+N2meJ3b/QdjKOV24ZVT0ejr/73e/+8R//8Y9///cPDw/LssCK4lmoYAcBuiBEAwykWyO3K31I9si1mrC2Lh4SJN0glYOl4HKp50s7X7bLpV7O/fnktdanU32
+oHYAKEVsXY/Hox1KWU2LijAkLYNgFFAZyVkNVWHiaSAiKKLFVNENUEMRRAWEpgKYCotAhQUsuigCbMLQLBWhMAgWjjSuEISLFJKEmhWKiBgFWf4iIhxkmMPT47FL8xbqQSccbI7u0jmoCHZGcsfVO0OBSkltq0ViRYbmJQF3r1laL9Ad1aO79EYPdkeW0+shuRlrqIwKO5xJDS4iQfFkB8REJQVIlsBScDjo+3cPd8fFxBWxlLWIiASCrddWm1Vf7kKXtcgCgbAIlixFPKRWmm7R0V28o7dB6SlVMqKM6UyODgdIISKinvvj49Of/+WH//W//vWHH3744eO2tfZ83rbaG5WQ0CWLmQwLezA9A4DQ+VYW09+o7eThwHD74dbOeKPNdzmdHrgeBhK78UiSE+6WupbDA0ZRlAJxsHmIAw53mAOXnrx1Nx3EzK7SYkYzTvgAAJQAhxuNQ4gb1CTBkeO7Enu2Y9fBprC3FPTJisqbEGsK99ba/nv/6m6ZV3/uR9A+FrjxuX9Z7/5N1Pa3FsFvqO9/lmzJzL777rs//OEPDw8PCVIUNfQuXgUCaEpw0CU1tXBIgEG60DMBgmToQg/2ylalnqNWz3K8vbfq5/P5+fn8+Hx5et4+Pl3OW/vpo9TK0wWtgkBZUQ5rWVaqAHB3SjquuxhUVR1I4AnzrJVka8lyrWsRUxaowBVdJCWBmeqoDCVQhAqPh6IMjVVAEaqwTK086YDdXbv2cECR1ehvXHYR0QkSrYd7VI/u4bQgPaTDeohTmtNDq7sHupOUNuHDt/sYgJSkVh61iWOWSdOQ3vsu3D2k9vCQHpKy3l28h5MxKs1p5HrlKGGRAejc0JFFSGQsg/Tvkigmh8NyWCxZb0zY6oUezfvT8+m5YTneP7Ac760sF6hhnhFgIPrIXYoOD3hHywBuB32qsukydm896QRa8+10+fGHp3/7809/+tP3f/6XH77//oenx/bjBYk5DYWVRdeDqUDUMewmS6jOQJL+7Y3gV2363F843L/UYpcJM2rFTMqbiIdBfQSm8SI72Jw9+YnTBxr0gEuhJshVku1XeUiCjQR3kcx3PRGrPYCWmYtI5dobphg0E6MViAQpNcNAGUoVGUeC6xD0OyQmHTWJk8FEymMWdIt+rTj1W7pldg39s94YfFGA7pr7l+/ylvj+D+KN+bTt+ntyG33z/gPJbdvevXsHwM9nHltwrKgkwk1vOyQ0vepTskcEGV2VvUXbol60brFtUStbH/Wvt+10Oj09XR6fLh8f43TBx2e2htYQxFKQRc6slO4eCEtmGQ1R1yJmUuI+JMST3QXE4EguZV2KLIWmNArpEgKJu7s7UzXTxUoKdxMKfF1Mw5UhoCBUUURV0byGYPghGYMFRexyuXDIRDKkhWd+UA11Z2vNQ3pEMBolgh3m0Rq1ubTO7qPeSPd2VaBmYCoE5jYM6og9cySFu3e2nrFWDIEedKKH9NBEQEbS3w8iDSOJa+wLAksaSBFRsUmZNioUYqzPrD/l3b3X8ly3Yqi9fXx8em44Og/33wCK3iABk0Q10R2+QExKQQS8wxt6Y6v0lCkac3l0r/V8OZ+3Wuuf//TjTz99/Jf/9W//83/8+ft/w3bOMCnsLqV4oZmUBbKE6MBOTQadAYW6hi7/A7VdHfzK/X6j0klMxeV1rJJ01YUcR/XkGCWhrV064akakIG0r5nwhsS2I/08BCGllD20Q2binooY26hDoIqgBegMETbvWck1D54kUVAdVfZiFvDZH3iHpO/cJMmNvCcxSRrFXzW4IfbCHScQWEbvTdTZkYXfghKh+VvDEa7hiCZogqr0rFER4UxIxUD4B9/IeJarWnf9FzcZWa9+yP7nq5Pmha/kRf9vVFZ6Y3hukwVub2es47mohInYUkpR+7vvfr8uVqR+OC535SNOT1jNFqWfR2Y/Ybl7YzjZUUQA9u4MUUpE9/7+49nbpdZLj9bpvbWn8/n5sl0uUfvd9x3//4/41z/J09PhfOqXc/14bqFUhRZRlWdK6RdDVYXR1FVdDKZYzYyqp7tqUAOt09zvrLzT9d1y+EZayeq/0UXCDkhWQtWaC7GsVkpRM1EoBimxhKuIJZAxEsP4IaJH77FtQI22RQDEcnjngdq9uzjRPHrQO5/UnN6wBuCiEeahPbR7eGhLVrUkOAZJPtUK4Na4zukQIUMiNKWhd5KF5KUVjvOTEQllFxep3kjpjICGxdjm6SPH0NGY5VoBsg8Sxhk8Wm7Wxv0Bf//d8Zt3d96ibUsry0+PJc4XQfdorWl331p79B/7kz88bA8PD3o4pNtA1hVlBQAnKHBHRrojJNdJsW1r5/P5cqmnc/v49Pzjj09Pz5f//v/5/54u/en5sm0IoNytx3UtZd3kSUTADMeFSA8BqGaRDjGBkBoi1KxM+DIYxrmhPrNTBlb38xvmjfZqH71qn/HlYp23G3//TD9zjuafAEwE3dPWUaYanhV8pQCM6D4O9IAEnBFuGhJhik7v3s0dTiV7nAwUi5j82VBliHtTiorYqMEk7AT8klQShIQoi/gehrSp4A5cjYWpashpHgyZwj1RMWVJN2wLAdTMlr5YtXf6UWYXfy23zNe2rz1jfkX//z6q/e2NSO7evMxFLqX0iN7ZzIoD7F2rmSk6wnvv0R3JBGumTbMkR/rnmvda608/XFprtW/urbNvrT49n0+X+vi01WY//FS//7ef/vynx8cnbhdcKlgggTAUJUK8JwtlWFFSsoAGMSzDUGxbN9UFIMREyqKHw+F4XMtqq2FZ1cpRC9aDrsflcDgcD6qqpagtpZSSddCRRn04SEtBEqNi5eXSo3tr2w4OS9Wj9ZjyMplxJJydESgJMK/dU5Gt3T1k25qH9mDz7u590gn0F7iCnAgld2pgiYiZBxAkBxdNvpyunDltn8xuIJUQuRHkCU18q4nADCISzlb7efOTyIJ+/vijsNF7gFJshdHOtaP2rfa4v/dSFjPTrfppq7WaLqkFAtAscxXh7qfWT6fTx49PHz8+PT6dfvz4+ONPz6fT5oHeaWbHI6BlWQ5qmSy2cs6PiGAn6MRYB5/7CTc+1f3KLzC1/5Idx5eJTr/Qsv+rtlfOiT2cyyvqPJ8copSJutk/l52sepM9QB+UAwBjYDGHWq/q6anX8+hby9WnDSHE3Vvq+nDM461GKv4Uib+ZcH9lE/31pu2rPPJf1e1nr9snhQNJBiiCUgpVau3wEJhwoXf6OcwA9Nrq+eK9i2BRW5Yl/SHOCIGzp8ul/uu5tVa9RrizX+r2+Px8OtfzRvfl8al9fD49nfjxCXWDd9gdxGCC5jBFaDiJjiWQ7jyTIowixbWblIpqCadSOWQZhOPhcHc4HJbDqnf36+FoZS3loMfjWtbleBgVczTdLjYISWwU2Q4hFAI6k8+6bO6+bYtmoqpqa807N68uESJdwgPN2YLurETvnJEFrzN39HzpTqR2n5RMw4BVu5mdHRsu+8lxK9lJCRl1a2Mq75kzhSBHZOi1oN/3c07vFHSfb6oQoDkvtT2fLwdocYjrT//2I6
NJuBRbDuvS5OnSRX767ne6bb13Ho9H06W19vR0enp6ur9/wGCAKABqrefzdrlcfjw9XS6Xp6fT8+mybfW8ta321qhWzIoWMV2kLColUXqJBuE1z2tUlEoDmvMHYg9d6i7SXwv3t9ytt3/+6h33trb315Xvn73vixG4NgAQpUInPCY/o8DOE6Nj/QgZykmTMDOVguy7Mp7fmTcaSMfONhxlsqlqntAi0pOQI6kxxSAQZzAGZygh8vU+98/9+F+pdO+K0i583+j/i5283Tk+D7D5/Dd+K9OBKRumjch0aUevPkreXFqraOAavbm3NVqIkmzbdn4+9dpEh5qfdIc9PMjq9fn5+ePT4+VH9l6714D3iK1dnp5OT5cWVA8/V3qKKxl1glqWaRAYBUTvlHQvMMxElUUoREeYSVHnEhj0v6WoLcuyruVwWNe75f5ufXh3d7hfloPZaofjUtZBdKbFkq5ABA4qAYQU0vvIkQyneLivR7TWnBFx9GH3akhoD2FQSZce3pK82OMS0VpcqrfmtbHVaB21052RZZ4dGe1EhiDlpuYt93xuFRFQOMo2CaCZGRjsSeIxusgFORZmkltk2ZyxcGwq7ZhaHCcc4PPrQRjEdvGPPz39WxGpRbqJ249PZ/SN9GVZlg7bIgljpTw4Tey4NfT+9PR0enx8PJ/P93fvjsfj4XAQsfP5/OOPP/7448fT6dTcM3Mt81yX5bCs94C0nthVgyq0kJLuKx0l2Uby29x0QySljyCfXER2sfapHH/x4gbBIp+kFH1Zvn9h332qtk+b6ev6+do2Jx/YEd67M0qu8l1VSFooNT12KpggMygGW5JGBDFwvYB0BUlVxKjRkYi45BkleJsVTJIZH03rUDVUq2QBQiIoEwbpIYiuqmkmZkGDvxpa5mvbVbL/pir8X0lt/7k72s1f6OwldNu2y7ZtWxVW8Qp6aypB9maiJOt2qaetXjYARSH396kF9/Dq/bJtHz9+fHx+Ol9K7733Ggggtt6ftzhf2nljRN9quXQSgGbFGWwdAiE0mPVfBJkYDwRFVUKoVAWDDKXUJmrrUlYrd3eHd+/u3727f3g4vnt3vL873r87HB7W5VBsUVuzFM46KGeTi4auA0uc4UomWxc8JELc5fIkEHXqigMUYhBD65ceIghvHW0LOTfftr51/9gsK9Bv1XuPrbE3dhcR86ROGTp7Drj4Na185lKkU1n31AqZm1eA3Bdg5lIN+Z7SH8IgRDFqzVFG5hheei9+Zl0JSZzP25/+7UfpW3+/1NP6fNTnH57AJop15dIgpbQevff7d2yuztVse3p6enx87C3MbKvPd5WHQ9Raf/jhh++///F0Orn78bgGCCkmgiwzCwXk7u6QSUlpR/ZwVZDggEILAAUy0cZkMES/Ut5/tr2lvOMGWPG/V9tX0iu/0Cvhnj4Qknq1gSJLywzOVgKwlAa3msPVV5hJ5thd7bvFKVddHhBLIFQysUIJ9K5a0o3p00ugTrEQkSi+Hz9/M839dhDxIl76m2nun1Pb3/zGb6e5C0mlzULiIBmMbduenp7ujuW4qLd6uZzXRdZl6ZdNU1N0r5e+bdVrc3fvsqwG1dbapW7Pl+enp+fT+fzc7lqrvfeAi7JHXBqq22nrvfXWoruiFC3dA1nXLlMjgoAPfwKFDjWqmQVEiCISWRGbroewdb27O3748OHbbz988+37+7vl22/eHY/r3bvDereW1VBEF1Uz6HFK9l2BTQ3ZJUIyQwkEKd0tAl4NosEFImqh6mKhXSpJp6KFn1t9vsTp3GqtP1b0Fq15xiO8S6rtEAE0yBja0i5H9GYusHtmZrr+awORA/LH1PhlFk1QMGT4Tg1wGTKdt4T9v0B2iQ3+3Z9+uqBe6rOe78q7g22n81KwLFiWsKVDS3ID/euffjyd+/Opgfr09HQ+n4/H4/v377fqrZ/58fT8/Pzjjx8vl4uZrYejmlpmr6gIDMnjTopqcORIAmpZ3kMl89JuXAG5NURiEJoKsGdoSIahP+f7/qxuLiKv8kKmTHwzf+UL++4L5sJX9fO17VVXu2my/3nrlskjE0CelWM5jVElqemQmZmkkCx6JmPhpTqfpNPIVL7hXRhOQSlmJEdEKKsFIhOqMbD6VyehiLRl9P831txvzR/8FWKqf1W1/ctPmzsqD5KIUJGn5+dlkXWRfn/w7bSdn5di9/fHe2tZklQA9lYv7XQ6XS6XFjwej8uybO3yfDmfzufLZbvU/txara21DQAMAdaG5grRTraOQLpbJUsKQSwGE4lTRTWR6RoRoWTQs6KXUJWL4aAwyHEtD/f337x/+PDu/v7+eFj1cH/ICh7rcUERKYZFUYxNRAQqEIWmLAmCMKUSQcsfF4R2eGApylC3AFVUCWkMJWwJ1Q7fXC6VT5f6+Fy3rT9uzd17j54Zr5TuEoMhhPsJOplgX1QmHzi2l2rErD48PiOzqhOEmjWMOfymIuJZyMkoMeqfBeOr1pJoUQlE9IYL8IxAr+cTouLuiCOx9fBThdR1XZbj4fsfzk/P/YcfzwAul0vv/uEDbLlvrUVcsnriVl1sLeta1hXRRDSzkCSJSEUg0lrm6jpIiqd/QEhE1lYcLmHJAEMq8sSk4klCo4RnvFzn+19vuGWuFz/3+ivG7eb8uH3xVwZevD77c19+qrnvbhmJHe4SOqojSRYenJB5TaKa9Lkvu9zPaHwqH4oeWfUqkyQ0GCngY+ABs7gOfdjfSSc2ugrOHkkfc/G3Fu6ftr+eW+bfreXsZkzDgVR8HDhfnssj74+rCtp2+emHn4rim/5NW7OYqSxqotx6PF3a8+nURR56rIfD1i6n0/m0XbZW3eO89Vp7zYydYkASjoMozVubVEghaJ5areX0h0CIQEgSnEOUAooKQ4RkARr4cMBalnVdj+tyXJdlKel0MZMs+UalmEEJU5jC08l35SUOKjQkY25BQAagLopE4GTpiiEQ0p3WKR4SKARaaHNcOs+Vp9ovF5wqI9Q90QQ6/UtJdpDub44yT7n5J50egBmhmtsvZ+ilE5nuDopozpcORp0s90OIOEQ5qkkBMPlM2OYLSyyCamImJoQhAA80IoijgiqdvFzQA3fwh0V6x1bb86mRaC2hF5eynAGMuoDU9XA3oBHNl6Jpe/XeOXTJzDC9US/JiN4zdZ7r1DwQ0xChDL42nXAAmTHVXb4P7XtHiL8dRH3ryle1XRX9TXr7bVsOLEcMhgRntRkme1hCb1OcpeKQCXAEs45Z2pEMkDE/L9nJMD1nWa3mvs8jxVKqB2g2RHcQo9wBCKDnEwKCt3Hub43mW26Z2yMuX/OWeP7G9zKH5jZk8etnLt4g9X8Lh/61AZm3rr+Fqy26BBJ4wdSDsgcr1nv/4YcfzqfnxVTL0nv707/98OH9IiImqgYhvLZz6xfH9nh62vp6XHQpHVqpz9W3bTu1DNEYkeDC6E53qZ1W1hLYmlTvANa1JJgEoEiyrxM+FuDhcHBvtTdVRYQSh3U9HhcAayn3x7t39/frui5FD2s5HMvDw50VFBVVhRBqIBEhy4Kp7
GRtHKhMdx+hGB75IJ3duYhRLHmtTUUXiPWsnnTatqfn+nTxrWHrcmlSg5ctg6K68/QCSkrUuEElEcP5tDtlZF7/+WZjuQqUEQLSARONUX+VEXBAECIQ+5m1+koeuUgRWYseCg4LlwI1isbdgVDUTlUtRxQgRJ7PVR2qKAXLYvcPJdPNf/r4dLtHzFCKmlkpJugYAb89WQQU9L7lhOxZMAqqJqx9GC4pctKXkls0Y3FxHTfuZ2E+gP7cbv303Ve68Je/eD1057c+6fCXyyXgZp9+9jHGj7opWTe1co0Zjb+OT47QjeYOwIPAleOFJukl7B0cDBDE8N5QRIWRECoCFKppPpGKToRNkCNDHCLLsmB3cnDwhc0SmblqAWDEY4E2aRXIX6y5/1Yq8KspxCc+9/8D2oyp3LZoDhFeqgLwYsLMP+xPTUkqY2wbjx7sYhHhPXrzZAg5d78QFfARQET6S8lkBJeBUh61jUQkYKGgiIpg2orXeXR3UpKsEaIYzLe+mh2W9e54PB7Xu8NyOCx3h/V4TP1etAiKYS3IghICRAeQDIgyyL9k+GfSQyMGSNZLcGLx5EqI1rq7161vNWr13uEhTgtYYPWowdLZnaM4A7O0Xu4JkiPp7NOj/fbQ/dzBLy8uptEbyXDmoIyEFocLlVnPWqgh02X/xakf6v4INACAhooqkEBlVaiKaqeOdZ+HkqQhrXIoGElhpWS6eYoSd7/RorDrTM37jUYFDH/vSE8fHicSESmyZT4XR9x9YD5T7cnlMWKFL7fkr5MA/26m85eToT5tt866WzX01dOmnoqrgLraRC8ViBwtSYWGIaoglTPiyglztOkVzP+BOnOsZNKw8mqSzt81no3caxU4st68KEbhnXn63lRi+iWjsK+nXz1PX7bafvZU/9+oZSLjHnYjPS04d+9I9vXeRNODpkQ/nyNill8Qk7SFrXY39AqweYC1tq17D/RJYQFgkCfCAchVcIhqWGFiMi1X1VhSDI6ywsPiESgkhhAMET7c3z88PNzf3x3Xw1IsiUoXM0FXGJKlNhd2EBFkAyAjoDf1dIiISbJ1JEn7UN4DqhAdQSRHd0ku9dO5Pp3r6VzPlzhvfauxVdSWe2CU17ixZPW6BYbHN6Yf8nZJv6wCKq9k/R5gV51RKQ1SREEGfOw/EWaKLQDoGxnO4w67gN31d0YRqogJRWzwQWmBOFVCMIxxHVmF63q8IRK5tWtfCCPCCXiEex2febmh9pDp1XKeniowjbgplYBBljiEE4cJnkNvo8NbnfqXyIHfRG789dor4X4dpul4kJdB4OlvEEaWd9cBnRgxzuGBiUBq1SKZ8mqBuAFoDa65IeenNTDAuERGR4f2AnB4cZD+MFKVBMQpdGJQPI+Kw+O01l8g3G8V6tt5+tpB3FfnWxN82yf5IlX4F/X/xvU3H/Vr+/8K99T1iSRBxMjSzACgQgckPGoUhSaXB4CNTN90UJQ6EhKSK0Sr95RjzdlcgleKPhGhihCqmupCitai3QsjNAX4CjgRQSF24BQz7SLBWwyhq8i6lvvj+v798eHu/risANzdawtvQi26aDi6o9X8PWAm/7iIDP+7OWPUEYMG89CyBapAmLDTU5B4Z++91l5r3y59u/jjx+ePPz1//+Pz46mezv7xtJ0vfbrax9Kf/yPZx1QOURvj1Jn6zpy1ScQqcVWsXsxUZIImqCGDHNaY5G2cgJnxLz/Tw6er5eqizCsKaNIgq5hChFkcfDe1Uh+QcTzrdLyCdM5MmOH0wjifiDG/ItJvfLIvN+kL9td98+ehnl0CGNztkSCaq3OG0065lQOYooBvx8Z+K7nxte0Xumf39uoJX4ny/TPjA8Rk0x0gyEQViyQXfzYCosMrTiiEe0UB7oKqYIzeXNEpHdJNY/MUHkZ2KkUZE7rZAUjEKwCfij8nlsBu5uWXFsj+TY5f+aTta+7/pHb7ozhiUBQ1mXs08xcCopBwV4iIigJB0DsjyxzFKLkzyqpaFqazMckhiRCx3NlOV4UaLHSJLgUdoIpTjOhUdSjRA/OwkQhBRGqlWnB/XN+/fzgciplFhLetbRF3qgwVKsiI2DYNx8FhZR4MFFNABQVUUWdo6n6jYhGBBWAWJvJ62i6X7fn5+XKpl0s9n+rj0+X5XJ+eLo8fzz/99PT43E61Xzb0YNYn467MEBPbkuS0BEKZIm+3bnPY/SrvhsK+L7NbXZ6DU0ioITmkTg5U58ARDUBkLv8vKu6z15sDRgWGpEEOFbXb5xEOh8xw9VKuqUUkNV0xt2I9I70zv1FEJOg705cgE7VePmLILi0wh2mekRyVsEid1LfgSEIDXmhOQ2r/sgTR31Zu/DXaK5k+lXFe48nXU+r2J+yTJalzpDN9im/mohGqOyFJNEdAZuAU04GzHzk5HRM4H1cnzC5CDHLjyGGIkmEKTnQaAAckUsm/Lu8vae6/yfG7D+KrOb7VBW6vfO1d3lptfyvNXeb6uGkzbjXs34xvqsrAJJqZiQ4S2YisYWG2ZOgzC3kYRURN0G0ng8Z+OorCBJE5bAYLJR2CCC5TbW8ggwUaAk61A4xcMiZcih1Wi4jW2uVyOZS4P5iCpiJEqzXCA25lXfxgywq1BF8hVLO0NxZARANQbz2JUGxFOiDRO3rftu1yuZzP2+l0OZ+20+n88fH8fO6n50teuVx66xIBsOxsSsO3M4dYdx8NcIPly4nxF7MmcbM/Byz4dm5SjmXoW4JTcRZVYYAKhTqZySYUaPwMV+J+63xhoA5QGslOWgZLpny9nkO54YfTi0OOk8S1ip5AEhpz1ZvDKBCjXMeKM5uBFOo8Hkbgbkj1mHuNGbzbNXmKyPAdzMDJVYG9Xflv74vPyo2/toj/Wp/77UNezZHrW6/dMl/uJ0/cq3yHiETmqSJGb9mhjm7HeTDvjvTSpL9+F+0kDQMAN74jKiRmsZ6xD3KqdEzw3r6kuf9Wx++r5f7pXV59+lff6D9Gy1N67Nx96YwkBRHA0g6PUdRZEFAVBDnrs+WcA2oiYCR3QLIeWkkKlJkcNCdzuHSolKCCKgIGpBfIXvAB+R3B9DUIhE5k6bjoEf1yafRuEncrPBZ337ZNpZ2fO+EUWY+HO7IE07fTWM2sLEtZF10DVkCZ5OOpgFoK915b22q9XLbTtp3O56fT83l7fr48PZ2fntvz8+V82mrtCXlEEnG/tpcDECUgEiOQS9n3FDCCAdepuH0v5pWXszVEMFwGfFNFAiLIQoIg04E2ExHlZ4lwRyRShpmcfUEABUGHSyI5c9iHCogMZmTiQaJIx2mUkp3Myk8v9gfnqZZKvEEA3VU3GQQ5qQzOXFUdIzIlyEA7podmOt+HiJ9q/tc53P/jq+2YYmf/d2+343vrlrkVU7evX/7S/K5AoFQMCoLr4cE5Ydf7huyDPMrkzkhsXhwBfZLTR5++ACTafX44r/Fl9aVf6pb5rdquwv8l1sD/jm3QMY//azBzFmCaGyrgER5kFLVEVqWhnOmFMWo/wOcYJjfKnO+QqdCJMulah1z19CVHDBQNjXARShgl9blU
gnuv3ntrDeFrkdaWJIk/ndg3j2iQWJaFpNmSUZ0eXttzKaUc1gPvF0AWwkyycDcdFEaHF5DRq7ft9Hw5nU5PT09PT6fT8/Z4Oj8+np9P/XzxbduS0RYUUCLTOJR7yezUTm42FuVWZ3+1Tj+R45+5cvumTLwRAYSlnz6X6nhfALzh2t07ufZ2e1HSApvKegReMExzF7Xovefsq+oU7k5CRkoc9h2zmzRzJ43ajqlU7Gr7+PBYPkAy0N5Kq7hy47ySy68iDPstfrb9+4v1r/W575/ny4adG+dzEvxWXM3DPuTqY7kqIqovhHvECNy8Fu7CdMVMxX/0jT3CP7FLSK/tVe4LpmN071NkRvIAfEG4v1K3b47uF9YZyd2JKUo1SFCUkC7qZUHfKgGToPSOLuikEw3Ius1X+vZQI/mSLf4zk/HqeeKNz7+5tl56gd7yDn16/Re2DW30cy1+MH6TQEh4htw1aTmBrGcESBEUkKhwsuuosHNdUCQgUmEQYE+FEEJCQlqtgYis1GekBCVIFK4+RCY0+W47+qy27ZTEd8OWc13+9EPFh353dyjdHrd1OR9kPf7UAh7b5fndXfnum4MuWi61X869bq01r/7hw4d336yqHQRqTwpsaXWhUlSgWO6gRS8Xed6+35b/+a+n//U/f3p6vjw/b9//+Hi+dEGpjc61C5u4B519hJS6YVaRNeyedRQbeuutk0AkA8vpe043dDruxjE5vTTXieYyNw8JhARUmcFom+hvzIguACZX5wuQJW+W1ZTvE0m4CiT19+yYEllUliC6jLJXJhAGvEeTNRV0eGSdFCabWqZVSbxSj6yS87z3V1tj7NCdPQ0AWrdp5c9REBHAR5xgd/ukGPd1nojcC9ISAPqAXYlAqNebln4Yve6eyNQpsEdBkJZGfsz73HFDRyYAw2AkHmwQYpwP29A+9QSISNw4f/YhAhBS5y4iqLu4WQ/F26jGlVXKEyLwDFIQwtjxkUIIvPehRu1k0REAWhxwXSFXC09VUytP+C5U0jCH3XrVyVRYoPAsvJVPOlOYAEPP3LKAKLIAtgJYTBMWWXiFz3EAAkb7d8W5f51/5q//PF/o56/R/y+81+043Jygn/nW+HcqaLd7af75YrfP04YiEE13gKRiyFmC9ccfH3u/N7Pjcdva3Wm7LDW8nbbzM2JdFluLIVzYt3qutbIGoFQ7tjVLdkjykTqat+1Sm4dKMVu25ufz+b//9//fv/7rn//8p+/Pl1ZrPJ+9dVhh7/BMVZ0kMDK9ir+6zR8en1z5zFsYyvvr+73U4V5/5fZjM9/n5srLF9l4gyAnbxklp319XRKxzzJ34SVXZiggyV1f+It540F+tX5e3u4XtgnQmoOgA5ghI5+DKjuxq1w/x3noACCuAgjA7RiaLQAFIzdM5qyrWgr3mwFUAKYvAGNvvdhfB4kZo8EIMamItOokBWaTIzqrMIYtEeGO3qP3ntI/382PvUrM7CMndA7SUAFeYnBHwEPmGXA7KWPzZpEicUkn0HUGw/JdCSSd9cAXCyxDM7AsxksSMnMmAPy74dxv26en61f1/Fs9z1v9/LX7x825DYyJxCcy/TPbMl/cpE682vn7XhqvxDATo1WVszKXSJKM2/RHW67t3odgVdvu7trp0tanU/SGaK0+sW+9r8uyHJc1Ijy2y+l82U5KpRlKad1tLUtbRMQZl8vlstXHx+en0wXQdTkC0nr8j//xP77//scff6q9oQd6BwhQ3ZvTfHCqY8qyL8W/U+x+IkP5yQ7fxeJERo63uO+36+cViNeycvaQt3u1JG4M+Wv/3J8nSXdEIPJqfvNRMfMSka6YnbtmroqBXJwDkgczsXv2pzIq8395gxcraoc2yotldvs8b8a6MkPiRrwio9Czvsd42hyExF7vf6ZHKiWQXuXdp5ADwgV2e2UcFtN7tY/qrWUvNy90HAovgksk1cqUlQJAkugT6L2PiYORA5jbWnsWiRhivbW2l2V8tRP3P4MN+PRQyc/vFVGu9qLHq/HnBDjJHOxdmTAAhUzXD2yI9bTPNIQy4U1QJh8lCfG5Yt/W3K+awks5ha8/+m/brdLxalCwW09f9zxf2z7fz2/VP9/o/1WHV4n8yX77VOd68QF5+bGQzFe+vi8ikiXSk2dOE2ghIrMIu9M5jpUpHJJZzJ1LAaW0jufTJSKeikS79PZ8f1xK0dOlPW+9Bb3V0+lcL1UA2EHX1mGlO7SS3nv8+fvvt619fHw+nS4iejzc67KSPF9qd+YBYzKKMJKMT4qKDx3okymYHpjPA5P3d+e1G1n5QqzvnQQmjvhlP8x52286BeDtip3bIf3zM2n25RGbb42P7YrwOGlHLQdwEM1T5gkxF8xLgQVwsHoNScsJDOUAUF3X3qeDifn4+2q/XfafbZlacetqH+O5j8yOwhx/1zEuU+1OTfnl8L6Q4/ut8MkaCIRM0+SXq1wvrIQ+njk94zMarp7FLgIR3d1r7ZfLpbX2sbbd67Ir6UBGgLKr2/PmhXCf/45nAFxwa1UDQAv/9PlfDUhCHcaDqmEEbIaplgoPRUhmWIjkVYyM1UR+Qbjvw/SXq7H7Avq03X7ml+Qv/Z+htmPadyKyY4o//cBnr1zLgFy1/2yCWb5Z5EViS8r3dNVFgFQbLwBAlCaqWnIpq2lQzlvfWnt6Oi0FiBbtcvzH37ewS8XH5yqIenk+nU51O5tJF6MtxxqijIit1m3bfvrpp637+XS5XCrEjkcvpUTwcqkRhEIK1AVgD0RragspwcGwC75whnyiSo/X+zu7UNMhUYbbd1c65VqtOq6SiIgp4q/zpbdYYd7eZXznVZuH6MyZ2jc/AWY8TyagnoyJPeRVIUbCZkKmWc2UEbK7ZbDHBa+rieNUxovD4PN6yatFfn3x82t80D/s+VlTZg9i4OzArofHkJ8yc1sBCsT7dBhScV2fCrXbxxsPf8OTscdweQWY49Ov7K9fnVuJx83T2mdBriB719r7ttVa+1ZrrX3btt77U13xibYnIplVsA/1dXNJA6YjJVfLfuoBMVaS7id0+DRfXhSLHtmOc4gwBgq4qa2azp1MPKQIBywSyKUzl8rVa/TVOPe/QOJd2yvJfnvTL2juX16+v7h9vp/fqn++0f/e5GXqlnMasFcb4mWHu8k2Dua5OEZG/iin83KORPakjFH5RW8LRBGDWy59NntFTVK7x1ajx4XeVXhcrRgVbGHnhsfnum2193Y5P5/Pz977w8MRtqGcDtVJ772ft9PlcjmfzxHYat+2BvatUlXdebps7kQmIGVtdM00VLySNJKxO14lyD6AL8fz+vkX70rsmvye9ikieBmHmHysSGYDHV6agUeWzAngzcO9uHvsd5483XJNi53PM9l+ZOp0jhu+RQIMRfJ6c6aMjnkS5uUxBrz5oVNAJ2J9HG/7wSCZc/rpMKUy8Uph/8JSf/XJ3Q+DqZbMYd/jrvuLfDedfsjycgLDLCCXY1MZADR2sT6MI4qAI6IQ+0gGXOfwvuLw+dRYyRi6LHNrCAPu3oM94nS6bLWfL/Vy2bbeWvV
0rzcsu3C87U1V95P+xkLiajr730dabh3uqcLfHKhzn+5Vm+b83E6iCNKTtHvk9h+YAYyxmhSZXC0YOY0a+vPCHb+dGnvzA67ts7f78tf/D1Db94u3E/bpGfB6mb7S5W9gsMCL6RcxmYAKxtxAIiKhkjRGQkqyl7gz+sjrGXUeIoLq1HBGY/pPj6uZ6vOpubPWZhK9brVe6I2kY4NdQqyUc0R49HPdar30HgBa7bV2UqwJVNw94dzBcA9nJnOJFu09gh6BuPEyqbwRvpxnpLx0LEy584Jy/YYSAC98JnnS+Ss/xsjvuhapmPrpVJJup0bmV4DUZK/v7jOyqzIv3A43UywkEYnYRxS5+S3jnpyGzJTe050NjGnGC5+7JDyI1yPtJSvDtb2S3Z9rPru8uU0mBs+ffZti9iIbf/xHQLFRhkLAnSAIAM4+6hnss5Y9q+os4prDKCNHZ8+evZ3xl1vm9ucULKkA1R6t9a3WrXvrcTpvrfVL7c3DOx2MkEhH9g3RxbWfcepMVQwjHBTj5q/Olde+l6syIUZSAMKVA6S0GyUEJo/cQCA1Sg7qKyEgokQAGjogriOrWvcMuX93nPurH/yq85+1E3+r53mrn792/6/e/bKIf/WB8SKGBrdL9pu2Y6EVO32FRBK3IyiiUEpQFMUUCI2ZR5PVTCVsWa0ogq6OQFCcKpSfnk+XWo6rqTC8kSi6WLEfH587ni49VIX0YMaiBr7TO7dOgAUhtAgBNcDu3Cq8U8QDhWTrTgoHdict3F819FOF3KX5jd4/JbvczM5I65ebE3eXwlcB/Tm945bGAIPZ5pPAZMroIeOmj54jwRDDC7W/BpD1OOVqE+Sb0/vPWTApJUzIXtt6nEH5Q1KwX9+6apfyclH97CBLv95u/tTruF0fcgrZMXQ6XWKa/7tsLZ8qPDPmBp1Wm24cvQmECGE2FNNdEZ7S/HZgry9GDOOl0BcRr6eI8M6t1W1rz+ftstXmQbEenphgQqGZHkLvDS9k6OhnZw7elfdsvrtZprK/Y09FRHQ32kYb9jQcvMkqZeSUq6iQmkUWICLiCEjWRYC8iK7ZWEqIjLBn1KpMS1S+wOf+61qqhPtAJwBAJyYUN8r7bQB6BC50KI+vhnUfu696krdk65eJhPb2s7f7dWfAK0MBN54HfFaAXLEcMj8fN1/ftbwBbo0IwVDe8/NmKfF1uGISMAO01kgm3iwCEc7hJFF3AlFKASiK7owInllrPZ2wmCYBlghVupbydPZzfbKSjJApwmxPzDFNFgHJxFonkgtMjKbmHQz2uDpMjOIY2ZsQyGt0wXX8cih12uaSpnoOFgByuDEByIgwpMvldqyHcJ0+mamZcySPYEjrG4G4P8ALzuEdQZ1KKK5CZ4e15HxxbvHx3ZmvhLkvdvoEzm/l7OdTjxe7ARHTnpCrzP+MzrhfnJ+X10vxVvMdjpPssclgNDMRwYwxlnVl94jow0WQhgbdVkn2RLL36K333t3pToa4c3QwXNiK5YXGrXNmzKZzX6k3nrRby+lWaGbyl+ruJBkR0ahRt346nU7bpbbuTg90UtRChNBMQB6U2yqil33Ebhv5IjP5un5GidSY8yKqJjKSFne7jWTmVO8Dqwq5eqhsKUVVkr4fGGHeiHADIJl5hgidVkJEoyjURIuIICvviGj/Gsrfz7ZfIvvmc784A2/ffUOcvb7RXy5G/4btL/T8fKq5zz8xJRhnfvnU2V/Aum3eKMV9iCQP1sD8ZaJ0UkvOY3UQKCYeGGCi+ZL7HZpqrYjo4L0CI+U8U7ZyKJ6Rh83sawBC0ANO9kgljkH1SF1Hp356Y+S+vUBeLafbKzJ1q7HMbhJ/dt15H0jclKDc9S0gKWt2mIfIVa/HW1B3DAm7OzpSOmP/92b6bh/jNr9xjv4e++VufwCYeMPrB67QwKmeC15r43Lz7otneGsF3r7lsYkIWUS6jARIFUFrLfu2skx5FN55bkLSe7h7a57FzcPZmjNkglzzPFYRkVXn0Mn+c0SkDEK8EBn1UT6dZZnUDvsjT4wTdwjj+bG11mqttXlERMJIkcW/mDl8IuneKAIRfWEBYHb6lolDXzDIkwjspJ6cPOypwg6WYAAiVKWZlbKUUpZiqloUy7KI0CSrpDliHE7NQI+gR6veGb3Rg0QoSPfuXWoQoYtKgUri4vPhf41wf0vi/Gz73PTIz66wr5LvP/vJt575Nz8MXj3/7tx89eJnx/DVaPP19SuGb/fY5PV5i9gJs6YJn0hE4bDfFQNkjVEQJp9QRRiThBYiEqIGgYKSORQKwFOgU0HJAKQiMPX0xIznlossT0J2Zw+6k5SIiJG4lOreKEPjP7eyXory6wuZyt1YW3q1b16eFbvOQZl1bQDsr0UEsiuYt4vwVi7fartXH06+deNA2B/vFdpvJrDwZuLmqkhJzQFT2c+e0S2nqh64Gi67L/jFYr4JPO5Hzu2e2tfnKy0kn7kMNEsGOIVQUQG0N1ctMBVZRMzda629+8du7hzydOuttd49Au4+Y0Wprk6lteI6WZnrqgRQMqkniVBvAga7xHj14nabZNmZlO+XZ/SWr1MrAUVGrTQBBSIBFZ3EFplAejO5OyDlS/JBhnEj8xhO8EySNruqqSJLr+hqqrqoLMuylmVZbbViCnoqOD7ztpxKChUBy3WgKAuoafm21nqwdVw6OlORd8BmFE1E5KuF+2e8wDcvXomqXXp/OjS312UCSG5lH17Kx599sNtbfOnM+DI5yC9uXziT8MbzfyrZ8ct+2osRvhEoVycDeRN6v7oLRPalScFwZM+Rz2oDiAg1GemRpCTVOKmgGGSkQQHKGykDcDCMgpI1mYSSXF8xYbkD2sjpF8q4loc7uzOIcJLiweBArQwI0BXw+abr/bVMn3/avDjWQE7BLUT9JcNM3IjsOS/cj0bRzG98S73IDl+o6rjKd6QmvWuXn/0h+6ylcp1QOZmxtX0Gp0EwY6wzDJCi8FNJfTs4L9Yhv/T5T0W8qgzTMJSiJAQWULHVCbjWhgg/17Zttdb6saH3XmurtdZak5GfhDuSH2Fm1c1S3sMrjSwwrcPMokRglhXTK9xIdhfNdYoB3FSe2oX7cPbGMURcJGSy8Mw6gjEMzWkGSYjqy4A59sCvyOfduZyx0HQK7QqbKPMAUy2j+kpREbFVRKmqRcQsFGB4UPp20fDAcKwrmB60YiaKQhaFFSxiqqYiW5UWrM0vPTZHd3SAEY8Dukr85W6ZL8imV/vh9pi9nZfPbptfp7bjk63+t2qfff5Px0pE+hvjN4sDAC/0Q0kC2NnhFCiT2XW3ea+bWWKKsxDJgA1FY9/kCo1AXhEqGLO0xZQ3AAbaeHiTKYrrKSMAGRppr9OBcNIynBv754ggAjOeli9SmjOCkTHAlwqCyJv86VPB2UHB+xu3n9rl+CtvzE0/cyKYYS1iaOzc9/wNAoZ8GbO5CXtOnRoviRt34S5yFSI3b18R0PstrkpAIpuvIeIbyX6zJsZ4XBXMW+m8r4R9U7zcubcq/vXWV10+2sDpUpVUweIikCWotXPb2m
Xz81Yvl3q5XGpvH1tPHoveo/frA/cOGanRVKXqGNd1WYAh2UXS802RNAMJpMTf0Ye0WcY16w/uP2Tb2u3vmhE9MS0hQd3Lz4UzRiww51ah838i1BngnlMwR/mTrNqbKY5XARhIiNDMlsWWtZRSdB4bcsjqM3sNnmBAEcpQQBmCUECm1nGnBRImKBIGLqCSAvnmUILSDrI5Lj3ONWr37nGWwtl+veaOT0T8rb5wK2Rf/Xnzmc9I4U/F4i9X229P8re+9Ut6+0vazz7/devmiy8+51t20u5GGNevIvFKBAhMU11EMls54bAEqeHjXYI7jITTh5Ooa4gM1lwhYBgFwxARKiPTh5ZaV4hKWrpIYZ+ogCzidsXj0wlnRJaJG0HXUWKCTOj9VeiQM176xdF+eYKm7ONUdSdcfXT7SR6F7OOXhdL22eEMZsRNVdF9gb1Cs+zcefvIX/t/Ketv0Tv5xArIFR4+geq4Qut3esjZ/+ck8qcr//Pr8K3PvxEcMhygolKoRhgSKRv68ely2frzZXs+bedL3S5+rr13/OincXgP90AKzfGvYLgvdihh39JSoU3vX9qAOkrYyJD7Ot41DKeiTmASCTJBWbdtzESfupAMr7dKhEiXkXQmKtQRMgrZwUh4UYwUeNPukpk3kKFlgDOcDhGqiZmVkp70THztkNBxGEeaRcZQZQEXqMFMKCP7DYfeFWL6f7X3r02S47qWILoAyiOr9r49t/va/P8fOGN3Zs7pXZXhLhFrPoAPkJL8ERkRVftY09I8FRTFN8EFEATsTWQRXFQXpUKWt4tBjOlKvG/8mex6y7ds/+dtcWF9zvmDyB0n4D0+B4I+R7b4PXuFX4DtsYhXifsngv07sH2m7MB94r5/IBvRqfGDcmTcPucr9Z1AAX1QRmwszTise/SgIDnR9as37YJlbZcJlFlECz0laQrmoiFR9DGB6h6ewno51iFibV0VzdUuuT8gUyef79kDpTvMJz6z3wAU9LNWtoPWiuNjbpVeF/lM7UnpCeszgYNZTRazxiRkaQsk3lxPVaM6Cvd9P+Y0zfbPMVJO0rSumFcifsBg0Jzltubrer3ebN3k//j//8d1tfdbvt7ydbXbiltG3vBHaJdquVgjENWlwAXVNlHIctNCRFBcHmYRQngpknmmSiUdvCsggpTKbtFQanWkviM1FEBEkiigWq2yujc6qbtmrmtnZoAaETszKVw7KruFaq+zMxyk5JzX9WrtlJ7M/FMh7i05iQoooIKJsoguyjeVRZIvEjH+kyrCxZBgF9oboEYRsT9uAiEVJpKZNqYNi5no4iWJflQsE4d/ejgLE2zH+XrDh8h63CEeDsZXh7P6v7pvHXWv7uIlvtqFU8OHlZYVKFqz7EYZ/QitAl9V16ilBWGJkYUa0QRqlYnwhVtKMHFO2cTQb8mO8yW06GGvDJ/E/bLlEAUX9ZkYYEbrkdoVXvEdgZg34zlM3Ts/tOcn96qpcgdxHAwX38/k/jRj36OnT/qft9tGk8z1drV/vb//57/e//XH+nPFnz+xZqyu/mTYDGbYMmwpS1sVUv21q2J11UClhPvxJH9IAqDlsgEAh/R032PulNcJpnsL16IqjlTvBxSzGar74QOwLG9NM9VVuVw2rWlYLKQ5ZactlbJL4TCc4T1D7kXN0cVATsXLUQFh25ZzFi1mjR023dx/soGmWnqbfLv8EOEikpJcRAVANgj++z/+QWSsG/LVtvX205y7vq6l598zboabYSVowP8e2s4uSwpIY6cP3nps6rtp6vhXrupeG1zGsim2u9bdsiy2lXtcRSN1rMBskKgW54H12Q9SfD6c8k4hpBPZ66k69djS1t6ddbea7Kz+47qXigDDic1h/k2ppn7t66DKGVjZQF+oTljLle8iNLyImZiDHrUi3i7vFUpzoqfuVW3j76gVc7lMgrjOS1GGUXVb9J5M83/QhKq5aAsooJurmOFCEhQWvS4haesbzZiz8+0CpZkryqB6Lq0UFwBWHPdPCIwPb0E06IqOFYK1HIDIBlkZph7TSyhd7QOC8OE+iJ989h2lBC2nd5guypYtVhu0r+KbrcltBSgGIgC/vu89JmBqxKYLgtrv/e3Rfi8LSCjiB9iObsvqFH1LulAursv0f+ff3m/bn3++//kz/3zXP3/+uN4u143v180oa6b55XoBVCXJPzVcArIsNPiebsbYLeLUU//Ma9Wq5+KkW0SERi4oRm4FYmZiBJhT9pNRzaqqCYsWDA91azaV9XNTX3n9v7QGiNDEczVzVdckUi4MeYfr27UNHxpHTNXUWSYEdneTTDC7Iw1SIEoV6PsNIpLSoqpqnZb+2OCO0QjZYDfwSltov/1Tk9pvyf5x236z9Qf4j0UWVfntf//X+/U/3rc/bukP/v6n8X9m+1feVugG2YhNDAl68d6Q93eK+MWxX0buz4S2LCOyPmCgngsRpj1kFz49vMpSPITwH8v/fsPj2yYMcYFJQKglZYXZUewDFMB7WkmrwpCIAdoBr9OFEml9a29FTBzJ94wjd5ohn5v/noeIr+4gjyl9XCad9I+BgQWMMS3+DMI3OEWaCJFUVTTpZoQk1QXQnHldr+/vt+v1+n/++bau+f3n7f26XW92XekSGE0Li/4lBU2zsXgOakiujbVf5mI4t4h7LWBus1aKjR0K2iVO57xccze3QbPsPZr9ymQ943FAEwe3Wugrty7KXB/6pziunTu2pOEhQzwMVhyySOgarm3prxXMbeKnucxkJv7zz/ctqSXkhKxCFTG9qP788/rn+8//+19//Od1+4N4J/40/CQ2lQysdSVJ1cVk6pT2O4g7zsl37IhXwzdT9oNR/1D6V+MPc54e9n/GNO2qN6A0j4nCHDQ9xdH4iRTlFQDoE93na5fLwDG/WPHhA1L8eqB/1+a3NTMcYYdpW8sTTNcvhbjaY8xZT5/NrjvIffpzoNEnaVplJurQI3fEfZon7dyyNc1rmFI6TO+WJkkaTcvpSfJbmtlw29a8rdeV7++3P/683m74z/W2rXa72W3FlpHreQlgQBKlwi/2truY3eZPG+Jak1af3odpcVDg2lhWr215nTWDzfgPhBClZRY+zEjmYugAy/Lml0jrhO2cjWrppXb91YBDo8MArFqvqVt1cBUnaBZ1WEWTXre2teyJe7uu7xXb3F9H5QqETEQits2uYrcL3hP+ofhd8NvF3lT/n5//84+fP//jp/2x4idxE6yCd8CUmyAXF6sQZ56Byw9tdXhgz/35+CfDIagp3fFExhP4mjiAZ8IzhoX3YSK7dwvlnfRH8ffSH+S+o+wR/B7S913wO8xWmXiyaa3spG6AG6kJtTKBGK0UlyRanXUrCAYgV/GC7yuO5Ehk0P+5OnslNNOoPB6jSpr7dKrbz2n6Cmxjpz0QhT8fImqOkSckvj0PidGkK7uvWEW6tS29FfsmyMhDxHlF0s2QJEmF2BsIbpnv1+3nz+uf73a9YduQy+0YNTMCqlhEhKpJE2TLbvxE/YTdfeYSXZ4eKjNvTrGqWnYvAhRWA2dBDJmlabt60
oxqXCzsFpJzznBNsMDKAIskmpsZEKvmuJzYqFjFMEA9SiX9QKPo3wOgAIOJzQKMGPbdSNwx0qWpQza6Wfn2VhRUyip2I68ZPwVvggtxES7I/3n747ri54arISdkwaayqWyECUyFJMT8qiHpzj1Kxb4cubfp3Sb69NDDKzvHL+4xr4YZ/nw0/avxR1m3A1WJD32ml4lUFkjr2oLKg3SxKahEyUw4g2lVc7ZahMGBZ4t3xXDrYMrzMVpTJSRpxQ0qepmVR7YOBl8OcTl9MItBF779fiQfqQchtcMjEZe4i8cQN4CHYT9PmnlIf+4PlfzV9E0gs0nS5OomwGZ5XS0TP9+vt6v9eeX1hjXDcnHkmbNlE5d4F6G8CIhlWQjNoBJN8E7jNqokjuu7NCEmSBpttvim5ZfY/CgJlXqUrLQS3Ib+HZM4wsjuQj0UkZjg7iRd/1A9B4aRsqLBWAVWIqKqEac3LrdGMsbud2JU+j7tcwAMb77IWB3cQ0xhW86r8Sb5D8MFWMyNYuOdAJAFdpGcdBXNIhuEmur5s0mx1mYgct6kCqk+Dbnfn6Bnb6eBfyZMkORVKv8B5P48rPYSztKfxB+nP+uTWPqEDvbJWMSi5/xNOUqzRuirPYOGc7UhI4VIt9PSSbNIcTxcJ7NbNRCgu6+sRB9moAmtu7bhoa5/swewc2YSGzgtp+M2hreRyHJUC9mlv5PZg1IwIpjDtzVumBsiwiogbpWcajvNk70hvP10iukJS4CLJbJLA275tuY//swbYYQuekm6rbit27Yhm5plv2uW/UYCpExaYSIzKDA1brSm3o5CjvsCb/WchmmRSNylQVorK1WtW1ZIApHiv7Xc7Kp+/9p08oc+4bMpYEom+s0ptxkEEQlsW72g4Xo79d6qSbxm4Q8MRP+AxJ8twza+buAegFUVAlKz2I2mtGRYiEtRjAUNWQEV6GKaMmQlNuNGe3tr249AQJpCINg2ExExyjcg9ykcrsCyEpwwPLeqvhO5/41g+1jK9HD4zChzD/ZbWpoBtptUabpbNalYXh0Z+SqNeKYws2bmrsLo95WsgVZptDuUxVo31E9GYnrnNuBHw6sA/zzlKbjZz8lzys6zBLtMhgEN86RE5u4kpF09qPtBww3Br6wWwG4wrJv9vK3X27ZuyASp2cSYssnN8m3DuoIiRjVkUEUUquoX2UpFCaGCVGpuTCFinWPrOo2r6RaRdkXXwETptuaKlEZJt+/u0pKhK6yYopGqcAtXTtJa9LZlETEzUyxJgOwtSO5dUixMZwNAU0f6XlkjR15BUG0zt5pMgz7BjpbAW5BMWAy0l7f0l+lCpMwsIERVyC0bLOsiIpLUIBvEAAqUmsHkNwQBoSlMYBMB+Q6xTMNKcYCnUf+bh5fI7p30r8Y/U9D9GKfs9TQTZm7rQ8ZUCpijMwzi2lolqtkGOLtXXNKE6Qt359TgOdne+k0c/+ciTpj/NnmSwCycW43qr584Oabpd4+h+bJpuc92HzPC9kEHZj9Pgtx5eHuaXoVUmm3ml494W3Hb8Pbjn9fV1i1fV9tWWzfkjI3IlvNGM2bfzFPJLecs4veTDG6jUEEi8HCFY3OR9rIsrWItAEgUaLETp4SJJGoG6zGPAHR/cqRA1TWftU9OMXH20E/zy0aR6/1eFtO73pl+jGoi1JTKZhDFL4BBRUhwgfg0JsR9xkRZkH/YBmA3KKeyKeXmbXPv1sWePPD2dsk5m/lpAGjclGbMkgWpeBmHaJJFk6jmbQOo8FtMprTS9vSEVcgzu+p3Jn1DZNL0csaW9xa62fdq575ooVqO/Ms+87NyD8NnLc6zYTuEJHfSnwXV4/Rnl7CaTLyVv0fB8QHlBElYPSSIiKqYWb28RxGoLH4aus1DUMQ1SS/S3RgpKISJiLsJzJnAVrlkAbSeuOaWg8+FnDOLDwcr698i3Iu9oTWH0uLYD3F3wTAuPbd6bd2bc8DoyO6yW8zysP+n8YpZHe1EAysV41Nyz0Sc6jOJL4I9gynzNrpj+iBGmGByG9At23W9rTfbTLOBcklL+vN93aibiTE5778azFAIjmihRNuWnRNQId1rVnZl+Vg/LzMWXU3gcnL6auGaeqFqJgIsKdEEXYdWyAzaBe4AWARuREwIM5G8rm55MUmxbOplbdmdcVi1Nuy3oJA3c9c1ot3mnjhOp5DYBCJYiuqM+n2a0q6A2adNdJqWLVlruG6rs11JJTnQATLIfFVAtchrDEwppZS0+vQmTH1HMdLyxXlsMaW5pUuFAbjppU2Ombh/GELeD3v6fhamuf4qTf+rwmdB++c/nx4O3/Y/iwa6qzwCw2FsM13ic7aRkyh+IYv8verAAFL55VBSf55E6l6or65G0chO8VqDANRNwhfP3LTWb9P+Whfbgx7bbxKH6T93FUgQxXxumIU4EeMfsSnM2DLzhjVLNm5ZN8NKu63luult45axGrNJ7ljioCCgnKcXo1mhw6LTDLip99xl65EaZKNbFQVcWcatICUX8fjxpxb9doGRECC58SNzoxcUiKS3S4KoQlVdmujWwUTMbyiyGj51SOnT1n3YSd14WLzUeQMhYhvUJf2ANF9RHcLLHaXK4yDICiFASCIEVbGg9pzb+gD8zEEVGVBX+gcsVYcNAhGYGCt9Lx/H0In7HdbvmTAR5QAWwgoMaVRVjMWKUAgRSe3ZzL9heLXfpvRnu9fD+DsPZ29JFDNVQahN+rGnAUNlXDWGwRpBpO9sDYlrumqJAWC1wMWqJwMn9HJA2UUQhn2kg424dENRB+qw0zzZA6iz8Cpx/wDa+J45fGdexT/XDduWbxu2jWuWNXPLeWW6rVwNq9mWZcu2mutyQGCUTnFicWFpA07knLkL/AfqhaZG3NtKLwYDTKUbzhSTxhklcW/lzmWaFbUrEQBqfkcdVNfytku6QN12KYl2dISUhCz+QFhNBUCQWA5MIc1zoZK0dpIPA8TMKJKEqkXps/SngIXIvhZExK3ouME+r68CfjuLok1KJCLFBDZyQWPQfkDhyVFdjAEA8jg/l0Pm4s4Mfji5JUhj7sxmqaKbcSfoXMwvLs6z8FmswFm/3c/9qJ9fJe6d765ppgeptNcJKqpUpADwJgwJziLu1IHsLjpLKYBY8x89AZdaPVeF9NIgrp9GiPheUS1xlPz6atwdpbatBSN9H7ec4avDKXFKxE+0cc7G5Wy6ncZzEJvUh4+BpyMuDXHt9IfKn0kx6VZHORvzJnmTLWNbsWVes2zZrhmr2WbMxtXczwrgELVeiQBgYm68i1ZtH1JB1m6ky9ZZR8hHZ1raCAIxKT4GIG7Ft7BrqXCa2o3pu0YtZUG9zwqA2UUTknMW0orZSCHMlV6WS2qVcX11uL0TLO4Pu7jKIqqusFfVCPf5JwAzinEJrwrrQcIHAgHfm8TbzOJUPTmlL8fjatVjTDi2ld1iZTFQ7X8JptdLTfVLsH0KMocH0phWh6EX+q2Tvylmxy/D9g8XOj1MbycIXwM6ZWd7JZW+t9wGR33ut6/4+ax3TACAUkQ0xcF0Q2pNRU3rQ6hkg/MB
y3s/qDoMi01xznmtyRAfWpjAQZNF1PhSesCS4zTD4BuzBTuxHXQmc78fWk2+aDLfmVeNpDqC3vJy25CzWRYz2TLyxs10y8zGzbi5n1s3zFO1X2g0dc29slVotXAuAoHCjRlAtNqVakLq6sWCrQ5xhrpEpEC9IkgRFevjRRcOGZgFZDv8bzdOKVkMm4mIJt8kVDUVqi1VnkM68jaYlgNhUXba6HVKqZrndJ4FhVttckUTKD4uLl7dbbHUhSNGFv/GINzbcPfv2JGBi16aYLM5fPLdiJQmL+0807KfFvfrffZWdpzgfeT+MPO4Yu+X++EifiWc9dud3E/6+VXkPry9C7ojZSfgZqK6zL2kDM75An1HhdeODQsEb0i/AZfa/60aTVwe/f+hrY0JeJyEaAVhD9tb0aUCcbL5TauzfH89nHIAZ/Ghq0fw/lo4Q+5ZGjVnHAvR6t7CKTstWzbjbUs5Y8tSTTlKpjitz4Q5cQ88WkSFqH5lIe60MdavemCnRTtCU5MdsFuU1DU9cWYRSUa/QJQq1i6XkpidDcyCNj/d8ZcIQPeiTQDUVOh7cf5+K7Wr95s8YyfuVl071R7WrWBnAlAVg7jBvAymYvSmeDWTD50IZr2g8IuiBUi1I1PHN35NSsXtOiBV94RwRYZc78cSxQAZALcVSUEKoKTwUJ8L26eH/asWuAsxTczqs4jyJ4ZPge33qfNR/JwgrqIJs4eYhj4Gn03h18NgWwZ9LVQxLlF1BXYVCqeg3VhNv5A6MRMzZR8dwIY9/gS2S7fyOIY6uacRCWc5Y8En/PWnbxDjuvjMyXw4r1p/NMicM3PGumbL8GczsQzL2MwtDXDNzIbNBTICKi5zaYUzU1Ub9X/8VG/LxSNSNG6Tc46GvWoNAWCrYEUzVGmak5mIq/SZGKOwXwUm1bR9kWRIwxyoDMqmECviAuNWX0kTUW6sRgZkEATSuMqGwnCAfiYIiFArK+OdXDyTyVNGU2LY1E0Km7rBTG4iIFWL3MXtsGkh9HCeWJysm3MaoAN+A7KAVAoNCWJ061E1zGKZzw0NUR1S/EiedgP/cYzzneHVfvusfr6D3A9jYhyLXL4/oGHzc1kwuROHD5HxnmSxCwigmaFE2S3AE2I6Vnsg7jiRxsTfiVPcd/IZcdcTZYcvmnhflO1+Xu33Nr8+mjfmbJWyMxcnKn4B1cypfPEVHfzH7faj2tdazXWVatxulhKWJbmAxWpAXeboLBcArFbcs2SAGVq8j/r1IsvNDrhg6axCkBxW7RGWu3OlEDEW4zDBqSHZj/e1QUkR6X5HecuWqjDcsby47IjJ1C1ZP7jbfD9kJBGqKwX5eRQUpImp62E6AisLq1L2An6U4gcOMEh27XcxK7+pmCyo4fElphEAPg5hCEVEFJLqca9mmlHciTqwUDZIEr2khdlMIcaV1cQUgLBuz6CUHdAcYGSH5zc1Z4Q1YKHEGC9hJ3wmZDvOZ7aP36vTnFdM/XxS/zrZOUqBjMP66Zul+63PlYHzexnFVFdFaACqWQ/NlRxX8Q0BkYTugqgvTVWhVaZy5iSWXnqpDAFccUGRxNe6e48VUhEaWjgEN9JdzvSEguIIUIFyE8S9L/vFLBu7Lfgtag/+W8fluJfBk3E8hWonC8TgoueC+9o01pSKJ51aSmE8cnFy0O3EFGlGPqysmtNgqJutbbLXTIiIJhPNOeWst23dzDb7baNtlJXcRFbIFbZR3reNXEgmK2J1tzRh6RarV+eLmW2N7Prg+gi//Q4ARF6RWf3tZUK2Ok80TDpg2W/DInBPFZKKhw4REVm9GrY58m2DQqgosxmQkCkiyLiJYIWIJP3RFDdYrJOamRXGggBrESoAEmwDSKZMJRYwQRJEYcmYhUqo6qLi9+5qJ3QHXc1aZLRuJvW06Z/yZxHDDOphkrcioHJ+pl0OWd0hl4A0w0ZWOzJO+Iz0Y4tcRm1VbT3zfTdUz95Oi+ebwxmU/iJW5k41PjH99PboT+2SvgG/38sz8v7td89DTA+RLWOdijHNS73cQFPNwXcgaZtHbKoMGpMl5kmRyJ4JqA+vjVTb06fptB/BiRc5S3YnFP4oFGSgGcxsqwDaLDObZRrVKDnDjNktd9bJ0C5txtIbYqvtihI8sti2xVavpBHVE7qB1ftr3+R2DZ/bcnbG1lXC4rF/uxvVM2zT1dUBJtATVa4Lba+6+R789l12AQ5ERNxgUr06W4F/rW1fHfFA69HZ2NTklsmTvXE/q08m7q1+9ynjxETHSB5pKXwFqZ2o1cP4rwsvzYA76fdLsf7RpS79t/qkxjgXWdDMAQ1ry5uVCZfqtO+gSuNaKqqQLZOTVuxlM8frp1NtArCqoobGBoRxC2L6U6HNWXj+k7MEmvRwmGLkAHFO5JZn5ZoUNz9H81ZoYjQz5pxzRjZYXp3cZxMzNYPRLxgrUA2UsBobaIgUM93ZGywrBLRu964m3/71Zk1TrqrfzO2qLknRdSUFYGJRwgEAVC7HCucq5UISWprmqUPC8cO+8xv1L1eyCqfo6QkR5ULHy0qz4rgMAAvDNNt6A8rto/KnlKn5JJW+D5ti5cGDNF6NTyPusTZD2WHuxsgWP4V9FTF22b7kVyva6hDXA+/G/3p4WP9fJPF7klFiOkBRBoVlFvwu6NT5AXI/jGQFUFN92mo5rlWYkSOhj7n3x4bWpZ4WsKavVJ27j+B0wdO8isSn9R+I2nEvnRL30bcndxzPHuJg15l38ndCnPq89cjWS+7T0MxsM+QNZLG3KCSRK/EVQ85IbqCEoKurlKO9csuhUvvaE6GGRQuLdSBIv2oBFNo3NTBecZqb4w85lxGW0YxB7jYwGh9mre0iVFTTJ743bC0TlIoVq6V1vfuG4GZaIJKUpDIRRj9V9cYpN9qiMKpbNdsMqYraWO8DVwzVmhMaVeSTh8M4QZYeyYE+9Lmxx/gVRfXp+vlimTgd66jw8E8EsxIduYwz+4uw8xHMuRf/1eEXKfv06mCKhJnR0HqZh7bXmbkXZqM3Mk8vhPnHI7jdKvj8xjnmM7060KjhqFfTKztMv/sllv93b16eFRE5xpha0Lgrh/j46n6F6ZTYCZL0vc7M6PDcylmiCswF9K7voUCGKLkV7soIQKuGJd9OAGNTY+cYVIvNnGJvxnUKBd0x+rArwNUmx64AAL/QWsUsdAtIKASLQJfzhFpBtcxPhRp9azfxCRGVF7sLWrh+CouWiogkEZDFXJdrPhKWjSqyGReUU2ZV2WAXuZTJDDTTBZDikaqC917HUyxa+Ju+MCvL03sGzy2ZluYzkXt7nqD6FBoxP4TthxP6bpNeRu57eD79ua/JYSteLPYx5/ErJH5P0Md8lLS6wKbldCAHuF9WjNxrmxzWZKrSWUsPkftuXDy2vQV2pLwx8lLVwON47aDfQTgT5pyN+x1kHZu5hw5hw3P1jgNKephz+1zELSOWs25XPxFRR5NV6zxk6MZN2uUthRlMYEYDzW8RV5sxVvvT7RMVfIpSWwyE3TcP985u1UmA17Lrs8fbYSTzaBEadXyNfWQFaN/36Vb
cOPYRr6yDO9UrAL/YDj5CbPVZKvsCM24u2ykG1Q2EiV/kgxsKznQ+RWmm44Ztbu2+4vfSwCic2R34966Q2k+oGL+cox7TB6l3d6UOMeLCBuRzZe4TRY4PkXzHyPh5jYnq0lKz/EwQfTbYZ/HfE36Fsu8jEajG+Ct+sy+KaCpbLYBYtS4aKyECa/NMhjx9Cg2vBAAyG0PQ2YUyZc9FMXviLtLROsNpVR0gr0OvZyEHHTq0ecXxwwdEU4PWwZj4tc3e7NhglgThTKPsJIPpzQc5l9qI4xUAsPGOL93coKgVjRA6Tq9EEACs01a4HWY3nmiiUi43tGp4suPJBrR8fMSFbmja8UQRUDfq00R2UhW30X26tM1basGBVkjQk0G7t9V0hOqN6+q4FClUWyoLNXV+XPVKuChfRJLbxPfFkKhuecaQRWDlilPMudTWOZJgPDKXpna+5GAcj2jOrrcHwYaMYpxxVX62zH0fua/uhJUaGpqIbHv4IiJ7lvNfQtanCvxi+l1M8awUNYILiS/qtD7LRzsB5wXt9wzs5hmannsDdSPijpT9me7mibPTRuFFqjOFUQizJ+7t1f0Sz4h7MEH8VGj+kSUIqQ4nfOjADpLit2dFuL50dVsR80z1NKLYTxFhBo3IRK5nnka4CwgaDQKKHZhHdimLV3vXdayKJCSLfo4ryXi3Vy0JN1oHVtuiStKVHWPrnDz62UbrqP4AdSXNmtiLUDOrO5GyMDEGIAWsz6L1qCCi4gYBWj22TcWUsNJYDM/QgAVujVdcYcbcQxmkInGmRnlFup2kbhv1xYV9Eu6suFp6efhM5B43wxbT1pWMQVW3rVuJi5UTkWiYwtVR23bkiaNJ6DO759OyDIvzuJP3p/8P2xv/PCOyj8f0RQtEjWgeljuBQVQuuCy7iqZr/Z2nFZGO4puR/bNW7KneVOj0yUDZ21B2oNeyBUayUXGbQ7NKFsMJcBvZ4gtNAEBH127Fz7F0r36Vi69FcZgh7bf18zTQZ/PkbKDbRc0p8aSK1/OXOaX/npWrRd3fu10oUIhqgiQzt/9ZxliVgG3IhZQDbh3MBCaSUjIRyba1RrsOia0AWKxxlYPafc80k0W2DfPT2TWjWx7NFcZ6J3tKrXPSd+LCqIUF234JIDerKmXr7sozRUpRiatfTarmBBrwR7ZShQG5l44SZpOteALwfUe9q0gRLmrJ1BQp+UwD63Io5irripOweVQXTgDc49UAsevvJJCTch5Qb/yWFVw71nIuE4nT1lj2mAPk/hXQdZ/htBJGQqxtjFH9XaG1Pyh7iLiyx1OXjP5ySP65oQ1he4hvDwgNm6qMW1vqd/w6bC8S+deQaR2gRtUbRh5w6O7P1pD7DENLdjp2vsQBuLueiPjqcrg39Pu9X0ZB6pT+kxDYy+HUeUvjhIgsTG7zi6bQDFDUBBRzA1kGzWKUYoKLxXqJCMQos44gDoQID9eRNNFHSV8im2Vdaao6NcN9DnfyZzWX6PYtyvzvWD6wh4VuaKwGGclfqSHZXkG1bDvq18KEVmyVUSGZKDrvJEUBiUATKIetvhlHF95Bf2aGPocYfNfqg16q/dwpgG+PLc9Ps+d+FmRcvg/H8qzoPSF7sp5f1K6/KhySIdmxTf2ZzaZHF7z4n1VPBvGryTnDniQ3ENe+aQl4/uF+8lbhZH+42+Q6d9kjC3SrP1Kku8O3AfkeZ67jpSqMfGGowN80ZPDQaDFJMDUnLSLuiUhMFzMStlE2yAahO/JxIwQGseKGVNw+bkDQdR21Dtl3C4F4loPObvU/UFwCOG0tN05nCe25J7LSulqZMliE22OHlCPWxtj1O8BVMOj5d+LuGfs0k4sIJFGKvXdzNR1V+pGqZKGaWgIgStls8xmodUfb3BFlAae9X+LQ7NHtWXvjYez+qwj/W0Et/mV77r8e2MEUX1o/h/T9TmK80q5Xm/x5XfTBfCRs0a0+e/p+BOolPFTcfXaKf1jjEdieVeCs2p4FRvoudeX3lDH90QMqLVDRgg3rAWZI/KAtZ904ubsLjT1ZhGe7x4v7wlmFz/J3R4dudkrFDeEKAGpy+wEqSYPoz2TJYhu3Ddyyu17CalwzWf2nV6O3br3qZfrQcW11M+p6fe7euZoI7QI66Vu1dEJ/ssZzRB9SRS5O4q1Ie0o+5ey0o5A4x/JorqNUU0RNRQClmSxKExETAymSgGwU0Y1YssMcN58gKG6hypy1Whnuxt+KQGLoMHLW94+dGfohgPRA4gbYGhRsvsSe+1i5Pmb7nSc+TAPcwkSmpwrfD//FYHsMZ8R0jwsKTi/6MJzoe03/EbubZiYf+jCAjqd2t3GqlPmwmylANeU6Ba/sq5Xclft3DE70U3E2keDa1lSKQJIWs+su8qZZXpk2k1vmbduu2dYs68Ytk6I0EbKbRgHA3IR3bVfufRKqwfqvarMUEokip3MfpA0TOPwHimpjVbKshB7o+lGTdlZGNzYJCsRGGodK98qGbV2oi/5h/TWzOHlFuGURQaKrsSOJAVShmFFkUclGwHJKRkuU6KzGDzz8OZ5dxY7y+8QMk77OzuNlkEKd9w8keb5+Xrbnfhaen/0VuWvntYfXCvoFOUOxzi3ObolqoUFUkCzOD49lxGftOqvlaatPPvhrkfvZYE8JDt5SKh0P+J3NJdO5vbWj0lEheIh88H09TRnpe9vgY0r0eIximUC16xRn8Wjj7cY4r0TkbLimjmo4aPo8PBw38HT+fxJyP2PbkSgiFEkiWlxfNcEEQDUgE9u2reu2Zftptm3b+227rXk12XLeMjcilctHhayLiJvMs9G7IUawdR6apZei2VycZoBpKbemPYdiAC5i9pB5J2GNuyrouP3ZrAWgkXXPD5BgD3BogieuqyAsDoLMIjDVpfWHFkZEadUTTTIxVc2745mmjRrFZRG/N67luW484MXL53U+TMQ9Zvj59tynMK2TO+A94veYZo/WB5p1Xt//krA9EqND2H6YMkaSmEwR1HhBuGTB8Ds8BDA1vJn26PGh/SmB6RZ5AfWf8HmxpQYg6UwdKrE+zrZJ2FvMJAD5m4D3s9Ip3fJa6WoSIGmErpZzxrZt1229XfOa8a/1lnO+rfmWaYQR2S2fakFRrhiuKPt88aQ60vd79WxmOEf7/vVsQ0UgSxjBbZoApRHJEW4hhf6PkCKWaQRgV76OLJpGMdqd+dbEgzkXmfwGTQSVmk0Bt+eeKeL+ZK04hiwenbRvUdY3pKFVEgo6w2R3wgjIuortTNzDOvxae+73wweWzat4+b8SWfdwCsnHBGNowDxON737ydeGl8j69OEhw+f8KcnLkobICnCW5VSFUYLUckKOY7k4fPU94UzmbpYL5fUf9gliNDP3y5G3zbYN64b367ptdstVA1vE9dNQ/hKB3ywaOpkn9wzuhmYMHghu80S652SRA5PRrSYdre/C/Nb9ZNdX+/Qx8Z0RdNm3qkvATcWFMcjOYTTLMWWyAUAmEhgP4aUeODfjmvvKtF+ZJOaPwgDeOao2ePPD8yI6aEMOAO0onKllV8t8wm4zSFWE+E
...[remainder of the base64-encoded PNG image data truncated]...", + "text/plain": [ + "" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "image_dataset[0][\"image\"]" + ] + },
+ { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "3URV1v5Zntxb" + }, + "source": [ + "### Audio datasets" + ] + },
+ { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Ry1dqcUunzEW" + }, + "source": [ + "Audio files are decoded using torchaudio or librosa at the sampling rate of your choice.\n", + "\n", + "To read mp3 files you need ffmpeg installed, and you will need to restart your runtime after installing it." + ] + },
+ { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "k6FSL7S3odEl", + "outputId": "13299935-e2ff-43b1-e622-33895c3426a7" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\r0% [Working]\r \rHit:1 https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/ InRelease\n", + "Hit:2 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64 InRelease\n", + "Hit:3 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu focal InRelease\n", + "Hit:4 http://archive.ubuntu.com/ubuntu focal InRelease\n", + "Hit:5 http://security.ubuntu.com/ubuntu focal-security InRelease\n", + "Hit:6 http://archive.ubuntu.com/ubuntu focal-updates InRelease\n", + "Hit:7 http://ppa.launchpad.net/cran/libgit2/ubuntu focal InRelease\n", + "Hit:8 http://archive.ubuntu.com/ubuntu focal-backports InRelease\n", + "Hit:9 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu focal InRelease\n", + "Hit:10 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu focal InRelease\n", + "Ign:11 http://ppa.launchpad.net/jonathonf/ffmpeg-4/ubuntu focal InRelease\n", + "Hit:12 http://ppa.launchpad.net/ubuntugis/ppa/ubuntu focal InRelease\n", + "Err:13 http://ppa.launchpad.net/jonathonf/ffmpeg-4/ubuntu focal Release\n", + " 404 Not Found [IP: 185.125.190.52 80]\n", + "Reading package lists... 
Done\n", + "E: The repository 'http://ppa.launchpad.net/jonathonf/ffmpeg-4/ubuntu focal Release' does not have a Release file.\n", + "N: Updating from such a repository can't be done securely, and is therefore disabled by default.\n", + "N: See apt-secure(8) manpage for repository creation and user configuration details.\n" + ] + } + ], + "source": [ + "!add-apt-repository -y ppa:jonathonf/ffmpeg-4 && apt update && apt install -y ffmpeg" + ] + },
+ { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "lpKCz3CHnsre", + "outputId": "8bb79710-04a6-4563-c1db-d966296baa6b" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:datasets.info:Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/common_voice/220833898d6a60c50f621126e51fb22eb2dfe5244392c70dccd8e6e2f055f4bf\n", + "/root/.cache/huggingface/modules/datasets_modules/datasets/common_voice/220833898d6a60c50f621126e51fb22eb2dfe5244392c70dccd8e6e2f055f4bf/common_voice.py:634: FutureWarning: \n", + " This version of the Common Voice dataset is deprecated.\n", + " You can download the latest one with\n", + " >>> load_dataset(\"mozilla-foundation/common_voice_11_0\", \"en\")\n", + " \n", + " warnings.warn(\n", + "INFO:datasets.builder:Overwrite dataset info from restored data version if exists.\n", + "INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/common_voice/fi/6.1.0/220833898d6a60c50f621126e51fb22eb2dfe5244392c70dccd8e6e2f055f4bf\n", + "WARNING:datasets.builder:Found cached dataset common_voice (/root/.cache/huggingface/datasets/common_voice/fi/6.1.0/220833898d6a60c50f621126e51fb22eb2dfe5244392c70dccd8e6e2f055f4bf)\n", + "INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/common_voice/fi/6.1.0/220833898d6a60c50f621126e51fb22eb2dfe5244392c70dccd8e6e2f055f4bf\n" + ] + }, + { + "data": { + "text/plain": [ + "{'client_id': '4eeeb22a3bbb52e5215593a09a845f0f8c496e0a7c498c6d1e9e5e0f8730f79bf16b2b30483dfcc771d430918f27e3ce8b546d068017302109c5c76ca75b0944',\n", + " 'path': '/root/.cache/huggingface/datasets/downloads/extracted/cb1c332c2b5d74b2663eb9d5a6181c2972a0a069831f91fadaac8362eb7899fe/cv-corpus-6.1-2020-12-11/fi/clips/common_voice_fi_22986631.mp3',\n", + " 'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/cb1c332c2b5d74b2663eb9d5a6181c2972a0a069831f91fadaac8362eb7899fe/cv-corpus-6.1-2020-12-11/fi/clips/common_voice_fi_22986631.mp3',\n", + " 'array': array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,\n", + " -1.04925891e-06, 4.06746835e-07, 8.70920871e-07]),\n", + " 'sampling_rate': 48000},\n", + " 'sentence': 'Mitä nyt tekisimme?',\n", + " 'up_votes': 2,\n", + " 'down_votes': 0,\n", + " 'age': 'thirties',\n", + " 'gender': 'male',\n", + " 'accent': '',\n", + " 'locale': 'fi',\n", + " 'segment': \"''\"}" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from datasets import load_dataset\n", + "audio_dataset = load_dataset(\"common_voice\", \"fi\", split=\"train\")\n", + "audio_dataset[0]" + ] + },
+ { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "2Uw3iTdfo9mu", + "outputId": "9f6d13c9-7cbf-4f7b-f7ae-fcbfea1a5a11" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,\n", + " -1.04925891e-06, 4.06746835e-07, 8.70920871e-07]),\n", + " 48000)" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "audio_dataset[0][\"audio\"][\"array\"], audio_dataset[0][\"audio\"][\"sampling_rate\"]" + ] + },
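+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The decoded `array` and its `sampling_rate` are all you need to work with a clip directly. As a quick illustrative sketch (not part of the original recipe), the duration of a clip in seconds is just the number of samples divided by the sampling rate:" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "audio = audio_dataset[0][\"audio\"]\n", + "\n", + "# duration in seconds = number of samples / samples per second\n", + "duration_seconds = len(audio[\"array\"]) / audio[\"sampling_rate\"]\n", + "print(f\"{duration_seconds:.2f} s at {audio['sampling_rate']} Hz\")" + ] + },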
+ { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "q6E2SnHupF5l" + }, + "source": [ + "Audio decoding and resampling are done on the fly when accessing examples. You can change the sampling rate this way:" + ] + },
+ { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nuoyq-E2pJKf", + "outputId": "99fb9f52-00e0-462a-e772-e93b790e0009" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,\n", + " -4.28493877e-07, -1.03890284e-06, -5.02728994e-07]),\n", + " 16000)" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from datasets import Audio\n", + "audio_dataset = audio_dataset.cast_column(\"audio\", Audio(sampling_rate=16_000))\n", + "audio_dataset[0][\"audio\"][\"array\"], audio_dataset[0][\"audio\"][\"sampling_rate\"]" + ] + },
+ { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "NzOXxNzQvSVo", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## Formatting outputs for PyTorch, TensorFlow, NumPy, Pandas\n", + "\n", + "Now that we have tokenized our inputs, we probably want to use this dataset in a `torch.utils.data.DataLoader` or a `tf.data.Dataset`. There are various ways to approach this.\n", + "\n", + "Using the `set_format()` method, we can:\n", + "\n", + "- format the indexing (`__getitem__`) to return numpy/pytorch/tensorflow tensors, instead of python objects, and\n", + "- format the indexing (`__getitem__`) to return only the subset of the columns that we need for our model inputs.\n", + "\n", + " We don't want the columns `id` or `title` as inputs to train our model, but we might still want to keep them in the dataset, for instance for the evaluation of the model.\n", + " \n", + "This is handled by the `.set_format(type: Union[None, str], columns: Union[None, str, List[str]])` method, where:\n", + "\n", + "- `type` defines the return type for our dataset `__getitem__` method and is one of `[None, 'numpy', 'pandas', 'torch', 'tensorflow']` (`None` means return python objects), and\n", + "- `columns` defines the columns returned by `__getitem__` and takes the name of a column in the dataset or a list of columns to return (`None` means return all columns)."
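, + "\n", + "For instance, here is a minimal sketch (assuming the `encoded_dataset` built above) of feeding the torch-formatted dataset to a `torch.utils.data.DataLoader`; `batch_size=1` sidesteps padding, since our tokenized examples have varying lengths, and for larger batches you would pass a `collate_fn` that pads them. The cells below then walk through the formatting calls in detail.\n", + "\n", + "```python\n", + "import torch\n", + "\n", + "columns_to_return = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']\n", + "encoded_dataset.set_format(type='torch', columns=columns_to_return)\n", + "\n", + "# batch_size=1 because the examples are not padded to a common length\n", + "dataloader = torch.utils.data.DataLoader(encoded_dataset, batch_size=1)\n", + "for batch in dataloader:\n", + "    print({name: tensor.shape for name, tensor in batch.items()})\n", + "    break\n", + "```"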
+ ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "aU2h_qQDvSVo", + "outputId": "46af4ce3-d232-440a-d899-30d30c8b16f9", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'attention_mask': ,\n", + " 'end_positions': ,\n", + " 'input_ids': ,\n", + " 'start_positions': ,\n", + " 'token_type_ids': }\n" + ] + } + ], + "source": [ + "columns_to_return = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']\n", + "\n", + "# Uncomment whichever one is appropriate for you\n", + "# encoded_dataset.set_format(type='torch', columns=columns_to_return)\n", + "encoded_dataset.set_format(type='tensorflow', columns=columns_to_return)\n", + "\n", + "# Our dataset indexing output is now ready to be used for training (TensorFlow tensors here; use type='torch' for a PyTorch DataLoader)\n", + "pprint(encoded_dataset[1], compact=True)" + ] + },
+ { + "cell_type": "code", + "execution_count": 34, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Wj1ukGIuvSVq", + "outputId": "f7c6014b-dfff-4885-b696-d93d812a04b3", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']\n" + ] + } + ], + "source": [ + "# Note that the columns are not removed from the dataset, just not returned when calling __getitem__\n", + "# Similarly, the inner type of the dataset is not changed to framework tensors; the conversion and filtering are done on the fly when querying the dataset\n", + "print(encoded_dataset.column_names)" + ] + },
+ { + "cell_type": "code", + "execution_count": 35, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "pWmmUdatasetsvSVs", + "outputId": "bb959fb6-22cc-42fa-c93e-d0c924cc3ad0", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'answers': {'answer_start': [249, 249, 249],\n", + " 'text': ['Carolina Panthers', 'Carolina Panthers',\n", + " 'Carolina Panthers']},\n", + " 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n", + " 'context': 'Super Bowl 50 was an American football game to determine the '\n", + " 'champion of the National Football League (NFL) for the 2015 '\n", + " 'season. The American Football Conference (AFC) champion Denver '\n", + " 'Broncos defeated the National Football Conference (NFC) champion '\n", + " 'Carolina Panthers 24–10 to earn their third Super Bowl title. The '\n", + " \"game was played on February 7, 2016, at Levi's Stadium in the San \"\n", + " 'Francisco Bay Area at Santa Clara, California. 
As this was the '\n", + " '50th Super Bowl, the league emphasized the \"golden anniversary\" '\n", + " 'with various gold-themed initiatives, as well as temporarily '\n", + " 'suspending the tradition of naming each Super Bowl game with '\n", + " 'Roman numerals (under which the game would have been known as '\n", + " '\"Super Bowl L\"), so that the logo could prominently feature the '\n", + " 'Arabic numerals 50.',\n", + " 'end_positions': 46,\n", + " 'id': '56be4db0acb8001400a502ed',\n", + " 'input_ids': [101, 3198, 5308, 1851, 1108, 1126, 1237, 1709, 1342, 1106, 4959,\n", + " 1103, 3628, 1104, 1103, 1305, 2289, 1453, 113, 4279, 114, 1111,\n", + " 1103, 1410, 1265, 119, 1109, 1237, 2289, 3047, 113, 10402, 114,\n", + " 3628, 7068, 14722, 2378, 1103, 1305, 2289, 3047, 113, 24743, 114,\n", + " 3628, 2938, 13598, 1572, 782, 1275, 1106, 7379, 1147, 1503, 3198,\n", + " 5308, 1641, 119, 1109, 1342, 1108, 1307, 1113, 1428, 128, 117,\n", + " 1446, 117, 1120, 12388, 112, 188, 3339, 1107, 1103, 1727, 2948,\n", + " 2410, 3894, 1120, 3364, 10200, 117, 1756, 119, 1249, 1142, 1108,\n", + " 1103, 13163, 3198, 5308, 117, 1103, 2074, 13463, 1103, 107, 5404,\n", + " 5453, 107, 1114, 1672, 2284, 118, 12005, 11751, 117, 1112, 1218,\n", + " 1112, 7818, 28117, 20080, 16264, 1103, 3904, 1104, 10505, 1296,\n", + " 3198, 5308, 1342, 1114, 2264, 183, 15447, 16179, 113, 1223, 1134,\n", + " 1103, 1342, 1156, 1138, 1151, 1227, 1112, 107, 3198, 5308, 149,\n", + " 107, 114, 117, 1177, 1115, 1103, 7998, 1180, 15199, 2672, 1103,\n", + " 4944, 183, 15447, 16179, 1851, 119, 102, 5979, 4279, 1264, 2533,\n", + " 1103, 24743, 1120, 3198, 5308, 1851, 136, 102],\n", + " 'question': 'Which NFL team represented the NFC at Super Bowl 50?',\n", + " 'start_positions': 45,\n", + " 'title': 'Super_Bowl_50',\n", + " 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n" + ] + } + ], + "source": [ + "# We can remove the formatting with `.reset_format()`\n", + "# or, identically, a call to `.set_format()` with no arguments\n", + "encoded_dataset.reset_format()\n", + "\n", + "pprint(encoded_dataset[1], compact=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VyUOA07svSVu", + "outputId": "343d5e56-2d7b-4c4b-db2d-e72282e9e377", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'columns': ['id',\n", + " 'title',\n", + " 'context',\n", + " 'question',\n", + " 'answers',\n", + " 'input_ids',\n", + " 'token_type_ids',\n", + " 'attention_mask',\n", + " 'start_positions',\n", + " 'end_positions'],\n", + " 'format_kwargs': {},\n", + " 'output_all_columns': False,\n", + " 'type': None}\n" + ] + } + ], + "source": [ + "# The current format can be checked with `.format`,\n", + "# which is a dict of the type and formatting\n", + "pprint(encoded_dataset.format)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + 
"id": "Gpa2-z37lUGc", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "There is also a convenience method, `to_tf_dataset()`, for the creation of `tf.data.Dataset` objects directly from a HuggingFace `Dataset`. An example will be shown below - when using this method, it is sufficient to pass the `columns` argument and your `DataCollator` - make sure you set the `return_tensors` argument of your `DataCollator` to `tf` or `np`, though, because TensorFlow won't be happy if you start passing it PyTorch Tensors!" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "xyi2eMeSvSVv", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# Wrapping this all up\n", + "\n", + "Let's wrap this all up with the full code to load and prepare SQuAD for training a PyTorch or TensorFlow model from HuggingFace `transformers` library.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "l0j8BPLi6Qlv", + "outputId": "334e9749-6187-473b-e5f5-805d8fbc9e22", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (4.29.2)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers) (3.12.0)\n", + "Requirement already satisfied: huggingface-hub<1.0,>=0.14.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.14.1)\n", + "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (1.22.4)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers) (23.1)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (6.0)\n", + "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers) (2022.10.31)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers) (2.27.1)\n", + "Requirement already satisfied: tokenizers!=0.11.3,<0.14,>=0.11.1 in /usr/local/lib/python3.10/dist-packages (from transformers) (0.13.3)\n", + "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers) (4.65.0)\n", + "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.14.1->transformers) (2023.4.0)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.14.1->transformers) (4.5.0)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers) (3.4)\n" + ] + } + ], + "source": [ + "!pip install transformers" + ] + }, + { + "cell_type": "code", + 
"execution_count": 38, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 208, + "referenced_widgets": [ + "98a45b56fdb040418e42f7c59e28bc14", + "bf67657a3f5d47a79d078beb8589a098", + "f0fa32d1b256417db2850569674350d9", + "8f77a47ffc79400cbd84280e8bbc9979", + "defca41aeb5b4f8689930bfea05915f1", + "dbc2e3e6c2cb4c108d46430e132777a1", + "f3aa463526554d9da89c2fd0fe8efe2a", + "329b19be2aff486f8a737751ead4d79c", + "5a78f50d4f4742f08ee3abe4e9c38129", + "74ff87c33af14cc093694692397a9ee0", + "cf01a82f5de54ffb97af38ca88e170c2" + ] + }, + "id": "QvExTIZWvSVw", + "outputId": "1ba5ebeb-d4ac-4a3f-b853-2800a3714913", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:datasets.builder:No config specified, defaulting to the single config: squad/plain_text\n", + "INFO:datasets.info:Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/squad/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453\n", + "INFO:datasets.builder:Overwrite dataset info from restored data version if exists.\n", + "INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453\n", + "WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453)\n", + "INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "98a45b56fdb040418e42f7c59e28bc14", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/2 [00:00 5:\n", + " break" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "HmBZ6FZnlUGd", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "Next, we'll initialize and train our TensorFlow model. Note the lack of a loss argument when we `compile()` our model here! All Transformers models support computing loss internally. When no loss argument is provided, the model will use its internal loss - this is especially helpful for cases like QA models, when the loss can be quite complex." + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XnX5xPd9lUGd", + "outputId": "7b779ab0-9959-4f01-f724-6503039a6831", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "All model checkpoint layers were used when initializing TFBertForQuestionAnswering.\n", + "\n", + "Some layers of TFBertForQuestionAnswering were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['qa_outputs']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", + "No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! 
To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.\n" + ] + } + ], + "source": [ + "# Let's load a pretrained Bert model and a simple optimizer\n", + "from transformers import TFAutoModelForQuestionAnswering\n", + "import tensorflow as tf\n", + "\n", + "model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-cased')\n", + "# No loss argument!\n", + "model.compile(optimizer=tf.keras.optimizers.Adam(1e-5))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "NcOtZ86mlUGe", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "Now that all the preprocessing is done, training is an extremely comforting single line of Keras. We stop training early with the `steps_per_epoch` argument - you should probably leave that one out of your actual production code!" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "uJ4B9qU-lUGe", + "outputId": "1243a53e-e292-49eb-eb77-c54f35786510", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3/3 [==============================] - 73s 927ms/step - loss: 5.5575\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 44, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.fit(encoded_tf_dataset, epochs=1, steps_per_epoch=3)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "ySL-vDadvSV8", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "Example with a NER metric: `seqeval`" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "f4uZym7MvSV9", + "outputId": "2ba24e81-9b35-4284-da34-38221885a4da", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", + "Requirement already satisfied: evaluate in /usr/local/lib/python3.10/dist-packages (0.4.0)\n", + "Requirement already satisfied: seqeval in /usr/local/lib/python3.10/dist-packages (1.2.2)\n", + "Requirement already satisfied: datasets>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.12.0)\n", + "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.22.4)\n", + "Requirement already satisfied: dill in /usr/local/lib/python3.10/dist-packages (from evaluate) (0.3.6)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.5.3)\n", + "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.27.1)\n", + "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from evaluate) (4.65.0)\n", + "Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from evaluate) (3.2.0)\n", + "Requirement already satisfied: multiprocess in /usr/local/lib/python3.10/dist-packages (from evaluate) (0.70.14)\n", + "Requirement already satisfied: fsspec[http]>=2021.05.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2023.4.0)\n", + "Requirement already satisfied: huggingface-hub>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) 
(0.14.1)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from evaluate) (23.1)\n", + "Requirement already satisfied: responses<0.19 in /usr/local/lib/python3.10/dist-packages (from evaluate) (0.18.0)\n", + "Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.10/dist-packages (from seqeval) (1.2.2)\n", + "Requirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->evaluate) (9.0.0)\n", + "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->evaluate) (3.8.4)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->evaluate) (6.0)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.7.0->evaluate) (3.12.0)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.7.0->evaluate) (4.5.0)\n", + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->evaluate) (1.26.15)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->evaluate) (2022.12.7)\n", + "Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->evaluate) (2.0.12)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->evaluate) (3.4)\n", + "Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval) (1.10.1)\n", + "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval) (1.2.0)\n", + "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval) (3.1.0)\n", + "Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->evaluate) (2.8.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->evaluate) (2022.7.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->evaluate) (23.1.0)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->evaluate) (6.0.4)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->evaluate) (4.0.2)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->evaluate) (1.9.2)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->evaluate) (1.3.3)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->evaluate) (1.3.1)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.1->pandas->evaluate) (1.16.0)\n" + ] + }, + { + "data": { + "text/plain": [ + "{'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1},\n", + " 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 
'number': 1},\n", + " 'overall_precision': 0.5,\n", + " 'overall_recall': 0.5,\n", + " 'overall_f1': 0.5,\n", + " 'overall_accuracy': 0.8}" + ] + }, + "execution_count": 45, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "!pip install evaluate seqeval\n", + "import evaluate\n", + "ner_metric = evaluate.load('seqeval')\n", + "references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n", + "predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n", + "ner_metric.compute(predictions=predictions, references=references)" + ] + },
+ { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "ctY6AIAilLdH", + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "# Adding a new dataset\n", + "\n", + "Datasets can be added with a direct upload to a user or organization on the Hugging Face Hub using `my_dataset.push_to_hub('username/my_dataset_name')` (like for models in `transformers`). In this case the dataset will be accessible under the given user/organization name, e.g. `datasets.load_dataset('thomwolf/squad')`.\n", + "\n", + "You can also upload your data files directly on the website (see [step-by-step guide here](https://huggingface.co/docs/datasets/upload_dataset)) or using git (see [how to do it using git](https://huggingface.co/docs/datasets/share))." + ] + } + ],
+ "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "name": "HuggingFace datasets library - Overview", + "provenance": [], + "toc_visible": true + }, + "file_extension": ".py", + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.0" + }, + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": 3, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "09e1966daf9e481da118af73af218d88": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "0b3581ddec0b4cabb33593e272a50249": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b3d5c33915084f26b060e086138bf898", + "placeholder": "​", + "style": "IPY_MODEL_429bdd21215f4ef38687daa6def128f8", + "value": " 1057/1057 [00:00<00:00, 2026.75 examples/s]" + } + }, + "21a2deb93c614338a9944b5032220c8d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": 
"@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "329b19be2aff486f8a737751ead4d79c": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "429bdd21215f4ef38687daa6def128f8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "51f49669810a4b5f941c18e4b1896866": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_21a2deb93c614338a9944b5032220c8d", + "placeholder": "​", + "style": "IPY_MODEL_d8494cdc5ce04f4690a9adadb921de4c", + "value": " 981/1057 [00:00<00:00, 1238.52 examples/s]" + } + }, + "5a78f50d4f4742f08ee3abe4e9c38129": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "60682d73f15b4020b57f87dabba5f320": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d8edc4f0a0a44882a7beeca0321276d6", + "max": 1057, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_09e1966daf9e481da118af73af218d88", + "value": 1057 + } + }, + "74ff87c33af14cc093694692397a9ee0": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "757dd94ac5e04ff09ee6fab419f1692d": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": 
null, + "visibility": null, + "width": null + } + }, + "7edfe69de64a4af18febff677b57ab65": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_dc5418db9c3e49cd95b3f85f0dc562ab", + "IPY_MODEL_8db902b229e545649282c130c2a049b8", + "IPY_MODEL_0b3581ddec0b4cabb33593e272a50249" + ], + "layout": "IPY_MODEL_d580bdf43d1e44b8afcfefc962410d73" + } + }, + "8db902b229e545649282c130c2a049b8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_757dd94ac5e04ff09ee6fab419f1692d", + "max": 1057, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_d46a381be01a460cb49cc838c5aa29c0", + "value": 1057 + } + }, + "8f77a47ffc79400cbd84280e8bbc9979": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_74ff87c33af14cc093694692397a9ee0", + "placeholder": "​", + "style": "IPY_MODEL_cf01a82f5de54ffb97af38ca88e170c2", + "value": " 2/2 [00:00<00:00, 33.51it/s]" + } + }, + "961929641bfc4b06b0603bd792c6d351": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c497e117ef7142338bd45e57b722616b", + "IPY_MODEL_60682d73f15b4020b57f87dabba5f320", + "IPY_MODEL_51f49669810a4b5f941c18e4b1896866" + ], + "layout": "IPY_MODEL_f4da65dff9374ace9b92d341ec2793f1" + } + }, + "98a45b56fdb040418e42f7c59e28bc14": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_bf67657a3f5d47a79d078beb8589a098", + "IPY_MODEL_f0fa32d1b256417db2850569674350d9", + "IPY_MODEL_8f77a47ffc79400cbd84280e8bbc9979" + ], + "layout": "IPY_MODEL_defca41aeb5b4f8689930bfea05915f1" + } + }, + "9b5b8acd984f44d696f8f83862f20bf1": { + "model_module": 
"@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b3d5c33915084f26b060e086138bf898": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "bf67657a3f5d47a79d078beb8589a098": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_dbc2e3e6c2cb4c108d46430e132777a1", + "placeholder": "​", + "style": "IPY_MODEL_f3aa463526554d9da89c2fd0fe8efe2a", + "value": "100%" + } + }, + "c497e117ef7142338bd45e57b722616b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + 
"description_tooltip": null, + "layout": "IPY_MODEL_9b5b8acd984f44d696f8f83862f20bf1", + "placeholder": "​", + "style": "IPY_MODEL_f9fdd11e8b6f411e818447528be333df", + "value": "Map: 93%" + } + }, + "cbcbb3853ed544f8b946aab31eaa7f56": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "cf01a82f5de54ffb97af38ca88e170c2": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d46a381be01a460cb49cc838c5aa29c0": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "d580bdf43d1e44b8afcfefc962410d73": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, 
+ "right": null, + "top": null, + "visibility": "hidden", + "width": null + } + }, + "d8494cdc5ce04f4690a9adadb921de4c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d8edc4f0a0a44882a7beeca0321276d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "dbc2e3e6c2cb4c108d46430e132777a1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "dc5418db9c3e49cd95b3f85f0dc562ab": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": 
"IPY_MODEL_cbcbb3853ed544f8b946aab31eaa7f56", + "placeholder": "​", + "style": "IPY_MODEL_e21ced63bda64379832735d5aa2e0178", + "value": "Map: 100%" + } + }, + "defca41aeb5b4f8689930bfea05915f1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e21ced63bda64379832735d5aa2e0178": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f0fa32d1b256417db2850569674350d9": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_329b19be2aff486f8a737751ead4d79c", + "max": 2, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_5a78f50d4f4742f08ee3abe4e9c38129", + "value": 2 + } + }, + "f3aa463526554d9da89c2fd0fe8efe2a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "f4da65dff9374ace9b92d341ec2793f1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + 
"bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": "hidden", + "width": null + } + }, + "f9fdd11e8b6f411e818447528be333df": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/testbed/huggingface__datasets/notebooks/README.md b/testbed/huggingface__datasets/notebooks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..14e938417cd97d8dc4bead1d13b5902eb4a12324 --- /dev/null +++ b/testbed/huggingface__datasets/notebooks/README.md @@ -0,0 +1,33 @@ + + +# 🤗 Datasets Notebooks + +You can find here a list of the official notebooks provided by Hugging Face. + +Also, we would like to list here interesting content created by the community. +If you wrote some notebook(s) leveraging 🤗 Datasets and would like it to be listed here, please open a +Pull Request so it can be included under the Community notebooks. + +## Hugging Face's notebooks 🤗 + +### Documentation notebooks + +You can open any page of the documentation as a notebook in Colab (there is a button directly on said pages) but they are also listed here if you need them: + +| Notebook | Description | | | +|:----------|:-------------|:-------------|------:| +| [Quickstart](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb) | A quick presentation on integrating Datasets into a model training workflow |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)| diff --git a/testbed/huggingface__datasets/src/datasets/__init__.py b/testbed/huggingface__datasets/src/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ce99c70ca3bb5d64f555d2f1a0d2980075e0e4b --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/__init__.py @@ -0,0 +1,73 @@ +# flake8: noqa +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +# pylint: enable=line-too-long +# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position + +__version__ = "2.15.1.dev0" + +from .arrow_dataset import Dataset +from .arrow_reader import ReadInstruction +from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder +from .combine import concatenate_datasets, interleave_datasets +from .dataset_dict import DatasetDict, IterableDatasetDict +from .download import * +from .features import * +from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled +from .info import DatasetInfo, MetricInfo +from .inspect import ( + get_dataset_config_info, + get_dataset_config_names, + get_dataset_infos, + get_dataset_split_names, + inspect_dataset, + inspect_metric, + list_datasets, + list_metrics, +) +from .iterable_dataset import IterableDataset +from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric +from .metric import Metric +from .splits import ( + NamedSplit, + NamedSplitAll, + Split, + SplitBase, + SplitDict, + SplitGenerator, + SplitInfo, + SubSplitInfo, + percent, +) +from .tasks import * +from .utils import * +from .utils import logging + + +# deprecated modules +from datasets import arrow_dataset as _arrow_dataset # isort:skip +from datasets import utils as _utils # isort:skip +from datasets.utils import download_manager as _deprecated_download_manager # isort:skip + +_arrow_dataset.concatenate_datasets = concatenate_datasets +_utils.DownloadConfig = DownloadConfig +_utils.DownloadManager = DownloadManager +_utils.DownloadMode = DownloadMode +_deprecated_download_manager.DownloadConfig = DownloadConfig +_deprecated_download_manager.DownloadMode = DownloadMode +_deprecated_download_manager.DownloadManager = DownloadManager + +del _arrow_dataset, _utils, _deprecated_download_manager diff --git a/testbed/huggingface__datasets/src/datasets/arrow_dataset.py b/testbed/huggingface__datasets/src/datasets/arrow_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..472a09c9a6eaf67134fc943cfec50fcf287e8e6e --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/arrow_dataset.py @@ -0,0 +1,6207 @@ +# Copyright 2020 The HuggingFace Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Lint as: python3 +""" Simple Dataset wrapping an Arrow Table.""" + +import contextlib +import copy +import fnmatch +import itertools +import json +import math +import os +import posixpath +import re +import shutil +import sys +import tempfile +import time +import warnings +import weakref +from collections import Counter +from collections.abc import Mapping +from copy import deepcopy +from functools import partial, wraps +from io import BytesIO +from math import ceil, floor +from pathlib import Path +from random import sample +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + Union, + overload, +) +from typing import Sequence as Sequence_ + +import fsspec +import numpy as np +import pandas as pd +import pyarrow as pa +import pyarrow.compute as pc +from huggingface_hub import CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi +from multiprocess import Pool +from requests import HTTPError + +from . import config +from .arrow_reader import ArrowReader +from .arrow_writer import ArrowWriter, OptimizedTypedSequence +from .data_files import sanitize_patterns +from .download.streaming_download_manager import xgetsize +from .features import Audio, ClassLabel, Features, Image, Sequence, Value +from .features.features import ( + FeatureType, + _align_features, + _check_if_features_can_be_aligned, + generate_from_arrow_type, + pandas_types_mapper, + require_decoding, +) +from .filesystems import is_remote_filesystem +from .fingerprint import ( + fingerprint_transform, + format_kwargs_for_fingerprint, + format_transform_for_fingerprint, + generate_fingerprint, + generate_random_fingerprint, + get_temporary_cache_files_directory, + is_caching_enabled, + maybe_register_dataset_for_temp_dir_deletion, + update_fingerprint, + validate_fingerprint, +) +from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table +from .formatting.formatting import LazyDict, _is_range_contiguous +from .info import DatasetInfo, DatasetInfosDict +from .naming import _split_re +from .search import IndexableMixin +from .splits import NamedSplit, Split, SplitDict, SplitInfo +from .table import ( + InMemoryTable, + MemoryMappedTable, + Table, + _memory_mapped_record_batch_reader_from_file, + cast_array_to_feature, + concat_tables, + embed_table_storage, + list_table_cache_files, + table_cast, + table_iter, + table_visitor, +) +from .tasks import TaskTemplate +from .utils import logging +from .utils import tqdm as hf_tqdm +from .utils.deprecation_utils import deprecated +from .utils.file_utils import _retry, estimate_dataset_size +from .utils.info_utils import is_small_dataset +from .utils.metadata import MetadataConfigs +from .utils.py_utils import ( + Literal, + asdict, + convert_file_size_to_int, + glob_pattern_to_regex, + iflatmap_unordered, + string_to_dict, + unique_values, +) +from .utils.stratify import stratified_shuffle_split_generate_indices +from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf +from .utils.typing import ListLike, PathLike + + +if TYPE_CHECKING: + import sqlite3 + + import pyspark + import sqlalchemy + + from .dataset_dict import DatasetDict + from .iterable_dataset import IterableDataset + +logger = logging.get_logger(__name__) + +PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = ( + "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet" +) + + +class DatasetInfoMixin: + """This base 
class exposes some attributes of DatasetInfo + at the base level of the Dataset for easy access. + """ + + def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]): + self._info = info + self._split = split + + @property + def info(self): + """[`~datasets.DatasetInfo`] object containing all the metadata in the dataset.""" + return self._info + + @property + def split(self): + """[`~datasets.NamedSplit`] object corresponding to a named dataset split.""" + return self._split + + @property + def builder_name(self) -> str: + return self._info.builder_name + + @property + def citation(self) -> str: + return self._info.citation + + @property + def config_name(self) -> str: + return self._info.config_name + + @property + def dataset_size(self) -> Optional[int]: + return self._info.dataset_size + + @property + def description(self) -> str: + return self._info.description + + @property + def download_checksums(self) -> Optional[dict]: + return self._info.download_checksums + + @property + def download_size(self) -> Optional[int]: + return self._info.download_size + + @property + def features(self) -> Optional[Features]: + return self._info.features.copy() if self._info.features is not None else None + + @property + def homepage(self) -> Optional[str]: + return self._info.homepage + + @property + def license(self) -> Optional[str]: + return self._info.license + + @property + def size_in_bytes(self) -> Optional[int]: + return self._info.size_in_bytes + + @property + def supervised_keys(self): + return self._info.supervised_keys + + @property + def task_templates(self): + return self._info.task_templates + + @property + def version(self): + return self._info.version + + +class TensorflowDatasetMixin: + _TF_DATASET_REFS = set() + + @staticmethod + def _get_output_signature( + dataset: "Dataset", + collate_fn: Callable, + collate_fn_args: dict, + cols_to_retain: Optional[List[str]] = None, + batch_size: Optional[int] = None, + num_test_batches: int = 20, + ): + """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset + after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so + the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure + it out just by inspecting the dataset. + + Args: + dataset (`Dataset`): Dataset to load samples from. + collate_fn (`Callable`): A function or callable object (such as a `DataCollator`) that will collate + lists of samples into a batch. + collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the + `collate_fn`. + batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference. + Can be None, which indicates that batch sizes can be variable. + num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
+ + Returns: + `dict`: Dict mapping column names to tf.TensorSpec objects + `dict`: Dict mapping column names to np.dtype objects + """ + if config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") + + if len(dataset) == 0: + raise ValueError("Unable to get the output signature because the dataset is empty.") + if batch_size is not None: + batch_size = min(len(dataset), batch_size) + test_batch_size = 1 + + if cols_to_retain is not None: + cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"])) + + test_batches = [] + for _ in range(num_test_batches): + indices = sample(range(len(dataset)), test_batch_size) + test_batch = dataset[indices] + if cols_to_retain is not None: + test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain} + test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)] + test_batch = collate_fn(test_batch, **collate_fn_args) + test_batches.append(test_batch) + + tf_columns_to_signatures = {} + np_columns_to_dtypes = {} + for column in test_batches[0].keys(): + raw_arrays = [batch[column] for batch in test_batches] + # In case the collate_fn returns something strange + np_arrays = [] + for array in raw_arrays: + if isinstance(array, np.ndarray): + np_arrays.append(array) + elif isinstance(array, tf.Tensor): + np_arrays.append(array.numpy()) + else: + np_arrays.append(np.array(array)) + + if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool: + tf_dtype = tf.int64 + np_dtype = np.int64 + elif np.issubdtype(np_arrays[0].dtype, np.number): + tf_dtype = tf.float32 + np_dtype = np.float32 + elif np_arrays[0].dtype.kind == "U": # Unicode strings + np_dtype = np.unicode_ + tf_dtype = tf.string + else: + raise RuntimeError( + f"Unrecognized array dtype {np_arrays[0].dtype}. \n" + "Nested types and image/audio types are not supported yet." + ) + shapes = [array.shape for array in np_arrays] + static_shape = [] + for dim in range(len(shapes[0])): + sizes = {shape[dim] for shape in shapes} + if dim == 0: + static_shape.append(batch_size) + continue + if len(sizes) == 1: # This dimension looks constant + static_shape.append(sizes.pop()) + else: # Use None for variable dimensions + static_shape.append(None) + tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype) + np_columns_to_dtypes[column] = np_dtype + + return tf_columns_to_signatures, np_columns_to_dtypes + + def to_tf_dataset( + self, + batch_size: Optional[int] = None, + columns: Optional[Union[str, List[str]]] = None, + shuffle: bool = False, + collate_fn: Optional[Callable] = None, + drop_remainder: bool = False, + collate_fn_args: Optional[Dict[str, Any]] = None, + label_cols: Optional[Union[str, List[str]]] = None, + prefetch: bool = True, + num_workers: int = 0, + num_test_batches: int = 20, + ): + """Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from + the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield + `dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw + `tf.Tensor` is yielded instead. + + Args: + batch_size (`int`, *optional*): + Size of batches to load from the dataset.
Defaults to `None`, which implies that the dataset won't be + batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. + columns (`List[str]` or `str`, *optional*): + Dataset column(s) to load in the `tf.data.Dataset`. + Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used. + shuffle (`bool`, defaults to `False`): + Shuffle the dataset order when loading. Recommended `True` for training, `False` for + validation/evaluation. + drop_remainder (`bool`, defaults to `False`): + Drop the last incomplete batch when loading. Ensures + that all batches yielded by the dataset will have the same length on the batch dimension. + collate_fn (`Callable`, *optional*): + A function or callable object (such as a `DataCollator`) that will collate + lists of samples into a batch. + collate_fn_args (`Dict`, *optional*): + An optional `dict` of keyword arguments to be passed to the + `collate_fn`. + label_cols (`List[str]` or `str`, defaults to `None`): + Dataset column(s) to load as labels. + Note that many models compute loss internally rather than letting Keras do it, in which case + passing the labels here is optional, as long as they're in the input `columns`. + prefetch (`bool`, defaults to `True`): + Whether to run the dataloader in a separate thread and maintain + a small buffer of batches for training. Improves performance by allowing data to be loaded in the + background while the model is training. + num_workers (`int`, defaults to `0`): + Number of workers to use for loading the dataset. Only supported on Python versions >= 3.8. + num_test_batches (`int`, defaults to `20`): + Number of batches to use to infer the output signature of the dataset. + The higher this number, the more accurate the signature will be, but the longer it will take to + create the dataset. + + Returns: + `tf.data.Dataset` + + Example: + + ```py + >>> ds_train = ds["train"].to_tf_dataset( + ... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'], + ... shuffle=True, + ... batch_size=16, + ... collate_fn=data_collator, + ... ) + ``` + """ + if config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") + + if (isinstance(columns, list) and len(columns) == 1) or ( + isinstance(label_cols, list) and len(label_cols) == 1 + ): + warnings.warn( + "The output of `to_tf_dataset` will change when passing a single element list for `labels` or " + "`columns` in the next datasets version. To return a tuple structure rather than dict, pass a " + "single string.\n" + "Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n" + " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n" + "New behaviour: columns=['a'], labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n" + " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ", + FutureWarning, + ) + + if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy): + logger.warning( + "Note that to_tf_dataset() loads the data with a generator rather than a full tf.data " + "pipeline and is not compatible with remote TPU connections. If you encounter errors, please " + "try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of " + "Tensors instead of streaming with to_tf_dataset()."
+ ) + + if collate_fn is None: + # Set a very simple default collator that just stacks things together + collate_fn = minimal_tf_collate_fn + if collate_fn_args is None: + collate_fn_args = {} + if label_cols and not columns: + raise ValueError("Cannot specify label_cols without specifying columns!") + if label_cols is None: + label_cols = [] + elif isinstance(label_cols, str): + label_cols = [label_cols] + if len(set(label_cols)) < len(label_cols): + raise ValueError("List of label_cols contains duplicates.") + if columns: + if isinstance(columns, str): + columns = [columns] + if len(set(columns)) < len(columns): + raise ValueError("List of columns contains duplicates.") + cols_to_retain = list(set(columns + label_cols)) + else: + cols_to_retain = None # Indicates keeping all valid columns + columns = [] + + if self.format["type"] not in ["custom", "numpy"]: + dataset = self.with_format("numpy") + else: + dataset = self + + # TODO(Matt, QL): deprecate the retention of label_ids and label + + output_signature, columns_to_np_types = dataset._get_output_signature( + dataset, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + cols_to_retain=cols_to_retain, + batch_size=batch_size if drop_remainder and batch_size is not None else None, + num_test_batches=num_test_batches, + ) + + if "labels" in output_signature: + if ("label_ids" in columns or "label" in columns) and "labels" not in columns: + columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"] + if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols: + label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"] + + for col in columns: + if col not in output_signature: + raise ValueError(f"Column {col} not found in dataset!") + + for col in label_cols: + if col not in output_signature: + raise ValueError(f"Label column {col} not found in dataset!") + + if num_workers == 0: + tf_dataset = dataset_to_tf( + dataset=dataset, + cols_to_retain=cols_to_retain, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + columns_to_np_types=columns_to_np_types, + output_signature=output_signature, + shuffle=shuffle, + batch_size=batch_size, + drop_remainder=drop_remainder, + ) + elif num_workers > 0: + if batch_size is None: + raise NotImplementedError( + "`batch_size` must be specified when using multiple workers, as unbatched multiprocessing " + "is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0." 
+ ) + tf_dataset = multiprocess_dataset_to_tf( + dataset=dataset, + cols_to_retain=cols_to_retain, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + columns_to_np_types=columns_to_np_types, + output_signature=output_signature, + shuffle=shuffle, + batch_size=batch_size, + drop_remainder=drop_remainder, + num_workers=num_workers, + ) + else: + raise ValueError("num_workers must be >= 0") + + def split_features_and_labels(input_batch): + # TODO(Matt, QL): deprecate returning the dict content when there's only one key + features = {key: tensor for key, tensor in input_batch.items() if key in columns} + labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols} + if len(features) == 1: + features = list(features.values())[0] + if len(labels) == 1: + labels = list(labels.values())[0] + if isinstance(labels, dict) and len(labels) == 0: + return features + else: + return features, labels + + if cols_to_retain is not None: + tf_dataset = tf_dataset.map(split_features_and_labels) + + if prefetch: + tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE) + + # Remove a reference to the open Arrow file on delete + def cleanup_callback(ref): + dataset.__del__() + self._TF_DATASET_REFS.remove(ref) + + self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback)) + + return tf_dataset + + +class DatasetTransformationNotAllowedError(Exception): + pass + + +def transmit_format(func): + """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset""" + + @wraps(func) + def wrapper(*args, **kwargs): + if args: + self: "Dataset" = args[0] + args = args[1:] + else: + self: "Dataset" = kwargs.pop("self") + # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None + unformatted_columns = set(self.column_names) - set(self._format_columns or []) + self_format = { + "type": self._format_type, + "format_kwargs": self._format_kwargs, + "columns": self._format_columns, + "output_all_columns": self._output_all_columns, + } + # apply actual function + out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) + datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] + # re-apply format to the output + for dataset in datasets: + new_format = self_format.copy() + if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns) + # sort the columns to have a deterministic list of columns that we can compare with `out_format` + new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns) + out_format = { + "type": dataset._format_type, + "format_kwargs": dataset._format_kwargs, + "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None, + "output_all_columns": dataset._output_all_columns, + } + if out_format != new_format: + fingerprint = dataset._fingerprint + dataset.set_format(**new_format) + dataset._fingerprint = fingerprint + return out + + wrapper._decorator_name_ = "transmit_format" + return wrapper + + +def transmit_tasks(func): + """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset""" + + @wraps(func) + def wrapper(*args, **kwargs): + if args: + self: "Dataset" = args[0] + args = args[1:] + else: + self: "Dataset" = kwargs.pop("self") + # apply actual function + out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) + 
datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] + for dataset in datasets: + # Remove task templates if a column mapping of the template is no longer valid + if self.info.task_templates is not None: + dataset.info.task_templates = [ + template + for template in self.info.task_templates + if all( + dataset._info.features.get(k) == self._info.features.get(k) + for k in template.column_mapping.keys() + ) + ] + return out + + wrapper._decorator_name_ = "transmit_tasks" + return wrapper + + +def update_metadata_with_features(table: Table, features: Features): + """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema.""" + features = Features({col_name: features[col_name] for col_name in table.column_names}) + if table.schema.metadata is None or b"huggingface" not in table.schema.metadata: + pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features)) + else: + metadata = json.loads(table.schema.metadata[b"huggingface"].decode()) + if "info" not in metadata: + metadata["info"] = asdict(DatasetInfo(features=features)) + else: + metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"] + pa_metadata = {"huggingface": json.dumps(metadata)} + table = table.replace_schema_metadata(pa_metadata) + return table + + +def _check_table(table) -> Table: + """We check the table type to make sure it's an instance of :class:`datasets.table.Table`""" + if isinstance(table, pa.Table): + # for a pyarrow table, we can just consider it as a in-memory table + # this is here for backward compatibility + return InMemoryTable(table) + elif isinstance(table, Table): + return table + else: + raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.") + + +def _check_column_names(column_names: List[str]): + """Check the column names to make sure they don't contain duplicates.""" + counter = Counter(column_names) + if not all(count == 1 for count in counter.values()): + duplicated_columns = [col for col in counter if counter[col] > 1] + raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.") + + +def _check_valid_indices_value(index, size): + if (index < 0 and index + size < 0) or (index >= size): + raise IndexError(f"Index {index} out of range for dataset of size {size}.") + + +class NonExistentDatasetError(Exception): + """Used when we expect the existence of a dataset""" + + pass + + +class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): + """A Dataset backed by an Arrow table.""" + + def __init__( + self, + arrow_table: Table, + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + indices_table: Optional[Table] = None, + fingerprint: Optional[str] = None, + ): + info = info.copy() if info is not None else DatasetInfo() + DatasetInfoMixin.__init__(self, info=info, split=split) + IndexableMixin.__init__(self) + + self._data: Table = _check_table(arrow_table) + self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None + maybe_register_dataset_for_temp_dir_deletion(self) + + self._format_type: Optional[str] = None + self._format_kwargs: dict = {} + self._format_columns: Optional[list] = None + self._output_all_columns: bool = False + self._fingerprint: str = fingerprint + + # Read metadata + + if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata: + 
metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode()) + if ( + "fingerprint" in metadata and self._fingerprint is None + ): # try to load fingerprint from the arrow file metadata + self._fingerprint = metadata["fingerprint"] + + # Infer features if None + inferred_features = Features.from_arrow_schema(arrow_table.schema) + if self.info.features is None: + self.info.features = inferred_features + else: # make sure the nested columns are in the right order + try: + self.info.features = self.info.features.reorder_fields_as(inferred_features) + except ValueError as e: + raise ValueError( + f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file." + ) + + # Infer fingerprint if None + + if self._fingerprint is None: + self._fingerprint = generate_fingerprint(self) + + # Sanity checks + + if self._info.features is None: + raise ValueError("Features can't be None in a Dataset object") + if self._fingerprint is None: + raise ValueError("Fingerprint can't be None in a Dataset object") + if self.info.features.type != inferred_features.type: + raise ValueError( + f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" + ) + + if self._indices is not None: + if not pa.types.is_unsigned_integer(self._indices.column(0).type): + raise ValueError( + f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}" + ) + _check_column_names(self._data.column_names) + + self._data = update_metadata_with_features(self._data, self._info.features) + + @property + def features(self) -> Features: + features = super().features + if features is None: # this is already checked in __init__ + raise ValueError("Features can't be None in a Dataset object") + return features + + @classmethod + def from_file( + cls, + filename: str, + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + indices_filename: Optional[str] = None, + in_memory: bool = False, + ) -> "Dataset": + """Instantiate a Dataset backed by an Arrow table at filename. + + Args: + filename (`str`): + File name of the dataset. + info (`DatasetInfo`, *optional*): + Dataset information, like description, citation, etc. + split (`NamedSplit`, *optional*): + Name of the dataset split. + indices_filename (`str`, *optional*): + File names of the indices. + in_memory (`bool`, defaults to `False`): + Whether to copy the data in-memory. + + Returns: + [`Dataset`] + """ + table = ArrowReader.read_table(filename, in_memory=in_memory) + + if indices_filename is not None: + indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) + else: + indices_pa_table = None + + return cls( + arrow_table=table, + info=info, + split=split, + indices_table=indices_pa_table, + ) + + @classmethod + def from_buffer( + cls, + buffer: pa.Buffer, + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + indices_buffer: Optional[pa.Buffer] = None, + ) -> "Dataset": + """Instantiate a Dataset backed by an Arrow buffer. + + Args: + buffer (`pyarrow.Buffer`): + Arrow buffer. + info (`DatasetInfo`, *optional*): + Dataset information, like description, citation, etc. + split (`NamedSplit`, *optional*): + Name of the dataset split. + indices_buffer (`pyarrow.Buffer`, *optional*): + Indices Arrow buffer. 
+ 
+ Returns:
+ [`Dataset`]
+ """
+ table = InMemoryTable.from_buffer(buffer)
+ 
+ if indices_buffer is not None:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+ else:
+ indices_table = None
+ 
+ return cls(table, info=info, split=split, indices_table=indices_table)
+ 
+ @classmethod
+ def from_pandas(
+ cls,
+ df: pd.DataFrame,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ preserve_index: Optional[bool] = None,
+ ) -> "Dataset":
+ """
+ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
+ 
+ The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
+ DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
+ 
+ Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
+ type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
+ contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
+ features and passing them to this function.
+ 
+ Args:
+ df (`pandas.DataFrame`):
+ Dataframe that contains the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting Dataset.
+ The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
+ Use `preserve_index=True` to force it to be stored as a column.
+ 
+ Returns:
+ [`Dataset`]
+ 
+ Example:
+ 
+ ```py
+ >>> ds = Dataset.from_pandas(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable.from_pandas(
+ df=df,
+ preserve_index=preserve_index,
+ )
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+ 
+ @classmethod
+ def from_dict(
+ cls,
+ mapping: dict,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
+ 
+ Args:
+ mapping (`Mapping`):
+ Mapping of strings to Arrays or Python lists.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
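+ 
+ Example (illustrative):
+ 
+ ```py
+ >>> ds = Dataset.from_dict({"text": ["Good", "Bad"], "label": [0, 1]})
+ ```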
+ 
+ Returns:
+ [`Dataset`]
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ arrow_typed_mapping = {}
+ for col, data in mapping.items():
+ if isinstance(data, (pa.Array, pa.ChunkedArray)):
+ data = cast_array_to_feature(data, features[col]) if features is not None else data
+ else:
+ data = OptimizedTypedSequence(
+ features.encode_column(data, col) if features is not None else data,
+ type=features[col] if features is not None else None,
+ col=col,
+ )
+ arrow_typed_mapping[col] = data
+ mapping = arrow_typed_mapping
+ pa_table = InMemoryTable.from_pydict(mapping=mapping)
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ if info.features is None:
+ info.features = Features(
+ {
+ col: generate_from_arrow_type(data.type)
+ if isinstance(data, (pa.Array, pa.ChunkedArray))
+ else data.get_inferred_type()
+ for col, data in mapping.items()
+ }
+ )
+ return cls(pa_table, info=info, split=split)
+ 
+ @classmethod
+ def from_list(
+ cls,
+ mapping: List[dict],
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
+ 
+ Note that the keys of the first entry will be used to determine the dataset columns,
+ regardless of what is passed to `features`.
+ 
+ Args:
+ mapping (`List[dict]`): A list of mappings of strings to row values.
+ features ([`Features`], *optional*): Dataset features.
+ info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*): Name of the dataset split.
+ 
+ Returns:
+ [`Dataset`]
+ """
+ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
+ mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
+ return cls.from_dict(mapping, features, info, split)
+ 
+ @staticmethod
+ def from_csv(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from CSV file(s).
+ 
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the CSV file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+ 
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
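+ For example, `sep=";"` or `names=["col1", "col2"]` (both `pandas.read_csv` parameters) can be passed here.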
+ 
+ Returns:
+ [`Dataset`]
+ 
+ Example:
+ 
+ ```py
+ >>> ds = Dataset.from_csv('path/to/dataset.csv')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+ 
+ return CsvDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+ 
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ gen_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create a Dataset from a generator.
+ 
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ gen_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+ If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
+ 
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`GeneratorConfig`].
+ 
+ Returns:
+ [`Dataset`]
+ 
+ Example:
+ 
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = Dataset.from_generator(gen)
+ ```
+ 
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+ 
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ gen_kwargs=gen_kwargs,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+ 
+ @staticmethod
+ def from_json(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ field: Optional[str] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from JSON or JSON Lines file(s).
+ 
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the JSON or JSON Lines file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ field (`str`, *optional*):
+ Name of the field in the JSON file that contains the dataset.
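+ For example, pass `field="data"` if the JSON file has the form `{"data": [...]}`.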
+ num_proc (`int`, *optional* defaults to `None`): + Number of processes when downloading and generating the dataset locally. + This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. + + + **kwargs (additional keyword arguments): + Keyword arguments to be passed to [`JsonConfig`]. + + Returns: + [`Dataset`] + + Example: + + ```py + >>> ds = Dataset.from_json('path/to/dataset.json') + ``` + """ + # Dynamic import to avoid circular dependency + from .io.json import JsonDatasetReader + + return JsonDatasetReader( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + field=field, + num_proc=num_proc, + **kwargs, + ).read() + + @staticmethod + def from_parquet( + path_or_paths: Union[PathLike, List[PathLike]], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + columns: Optional[List[str]] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + """Create Dataset from Parquet file(s). + + Args: + path_or_paths (`path-like` or list of `path-like`): + Path(s) of the Parquet file(s). + split (`NamedSplit`, *optional*): + Split name to be assigned to the dataset. + features (`Features`, *optional*): + Dataset features. + cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): + Directory to cache data. + keep_in_memory (`bool`, defaults to `False`): + Whether to copy the data in-memory. + columns (`List[str]`, *optional*): + If not `None`, only these columns will be read from the file. + A column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + num_proc (`int`, *optional*, defaults to `None`): + Number of processes when downloading and generating the dataset locally. + This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. + + + **kwargs (additional keyword arguments): + Keyword arguments to be passed to [`ParquetConfig`]. + + Returns: + [`Dataset`] + + Example: + + ```py + >>> ds = Dataset.from_parquet('path/to/dataset.parquet') + ``` + """ + # Dynamic import to avoid circular dependency + from .io.parquet import ParquetDatasetReader + + return ParquetDatasetReader( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + columns=columns, + num_proc=num_proc, + **kwargs, + ).read() + + @staticmethod + def from_text( + path_or_paths: Union[PathLike, List[PathLike]], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + """Create Dataset from text file(s). + + Args: + path_or_paths (`path-like` or list of `path-like`): + Path(s) of the text file(s). + split (`NamedSplit`, *optional*): + Split name to be assigned to the dataset. + features (`Features`, *optional*): + Dataset features. + cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): + Directory to cache data. + keep_in_memory (`bool`, defaults to `False`): + Whether to copy the data in-memory. + num_proc (`int`, *optional*, defaults to `None`): + Number of processes when downloading and generating the dataset locally. + This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. + + + **kwargs (additional keyword arguments): + Keyword arguments to be passed to [`TextConfig`]. 
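+ For example, `sample_by="paragraph"` (a `TextConfig` parameter) yields one example per paragraph instead of per line.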
+ 
+ Returns:
+ [`Dataset`]
+ 
+ Example:
+ 
+ ```py
+ >>> ds = Dataset.from_text('path/to/dataset.txt')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+ 
+ return TextDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+ 
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ keep_in_memory: bool = False,
+ cache_dir: str = None,
+ working_dir: str = None,
+ load_from_cache_file: bool = True,
+ **kwargs,
+ ):
+ """Create a Dataset from a Spark DataFrame. Dataset downloading is distributed over Spark workers.
+ 
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
+ workers and the driver.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ working_dir (`str`, *optional*):
+ Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
+ a non-NFS intermediate directory may improve performance.
+ load_from_cache_file (`bool`, defaults to `True`):
+ Whether to load the dataset from the cache if possible.
+ 
+ Returns:
+ [`Dataset`]
+ 
+ Example:
+ 
+ ```py
+ >>> df = spark.createDataFrame(
+ ... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ ... schema=["id", "name"],
+ ... )
+ >>> ds = Dataset.from_spark(df)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.spark import SparkDatasetReader
+ 
+ if sys.platform == "win32":
+ raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
+ 
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=False,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ working_dir=working_dir,
+ load_from_cache_file=load_from_cache_file,
+ **kwargs,
+ ).read()
+ 
+ @staticmethod
+ def from_sql(
+ sql: Union[str, "sqlalchemy.sql.Selectable"],
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ):
+ """Create Dataset from SQL query or database table.
+ 
+ Args:
+ sql (`str` or `sqlalchemy.sql.Selectable`):
+ SQL query to be executed or a table name.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`SqlConfig`].
+ 
+ Returns:
+ [`Dataset`]
+ 
+ Example:
+ 
+ ```py
+ >>> # Fetch a database table
+ >>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
+ >>> # Execute a SQL query on the table
+ >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
+ >>> # Use a Selectable object to specify the query
+ >>> from sqlalchemy import select, text
+ >>> stmt = select([text("sentence")]).select_from(text("test_data"))
+ >>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
+ ```
+ 
+ <Tip>
+ 
+ The returned dataset can only be cached if `con` is specified as URI string.
+ 
+ </Tip>
+ """
+ from .io.sql import SqlDatasetReader
+ 
+ return SqlDatasetReader(
+ sql,
+ con,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ **kwargs,
+ ).read()
+ 
+ def __del__(self):
+ if hasattr(self, "_data"):
+ del self._data
+ if hasattr(self, "_indices"):
+ del self._indices
+ 
+ def __enter__(self):
+ return self
+ 
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ self.__del__()
+ 
+ def save_to_disk(
+ self,
+ dataset_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+ 
+ For [`Image`] and [`Audio`] data:
+ 
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+ 
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
+ of the dataset directory where the dataset will be saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+ 
+ <Deprecated version="2.8.0">
+ 
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+ 
+ </Deprecated>
+ 
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be written. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+ 
+ num_proc (`int`, *optional*):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+ 
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+ 
+ Example:
+ 
+ ```py
+ >>> ds.save_to_disk("path/to/dataset/directory")
+ >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
+ ```
+ """
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to save_to_disk: please specify either max_shard_size or num_shards, but not both."
+ )
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+ 
+ if self.list_indexes():
+ raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
+ 
+ if num_shards is None:
+ dataset_nbytes = self._estimate_nbytes()
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, num_proc or 1)
+ 
+ num_proc = num_proc if num_proc is not None else 1
+ num_shards = num_shards if num_shards is not None else num_proc
+ 
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+ 
+ if not is_remote_filesystem(fs):
+ parent_cache_files_paths = {
+ Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
+ }
+ # Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
+ if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths:
+ raise PermissionError(
+ f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself."
+ )
+ 
+ fs.makedirs(dataset_path, exist_ok=True)
+ 
+ # Get json serializable state
+ state = {
+ key: self.__dict__[key]
+ for key in [
+ "_fingerprint",
+ "_format_columns",
+ "_format_kwargs",
+ "_format_type",
+ "_output_all_columns",
+ ]
+ }
+ state["_split"] = str(self.split) if self.split is not None else self.split
+ state["_data_files"] = [
+ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
+ ]
+ for k in state["_format_kwargs"].keys():
+ try:
+ json.dumps(state["_format_kwargs"][k])
+ except TypeError as e:
+ raise TypeError(
+ str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
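+ # `from None` suppresses the original traceback context; the message above already embeds str(e)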
+ ) from None + # Get json serializable dataset info + dataset_info = asdict(self._info) + + shards_done = 0 + pbar = hf_tqdm( + unit=" examples", + total=len(self), + desc=f"Saving the dataset ({shards_done}/{num_shards} shards)", + ) + kwargs_per_job = ( + { + "job_id": shard_idx, + "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True), + "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"), + "storage_options": storage_options, + } + for shard_idx in range(num_shards) + ) + shard_lengths = [None] * num_shards + shard_sizes = [None] * num_shards + if num_proc > 1: + with Pool(num_proc) as pool: + with pbar: + for job_id, done, content in iflatmap_unordered( + pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job + ): + if done: + shards_done += 1 + pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") + logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") + shard_lengths[job_id], shard_sizes[job_id] = content + else: + pbar.update(content) + else: + with pbar: + for kwargs in kwargs_per_job: + for job_id, done, content in Dataset._save_to_disk_single(**kwargs): + if done: + shards_done += 1 + pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") + logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") + shard_lengths[job_id], shard_sizes[job_id] = content + else: + pbar.update(content) + with fs.open( + posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8" + ) as state_file: + json.dump(state, state_file, indent=2, sort_keys=True) + with fs.open( + posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8" + ) as dataset_info_file: + # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True + sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} + json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) + + @staticmethod + def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]): + batch_size = config.DEFAULT_MAX_BATCH_SIZE + + num_examples_progress_update = 0 + writer = ArrowWriter( + features=shard.features, + path=fpath, + storage_options=storage_options, + embed_local_files=True, + ) + try: + _time = time.time() + for pa_table in shard.with_format("arrow").iter(batch_size): + writer.write_table(pa_table) + num_examples_progress_update += len(pa_table) + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield job_id, False, num_examples_progress_update + num_examples_progress_update = 0 + finally: + yield job_id, False, num_examples_progress_update + num_examples, num_bytes = writer.finalize() + writer.close() + + yield job_id, True, (num_examples, num_bytes) + + @staticmethod + def _build_local_temp_path(uri_or_path: str) -> Path: + """ + Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative + path extracted from the uri) passed. + + Args: + uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. + `"s3://my-bucket/dataset/train"`) to concatenate. 
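+ 
+ Example (illustrative; the temporary-directory component varies per session):
+ 
+ ```py
+ >>> Dataset._build_local_temp_path("/data/train")  # doctest: +SKIP
+ PosixPath('/tmp/.../data/train')
+ ```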
+ 
+ Returns:
+ :class:`Path`: the concatenated path (temp dir + path)
+ """
+ src_dataset_path = Path(uri_or_path)
+ tmp_dir = get_temporary_cache_files_directory()
+ return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
+ 
+ @staticmethod
+ def load_from_disk(
+ dataset_path: str,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "Dataset":
+ """
+ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
+ filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+ 
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+ of the dataset directory where the dataset will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+ 
+ <Deprecated version="2.8.0">
+ 
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+ 
+ </Deprecated>
+ 
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+ 
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory, the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split.
+ 
+ Example:
+ 
+ ```py
+ >>> ds = load_from_disk("path/to/dataset/directory")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+ 
+ fs: fsspec.AbstractFileSystem
+ fs, _, [dataset_path] = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+ 
+ dest_dataset_path = dataset_path
+ dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+ 
+ dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
+ dataset_info_is_file = fs.isfile(dataset_info_path)
+ dataset_state_is_file = fs.isfile(dataset_state_json_path)
+ if not dataset_info_is_file and not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+ if not dataset_info_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. 
Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." + ) + raise FileNotFoundError( + f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." + ) + if not dataset_state_is_file: + if dataset_dict_is_file: + raise FileNotFoundError( + f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." + ) + raise FileNotFoundError( + f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." + ) + + # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies + if is_remote_filesystem(fs): + src_dataset_path = dest_dataset_path + dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path) + fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) + dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) + dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) + + with open(dataset_state_json_path, encoding="utf-8") as state_file: + state = json.load(state_file) + with open(dataset_info_path, encoding="utf-8") as dataset_info_file: + dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) + + dataset_size = estimate_dataset_size( + Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"] + ) + keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) + table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable + arrow_table = concat_tables( + table_cls.from_file(posixpath.join(dest_dataset_path, data_file["filename"])) + for data_file in state["_data_files"] + ) + + split = state["_split"] + split = Split(split) if split is not None else split + + dataset = Dataset( + arrow_table=arrow_table, + info=dataset_info, + split=split, + fingerprint=state["_fingerprint"], + ) + + format = { + "type": state["_format_type"], + "format_kwargs": state["_format_kwargs"], + "columns": state["_format_columns"], + "output_all_columns": state["_output_all_columns"], + } + dataset = dataset.with_format(**format) + + return dataset + + @property + def data(self) -> Table: + """The Apache Arrow table backing the dataset. 
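+ 
+ Note that this returns the full underlying table even if an indices mapping is set
+ (e.g. after [`~datasets.Dataset.select`] or [`~datasets.Dataset.shuffle`]); the indices live in a separate table.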
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.data + MemoryMappedTable + text: string + label: int64 + ---- + text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]] + label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]] + ``` + """ + return self._data + + @property + def cache_files(self) -> List[dict]: + """The cache files containing the Apache Arrow table backing the dataset. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.cache_files + [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}] + ``` + """ + cache_files = list_table_cache_files(self._data) + if self._indices is not None: + cache_files += list_table_cache_files(self._indices) + return [{"filename": cache_filename} for cache_filename in cache_files] + + @property + def num_columns(self) -> int: + """Number of columns in the dataset. 
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_columns
+ 2
+ ```
+ """
+ return self._data.num_columns
+ 
+ @property
+ def num_rows(self) -> int:
+ """Number of rows in the dataset (same as [`Dataset.__len__`]).
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_rows
+ 1066
+ ```
+ """
+ if self._indices is not None:
+ return self._indices.num_rows
+ return self._data.num_rows
+ 
+ @property
+ def column_names(self) -> List[str]:
+ """Names of the columns in the dataset.
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return self._data.column_names
+ 
+ @property
+ def shape(self) -> Tuple[int, int]:
+ """Shape of the dataset (number of rows, number of columns).
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.shape
+ (1066, 2)
+ ```
+ """
+ if self._indices is not None:
+ return (self._indices.num_rows, self._data.num_columns)
+ return self._data.shape
+ 
+ def unique(self, column: str) -> List:
+ """Return a list of the unique elements in a column.
+ 
+ This is implemented in the low-level backend and as such is very fast.
+ 
+ Args:
+ column (`str`):
+ Column name (list all the column names with [`~datasets.Dataset.column_names`]).
+ 
+ Returns:
+ `list`: List of unique elements in the given column.
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.unique('label')
+ [1, 0]
+ ```
+ """
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+ 
+ if self._indices is not None and self._indices.num_rows != self._data.num_rows:
+ dataset = self.flatten_indices()
+ else:
+ dataset = self
+ 
+ return dataset._data.column(column).unique().to_pylist()
+ 
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
+ 
+ Args:
+ column (`str`):
+ The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`]).
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq", split="validation")
+ >>> ds.features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column('answer')
+ >>> ds.features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ # Sanity checks
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+ src_feat = self._info.features[column]
+ if not isinstance(src_feat, Value):
+ raise ValueError(
+ f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
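+ # class encoding needs scalar Value features (e.g. string/int); nested features can't be mapped to ClassLabel directly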
+ ) + + if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)): + + def stringify_column(batch): + batch[column] = [ + str(sample) if include_nulls or sample is not None else None for sample in batch[column] + ] + return batch + + dset = self.map( + stringify_column, + batched=True, + desc="Stringifying the column", + ) + else: + dset = self + + # Create the new feature + class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None) + dst_feat = ClassLabel(names=class_names) + + def cast_to_class_labels(batch): + batch[column] = [ + dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None + for sample in batch[column] + ] + return batch + + new_features = dset.features.copy() + new_features[column] = dst_feat + + dset = dset.map( + cast_to_class_labels, + batched=True, + features=new_features, + desc="Casting to class labels", + ) + + return dset + + @fingerprint_transform(inplace=False) + def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset": + """Flatten the table. + Each column with a struct type is flattened into one column per struct field. + Other columns are left unchanged. + + Args: + new_fingerprint (`str`, *optional*): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + + Returns: + [`Dataset`]: A copy of the dataset with flattened columns. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("squad", split="train") + >>> ds.features + {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), + 'context': Value(dtype='string', id=None), + 'id': Value(dtype='string', id=None), + 'question': Value(dtype='string', id=None), + 'title': Value(dtype='string', id=None)} + >>> ds.flatten() + Dataset({ + features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], + num_rows: 87599 + }) + ``` + """ + dataset = copy.deepcopy(self) + for depth in range(1, max_depth): + if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema): + dataset._data = dataset._data.flatten() + else: + break + dataset.info.features = self._info.features.flatten(max_depth=max_depth) + dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names}) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.') + dataset._fingerprint = new_fingerprint + return dataset + + def cast( + self, + features: Features, + batch_size: Optional[int] = 1000, + keep_in_memory: bool = False, + load_from_cache_file: Optional[bool] = None, + cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + num_proc: Optional[int] = None, + ) -> "Dataset": + """ + Cast the dataset to a new set of features. + + Args: + features ([`Features`]): + New features to cast the dataset to. + The name of the fields in the features must match the current column names. + The type of the data must also be convertible from one type to the other. + For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset. + batch_size (`int`, defaults to `1000`): + Number of examples per batch provided to cast. 
+ If `batch_size <= 0` or `batch_size is None`, the full dataset is provided as a single batch to cast.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running [`~datasets.Dataset.map`].
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ 
+ Returns:
+ [`Dataset`]: A copy of the dataset with cast features.
+ 
+ Example:
+ 
+ ```py
+ >>> from datasets import load_dataset, ClassLabel, Value
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ if sorted(features) != sorted(self._data.column_names):
+ raise ValueError(
+ f"The columns in features ({list(features)}) must be identical "
+ f"to the columns in the dataset: {self._data.column_names}"
+ )
+ 
+ schema = features.arrow_schema
+ format = self.format
+ dataset = self.with_format("arrow")
+ # cast every batch of the Arrow-formatted dataset to the new schema with table_cast
+ dataset = dataset.map(
+ partial(table_cast, schema=schema),
+ batched=True,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ num_proc=num_proc,
+ features=features,
+ desc="Casting the dataset",
+ )
+ dataset = dataset.with_format(**format)
+ return dataset
+ 
+ @fingerprint_transform(inplace=False)
+ def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Cast column to feature for decoding.
+ 
+ Args:
+ column (`str`):
+ Column name.
+ feature (`FeatureType`):
+ Target feature.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ + Returns: + [`Dataset`] + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.features + {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), + 'text': Value(dtype='string', id=None)} + >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) + >>> ds.features + {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), + 'text': Value(dtype='string', id=None)} + ``` + """ + if hasattr(feature, "decode_example"): + dataset = copy.deepcopy(self) + dataset._info.features[column] = feature + dataset._fingerprint = new_fingerprint + dataset._data = dataset._data.cast(dataset.features.arrow_schema) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + return dataset + else: + features = self.features + features[column] = feature + return self.cast(features) + + @transmit_tasks + @transmit_format + @fingerprint_transform(inplace=False) + def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": + """ + Remove one or several column(s) in the dataset and the features associated to them. + + You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method + is in-place (doesn't copy the data to a new dataset) and is thus faster. + + Args: + column_names (`Union[str, List[str]]`): + Name of the column(s) to remove. + new_fingerprint (`str`, *optional*): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + + Returns: + [`Dataset`]: A copy of the dataset object without the columns to remove. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.remove_columns('label') + Dataset({ + features: ['text'], + num_rows: 1066 + }) + >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0 + Dataset({ + features: [], + num_rows: 0 + }) + ``` + """ + dataset = copy.deepcopy(self) + if isinstance(column_names, str): + column_names = [column_names] + + for column_name in column_names: + if column_name not in dataset._data.column_names: + raise ValueError( + f"Column name {column_name} not in the dataset. " + f"Current columns in the dataset: {dataset._data.column_names}" + ) + + for column_name in column_names: + del dataset._info.features[column_name] + + dataset._data = dataset._data.drop(column_names) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + @transmit_tasks + @fingerprint_transform(inplace=False) + def rename_column( + self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None + ) -> "Dataset": + """ + Rename a column in the dataset, and move the features associated to the original column under the new column + name. + + Args: + original_column_name (`str`): + Name of the column to rename. + new_column_name (`str`): + New name for the column. + new_fingerprint (`str`, *optional*): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + + Returns: + [`Dataset`]: A copy of the dataset with a renamed column. 
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.rename_column('label', 'label_new') + Dataset({ + features: ['text', 'label_new'], + num_rows: 1066 + }) + ``` + """ + dataset = copy.deepcopy(self) + if original_column_name not in dataset._data.column_names: + raise ValueError( + f"Original column name {original_column_name} not in the dataset. " + f"Current columns in the dataset: {dataset._data.column_names}" + ) + if new_column_name in dataset._data.column_names: + raise ValueError( + f"New column name {new_column_name} already in the dataset. " + f"Please choose a column name which is not already in the dataset. " + f"Current columns in the dataset: {dataset._data.column_names}" + ) + if not new_column_name: + raise ValueError("New column name is empty.") + + def rename(columns): + return [new_column_name if col == original_column_name else col for col in columns] + + new_column_names = rename(self._data.column_names) + if self._format_columns is not None: + dataset._format_columns = rename(self._format_columns) + + dataset._info.features = Features( + { + new_column_name if col == original_column_name else col: feature + for col, feature in self._info.features.items() + } + ) + + dataset._data = dataset._data.rename_columns(new_column_names) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + @transmit_tasks + @fingerprint_transform(inplace=False) + def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset": + """ + Rename several columns in the dataset, and move the features associated to the original columns under + the new column names. + + Args: + column_mapping (`Dict[str, str]`): + A mapping of columns to rename to their new names + new_fingerprint (`str`, *optional*): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + + Returns: + [`Dataset`]: A copy of the dataset with renamed columns + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) + Dataset({ + features: ['text_new', 'label_new'], + num_rows: 1066 + }) + ``` + """ + dataset = copy.deepcopy(self) + + extra_columns = set(column_mapping.keys()) - set(dataset.column_names) + if extra_columns: + raise ValueError( + f"Original column names {extra_columns} not in the dataset. 
" + f"Current columns in the dataset: {dataset._data.column_names}" + ) + + number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) + if number_of_duplicates_in_new_columns != 0: + raise ValueError( + "New column names must all be different, but this column mapping " + f"has {number_of_duplicates_in_new_columns} duplicates" + ) + + empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] + if empty_new_columns: + raise ValueError(f"New column names {empty_new_columns} are empty.") + + def rename(columns): + return [column_mapping[col] if col in column_mapping else col for col in columns] + + new_column_names = rename(self._data.column_names) + if self._format_columns is not None: + dataset._format_columns = rename(self._format_columns) + + dataset._info.features = Features( + { + column_mapping[col] if col in column_mapping else col: feature + for col, feature in (self._info.features or {}).items() + } + ) + + dataset._data = dataset._data.rename_columns(new_column_names) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + @transmit_tasks + @transmit_format + @fingerprint_transform(inplace=False) + def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": + """Select one or several column(s) in the dataset and the features + associated to them. + + Args: + column_names (`Union[str, List[str]]`): + Name of the column(s) to keep. + new_fingerprint (`str`, *optional*): + The new fingerprint of the dataset after transform. If `None`, + the new fingerprint is computed using a hash of the previous + fingerprint, and the transform arguments. + + Returns: + [`Dataset`]: A copy of the dataset object which only consists of + selected columns. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.select_columns(['text']) + Dataset({ + features: ['text'], + num_rows: 1066 + }) + ``` + """ + if isinstance(column_names, str): + column_names = [column_names] + + for column_name in column_names: + if column_name not in self._data.column_names: + raise ValueError( + f"Column name {column_name} not in the " + "dataset. Current columns in the dataset: " + f"{self._data.column_names}." + ) + + dataset = copy.deepcopy(self) + dataset._data = dataset._data.select(column_names) + dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names}) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + def __len__(self): + """Number of rows in the dataset. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.__len__ + + ``` + """ + return self.num_rows + + def __iter__(self): + """Iterate through the examples. + + If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the + selected format. 
+ """ + if self._indices is None: + # Fast iteration + # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) + format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} + formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) + batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER + for pa_subtable in table_iter(self.data, batch_size=batch_size): + for i in range(pa_subtable.num_rows): + pa_subtable_ex = pa_subtable.slice(i, 1) + formatted_output = format_table( + pa_subtable_ex, + 0, + formatter=formatter, + format_columns=self._format_columns, + output_all_columns=self._output_all_columns, + ) + yield formatted_output + else: + for i in range(self.num_rows): + yield self._getitem( + i, + ) + + def iter(self, batch_size: int, drop_last_batch: bool = False): + """Iterate through the batches of size `batch_size`. + + If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the + selected format. + + Args: + batch_size (:obj:`int`): size of each batch to yield. + drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be + dropped + """ + if self._indices is None: + # Fast iteration + # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) + format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} + formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) + for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch): + formatted_batch = format_table( + pa_subtable, + range(pa_subtable.num_rows), + formatter=formatter, + format_columns=self._format_columns, + output_all_columns=self._output_all_columns, + ) + yield formatted_batch + else: + num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size + for i in range(0, num_rows, batch_size): + yield self._getitem( + slice(i, i + batch_size), + ) + + def __repr__(self): + return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})" + + @property + def format(self): + return { + "type": self._format_type, + "format_kwargs": self._format_kwargs, + "columns": self.column_names if self._format_columns is None else self._format_columns, + "output_all_columns": self._output_all_columns, + } + + @contextlib.contextmanager + def formatted_as( + self, + type: Optional[str] = None, + columns: Optional[List] = None, + output_all_columns: bool = False, + **format_kwargs, + ): + """To be used in a `with` statement. Set `__getitem__` return format (type and columns). + + Args: + type (`str`, *optional*): + Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. + `None` means `__getitem__`` returns python objects (default). + columns (`List[str]`, *optional*): + Columns to format in the output. + `None` means `__getitem__` returns all columns (default). + output_all_columns (`bool`, defaults to `False`): + Keep un-formatted columns as well in the output (as python objects). + **format_kwargs (additional keyword arguments): + Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. 
+ """ + old_format_type = self._format_type + old_format_kwargs = self._format_kwargs + old_format_columns = self._format_columns + old_output_all_columns = self._output_all_columns + try: + self.set_format(type, columns, output_all_columns, **format_kwargs) + yield + finally: + self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) + + @fingerprint_transform(inplace=True) + def set_format( + self, + type: Optional[str] = None, + columns: Optional[List] = None, + output_all_columns: bool = False, + **format_kwargs, + ): + """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. + The format `type` (for example "numpy") is used to format batches when using `__getitem__`. + It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`]. + + Args: + type (`str`, *optional*): + Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. + `None` means `__getitem__` returns python objects (default). + columns (`List[str]`, *optional*): + Columns to format in the output. + `None` means `__getitem__` returns all columns (default). + output_all_columns (`bool`, defaults to `False`): + Keep un-formatted columns as well in the output (as python objects). + **format_kwargs (additional keyword arguments): + Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. + + It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, then the list of formatted columns + gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as: + + ``` + new formatted columns = (all columns - previously unformatted columns) + ``` + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) + >>> ds.set_format(type='numpy', columns=['text', 'label']) + >>> ds.format + {'type': 'numpy', + 'format_kwargs': {}, + 'columns': ['text', 'label'], + 'output_all_columns': False} + ``` + """ + format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format) + + # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter + type = get_format_type_from_alias(type) + get_formatter(type, features=self._info.features, **format_kwargs) + + # Check filter column + if isinstance(columns, str): + columns = [columns] + if isinstance(columns, tuple): + columns = list(columns) + if columns is not None and any(col not in self._data.column_names for col in columns): + raise ValueError( + f"Columns {list(filter(lambda col: col not in self._data.column_names, columns))} not in the dataset. 
Current columns in the dataset: {self._data.column_names}" + ) + if columns is not None: + columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs + + self._format_type = type + self._format_kwargs = format_kwargs + self._format_columns = columns + self._output_all_columns = output_all_columns + logger.debug( + "Set __getitem__(key) output type to %s for %s columns " + " (when key is int or slice) and %s output other (un-formatted) columns.", + "python objects" if type is None else type, + "no" if columns is None else str(columns), + "do" if output_all_columns else "don't", + ) + + def reset_format(self): + """Reset `__getitem__` return format to python objects and all columns. + + Same as `self.set_format()` + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) + >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) + >>> ds.format + {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': 'numpy'} + >>> ds.reset_format() + >>> ds.format + {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': None} + ``` + """ + self.set_format() + + def set_transform( + self, + transform: Optional[Callable], + columns: Optional[List] = None, + output_all_columns: bool = False, + ): + """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. + As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. + + Args: + transform (`Callable`, *optional*): + User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. + A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. + This function is applied right before returning the objects in `__getitem__`. + columns (`List[str]`, *optional*): + Columns to format in the output. + If specified, then the input batch of the transform only contains those columns. + output_all_columns (`bool`, defaults to `False`): + Keep un-formatted columns as well in the output (as python objects). + If set to True, then the other un-formatted columns are kept with the output of the transform. + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') + >>> def encode(batch): + ... 
return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt') + >>> ds.set_transform(encode) + >>> ds[0] + {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1]), + 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895, + 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211, + 5637, 1998, 11690, 2336, 1012, 102]), + 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0])} + ``` + """ + self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) + + def with_format( + self, + type: Optional[str] = None, + columns: Optional[List] = None, + output_all_columns: bool = False, + **format_kwargs, + ): + """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. + The format `type` (for example "numpy") is used to format batches when using `__getitem__`. + + It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. + + Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object. + + Args: + type (`str`, *optional*): + Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. + `None` means `__getitem__` returns python objects (default). + columns (`List[str]`, *optional*): + Columns to format in the output. + `None` means `__getitem__` returns all columns (default). + output_all_columns (`bool`, defaults to `False`): + Keep un-formatted columns as well in the output (as python objects). + **format_kwargs (additional keyword arguments): + Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) + >>> ds.format + {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': None} + >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) + >>> ds.format + {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': 'tensorflow'} + ``` + """ + dataset = copy.deepcopy(self) + dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) + return dataset + + def with_transform( + self, + transform: Optional[Callable], + columns: Optional[List] = None, + output_all_columns: bool = False, + ): + """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. + + As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. + + Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object. + + Args: + transform (`Callable`, `optional`): + User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. + A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. 
+ This function is applied right before returning the objects in `__getitem__`. + columns (`List[str]`, `optional`): + Columns to format in the output. + If specified, then the input batch of the transform only contains those columns. + output_all_columns (`bool`, defaults to `False`): + Keep un-formatted columns as well in the output (as python objects). + If set to `True`, then the other un-formatted columns are kept with the output of the transform. + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> def encode(example): + ... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt') + >>> ds = ds.with_transform(encode) + >>> ds[0] + {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1]), + 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, + 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, + 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]), + 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0])} + ``` + """ + dataset = copy.deepcopy(self) + dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) + return dataset + + @deprecated() + def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset": + """ + Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates). + + Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting. + + Args: + task (`Union[str, TaskTemplate]`): + The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include: + + - `"text-classification"` + - `"question-answering"` + + If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates). + id (`int`, defaults to `0`): + The id required to unambiguously identify the task template when multiple task templates of the same type are supported. + """ + # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD + if isinstance(task, str): + tasks = [template.task for template in (self.info.task_templates or [])] + compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task] + if not compatible_templates: + raise ValueError( + f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}" + ) + + if not 0 <= id < len(compatible_templates): + templates_list_str = "\n".join( + f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates) + ) + raise ValueError( + f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}" + ) + template = compatible_templates[id] + elif isinstance(task, TaskTemplate): + template = task + else: + raise ValueError( + f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}." 
+ ) + template = template.align_with_features(self.info.features) + column_mapping = template.column_mapping + columns_to_drop = [column for column in self.column_names if column not in column_mapping] + dataset = self.remove_columns(columns_to_drop) + dataset = dataset.rename_columns(column_mapping) + # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` + dataset.info.task_templates = None + dataset = dataset.cast(features=template.features) + return dataset + + def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]: + """ + Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices) + """ + if isinstance(key, bool): + raise TypeError("dataset index must be int, str, slice or collection of int, not bool") + format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type + format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns + output_all_columns = ( + kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns + ) + format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs + format_kwargs = format_kwargs if format_kwargs is not None else {} + formatter = get_formatter(format_type, features=self._info.features, **format_kwargs) + pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None) + formatted_output = format_table( + pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns + ) + return formatted_output + + @overload + def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811 + ... + + @overload + def __getitem__(self, key: str) -> List: # noqa: F811 + ... + + def __getitem__(self, key): # noqa: F811 + """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).""" + return self._getitem(key) + + def __getitems__(self, keys: List) -> List: + """Can be used to get a batch using a list of integers indices.""" + batch = self.__getitem__(keys) + n_examples = len(batch[next(iter(batch))]) + return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)] + + def cleanup_cache_files(self) -> int: + """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is + one. + + Be careful when running this command that no other process is currently using other cache files. + + Returns: + `int`: Number of removed files. 
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.cleanup_cache_files() + 10 + ``` + """ + current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] + if not current_cache_files: + return 0 + cache_directory = os.path.dirname(current_cache_files[0]) + logger.info(f"Listing files in {cache_directory}") + files: List[str] = os.listdir(cache_directory) + files_to_remove = [] + for f_name in files: + full_name = os.path.abspath(os.path.join(cache_directory, f_name)) + if f_name.startswith("cache-") and f_name.endswith(".arrow"): + if full_name in current_cache_files: + logger.info(f"Keeping currently used cache file at {full_name}") + continue + files_to_remove.append(full_name) + for file_path in files_to_remove: + logger.info(f"Removing {file_path}") + os.remove(file_path) + return len(files_to_remove) + + def _get_cache_file_path(self, fingerprint): + if is_caching_enabled() and self.cache_files: + cache_file_name = "cache-" + fingerprint + ".arrow" + cache_directory = os.path.dirname(self.cache_files[0]["filename"]) + else: + cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" + cache_directory = get_temporary_cache_files_directory() + cache_file_path = os.path.join(cache_directory, cache_file_name) + return cache_file_path + + @transmit_tasks + @transmit_format + def map( + self, + function: Optional[Callable] = None, + with_indices: bool = False, + with_rank: bool = False, + input_columns: Optional[Union[str, List[str]]] = None, + batched: bool = False, + batch_size: Optional[int] = 1000, + drop_last_batch: bool = False, + remove_columns: Optional[Union[str, List[str]]] = None, + keep_in_memory: bool = False, + load_from_cache_file: Optional[bool] = None, + cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + features: Optional[Features] = None, + disable_nullable: bool = False, + fn_kwargs: Optional[dict] = None, + num_proc: Optional[int] = None, + suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", + new_fingerprint: Optional[str] = None, + desc: Optional[str] = None, + ) -> "Dataset": + """ + Apply a function to all the examples in the table (individually or in batches) and update the table. + If your function returns a column that already exists, then it overwrites it. + + You can specify whether the function should be batched or not with the `batched` parameter: + + - If batched is `False`, then the function takes 1 example in and should return 1 example. + An example is a dictionary, e.g. `{"text": "Hello there !"}`. + - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. + A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`. + - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. + Note that the last batch may have less than `n` examples. + A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. 
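+
+        For instance, a batched function may return a different number of examples than it
+        receives. An illustrative sketch (the `duplicate` helper is hypothetical):
+
+        ```py
+        >>> def duplicate(batch):
+        ...     # returns a batch twice as large as the input one
+        ...     return {"text": batch["text"] + batch["text"]}
+        >>> # doubles the number of rows; the original columns are dropped so that
+        >>> # every returned column has the new length:
+        >>> # ds = ds.map(duplicate, batched=True, remove_columns=ds.column_names)
+        ```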
+ + Args: + function (`Callable`): Function with one of the following signatures: + + - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` + - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) + - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` + - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) + + For advanced usage, the function can also return a `pyarrow.Table`. + Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. + If no function is provided, default to identity function: `lambda x: x`. + with_indices (`bool`, defaults to `False`): + Provide example indices to `function`. Note that in this case the + signature of `function` should be `def function(example, idx[, rank]): ...`. + with_rank (`bool`, defaults to `False`): + Provide process rank to `function`. Note that in this case the + signature of `function` should be `def function(example[, idx], rank): ...`. + input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): + The columns to be passed into `function` + as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. + batched (`bool`, defaults to `False`): + Provide batch of examples to `function`. + batch_size (`int`, *optional*, defaults to `1000`): + Number of examples per batch provided to `function` if `batched=True`. + If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. + drop_last_batch (`bool`, defaults to `False`): + Whether a last batch smaller than the batch_size should be + dropped instead of being processed by the function. + remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): + Remove a selection of columns while doing the mapping. + Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding + columns with names in `remove_columns`, these columns will be kept. + keep_in_memory (`bool`, defaults to `False`): + Keep the dataset in memory instead of writing it to a cache file. + load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): + If a cache file storing the current computation from `function` + can be identified, use it instead of recomputing. + cache_file_name (`str`, *optional*, defaults to `None`): + Provide the name of a path for the cache file. It is used to store the + results of the computation instead of the automatically generated cache file name. + writer_batch_size (`int`, defaults to `1000`): + Number of rows per write operation for the cache file writer. + This value is a good trade-off between memory usage during the processing, and processing speed. + Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. + features (`Optional[datasets.Features]`, defaults to `None`): + Use a specific Features to store the cache file + instead of the automatically generated one. + disable_nullable (`bool`, defaults to `False`): + Disallow null values in the table. + fn_kwargs (`Dict`, *optional*, defaults to `None`): + Keyword arguments to be passed to `function`. 
+ num_proc (`int`, *optional*, defaults to `None`): + Max number of processes when generating cache. Already cached shards are loaded sequentially. + suffix_template (`str`): + If `cache_file_name` is specified, then this suffix + will be added at the end of the base name of each. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for + `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix. + new_fingerprint (`str`, *optional*, defaults to `None`): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + desc (`str`, *optional*, defaults to `None`): + Meaningful description to be displayed alongside with the progress bar while mapping examples. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> def add_prefix(example): + ... example["text"] = "Review: " + example["text"] + ... return example + >>> ds = ds.map(add_prefix) + >>> ds[0:3]["text"] + ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', + 'Review: the soundtrack alone is worth the price of admission .', + 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .'] + + # process a batch of examples + >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) + # set number of processors + >>> ds = ds.map(add_prefix, num_proc=4) + ``` + """ + if keep_in_memory and cache_file_name is not None: + raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") + + if num_proc is not None and num_proc <= 0: + raise ValueError("num_proc must be an integer > 0.") + + # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway) + if len(self) == 0: + if self._indices is not None: # empty indices mapping + self = Dataset( + self.data.slice(0, 0), + info=self.info.copy(), + split=self.split, + fingerprint=new_fingerprint, + ) + if remove_columns: + return self.remove_columns(remove_columns) + else: + return self + + if function is None: + function = lambda x: x # noqa: E731 + + if isinstance(input_columns, str): + input_columns = [input_columns] + + if input_columns is not None: + for input_column in input_columns: + if input_column not in self._data.column_names: + raise ValueError( + f"Input column {input_column} not in the dataset. Current columns in the dataset: {self._data.column_names}" + ) + + if isinstance(remove_columns, str): + remove_columns = [remove_columns] + + if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns): + raise ValueError( + f"Column to remove {list(filter(lambda col: col not in self._data.column_names, remove_columns))} not in the dataset. Current columns in the dataset: {self._data.column_names}" + ) + + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + + if fn_kwargs is None: + fn_kwargs = {} + + if num_proc is not None and num_proc > len(self): + num_proc = len(self) + logger.warning( + f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." 
+ ) + + dataset_kwargs = { + "shard": self, + "function": function, + "with_indices": with_indices, + "with_rank": with_rank, + "input_columns": input_columns, + "batched": batched, + "batch_size": batch_size, + "drop_last_batch": drop_last_batch, + "remove_columns": remove_columns, + "keep_in_memory": keep_in_memory, + "writer_batch_size": writer_batch_size, + "features": features, + "disable_nullable": disable_nullable, + "fn_kwargs": fn_kwargs, + } + + if new_fingerprint is None: + # we create a unique hash from the function, + # current dataset file and the mapping args + transform = format_transform_for_fingerprint(Dataset._map_single) + kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs) + kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint" + new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint) + else: + validate_fingerprint(new_fingerprint) + dataset_kwargs["new_fingerprint"] = new_fingerprint + + if self.cache_files: + if cache_file_name is None: + cache_file_name = self._get_cache_file_path(new_fingerprint) + dataset_kwargs["cache_file_name"] = cache_file_name + + def load_processed_shard_from_cache(shard_kwargs): + """Load a processed shard from cache if it exists, otherwise throw an error.""" + shard = shard_kwargs["shard"] + # Check if we've already cached this computation (indexed by a hash) + if shard_kwargs["cache_file_name"] is not None: + if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file: + info = shard.info.copy() + info.features = features + info.task_templates = None + return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split) + raise NonExistentDatasetError + + num_shards = num_proc if num_proc is not None else 1 + if batched and drop_last_batch: + pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size + else: + pbar_total = len(self) + + shards_done = 0 + if num_proc is None or num_proc == 1: + transformed_dataset = None + try: + transformed_dataset = load_processed_shard_from_cache(dataset_kwargs) + logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}") + except NonExistentDatasetError: + pass + if transformed_dataset is None: + with hf_tqdm( + unit=" examples", + total=pbar_total, + desc=desc or "Map", + ) as pbar: + for rank, done, content in Dataset._map_single(**dataset_kwargs): + if done: + shards_done += 1 + logger.debug(f"Finished processing shard number {rank} of {num_shards}.") + transformed_dataset = content + else: + pbar.update(content) + assert transformed_dataset is not None, "Failed to retrieve the result from map" + # update fingerprint if the dataset changed + if transformed_dataset._fingerprint != self._fingerprint: + transformed_dataset._fingerprint = new_fingerprint + return transformed_dataset + else: + + def format_cache_file_name( + cache_file_name: Optional[str], + rank: Union[int, Literal["*"]], # noqa: F722 + ) -> Optional[str]: + if not cache_file_name: + return cache_file_name + sep = cache_file_name.rindex(".") + base_name, extension = cache_file_name[:sep], cache_file_name[sep:] + if isinstance(rank, int): + cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension + logger.info(f"Process #{rank} will write at {cache_file_name}") + else: + cache_file_name = ( + base_name + + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc) + + extension + ) + return cache_file_name + 
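+            # Each worker derives its own fingerprint from the parent fingerprint plus
+            # the rank suffix, so per-shard cache files remain distinguishable.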
+ def format_new_fingerprint(new_fingerprint: str, rank: int) -> str: + new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) + validate_fingerprint(new_fingerprint) + return new_fingerprint + + prev_env = deepcopy(os.environ) + # check if parallelism if off + # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 + if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( + "", + "off", + "false", + "f", + "no", + "n", + "0", + ): + logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + shards = [ + self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) + for rank in range(num_proc) + ] + kwargs_per_job = [ + { + **dataset_kwargs, + "shard": shards[rank], + "cache_file_name": format_cache_file_name(cache_file_name, rank), + "rank": rank, + "offset": sum(len(s) for s in shards[:rank]), + "new_fingerprint": format_new_fingerprint(new_fingerprint, rank), + } + for rank in range(num_shards) + ] + + transformed_shards = [None] * num_shards + for rank in range(num_shards): + try: + transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank]) + kwargs_per_job[rank] = None + except NonExistentDatasetError: + pass + + kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None] + + # We try to create a pool with as many workers as dataset not yet cached. + if kwargs_per_job: + if len(kwargs_per_job) < num_shards: + logger.info( + f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache." + ) + with Pool(len(kwargs_per_job)) as pool: + os.environ = prev_env + logger.info(f"Spawning {num_proc} processes") + with hf_tqdm( + unit=" examples", + total=pbar_total, + desc=(desc or "Map") + f" (num_proc={num_proc})", + ) as pbar: + for rank, done, content in iflatmap_unordered( + pool, Dataset._map_single, kwargs_iterable=kwargs_per_job + ): + if done: + shards_done += 1 + logger.debug(f"Finished processing shard number {rank} of {num_shards}.") + transformed_shards[rank] = content + else: + pbar.update(content) + # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805) + for kwargs in kwargs_per_job: + del kwargs["shard"] + else: + logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}") + assert ( + None not in transformed_shards + ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results" + logger.info(f"Concatenating {num_proc} shards") + result = _concatenate_map_style_datasets(transformed_shards) + # update fingerprint if the dataset changed + if any( + transformed_shard._fingerprint != shard._fingerprint + for transformed_shard, shard in zip(transformed_shards, shards) + ): + result._fingerprint = new_fingerprint + else: + result._fingerprint = self._fingerprint + return result + + @staticmethod + def _map_single( + shard: "Dataset", + function: Optional[Callable] = None, + with_indices: bool = False, + with_rank: bool = False, + input_columns: Optional[List[str]] = None, + batched: bool = False, + batch_size: Optional[int] = 1000, + drop_last_batch: bool = False, + remove_columns: Optional[List[str]] = None, + keep_in_memory: bool = False, + cache_file_name: Optional[str] = None, + 
writer_batch_size: Optional[int] = 1000, + features: Optional[Features] = None, + disable_nullable: bool = False, + fn_kwargs: Optional[dict] = None, + new_fingerprint: Optional[str] = None, + rank: Optional[int] = None, + offset: int = 0, + ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]: + """Apply a function to all the elements in the table (individually or in batches) + and update the table (if function does update examples). + + Args: + shard (`datasets.Dataset`): Dataset to map the transform on. + function (`Callable`): with one of the following signature: + - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` + - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) + - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` + - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) + + For advanced usage, the function can also return a `pyarrow.Table`. + Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. + If no function is provided, default to identity function: lambda x: x + with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. + with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. + input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as + positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. + batched (`bool`, defaults to `False`): Provide batch of examples to `function` + batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` + `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` + drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be + dropped instead of being processed by the function. + remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. + Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding + columns with names in `remove_columns`, these columns will be kept. + keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. + cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the + results of the computation instead of the automatically generated cache file name. + writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. + This value is a good trade-off between memory usage during the processing, and processing speed. + Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. 
+ features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file + instead of the automatically generated one. + disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. + fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function` + new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments + rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing + offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. + """ + if fn_kwargs is None: + fn_kwargs = {} + + # If we do batch computation but no batch size is provided, default to the full dataset + if batched and (batch_size is None or batch_size <= 0): + batch_size = shard.num_rows + + # We set this variable to True after processing the first example/batch in + # `apply_function_on_filtered_inputs` if the map function returns a dict. + # If set to False, no new arrow table will be created + + update_data = None + + format_kwargs = shard._format_kwargs.copy() + # Lazy formatting is only available for the default format (None/python) + if not input_columns and shard._format_type is None: + format_kwargs["lazy"] = True + input_formatter = get_formatter( + shard._format_type, + features=shard.features, + **format_kwargs, + ) + + class NumExamplesMismatchError(Exception): + pass + + def validate_function_output(processed_inputs, indices): + """Validate output of the map function.""" + if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)): + raise TypeError( + f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects." + ) + elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): + allowed_batch_return_types = (list, np.ndarray, pd.Series) + if config.TF_AVAILABLE and "tensorflow" in sys.modules: + import tensorflow as tf + + allowed_batch_return_types += (tf.Tensor,) + if config.TORCH_AVAILABLE and "torch" in sys.modules: + import torch + + allowed_batch_return_types += (torch.Tensor,) + if config.JAX_AVAILABLE and "jax" in sys.modules: + import jax.numpy as jnp + + allowed_batch_return_types += (jnp.ndarray,) + all_dict_values_are_lists = all( + isinstance(value, allowed_batch_return_types) for value in processed_inputs.values() + ) + if all_dict_values_are_lists is False: + raise TypeError( + f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`." 
+                    )
+
+        def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
+            """Utility to apply the function on a selection of columns."""
+            nonlocal update_data
+            inputs = format_table(
+                pa_inputs,
+                0 if not batched else range(pa_inputs.num_rows),
+                format_columns=input_columns,
+                formatter=input_formatter,
+            )
+            fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
+            if offset == 0:
+                effective_indices = indices
+            else:
+                effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
+            additional_args = ()
+            if with_indices:
+                additional_args += (effective_indices,)
+            if with_rank:
+                additional_args += (rank,)
+            processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
+            if isinstance(processed_inputs, LazyDict):
+                processed_inputs = {
+                    k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
+                }
+                returned_lazy_dict = True
+            else:
+                returned_lazy_dict = False
+            if update_data is None:
+                # Check if the function returns updated examples
+                update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame))
+                validate_function_output(processed_inputs, indices)
+            if not update_data:
+                return None  # Nothing to update, let's move on
+            if shard._format_type or input_columns:
+                # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
+                inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
+            elif isinstance(inputs, LazyDict):
+                inputs_to_merge = {
+                    k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
+                }
+            else:
+                inputs_to_merge = inputs
+            if remove_columns is not None:
+                for column in remove_columns:
+                    # `function` can modify input in-place causing column to be already removed.
+                    if column in inputs_to_merge:
+                        inputs_to_merge.pop(column)
+                    if returned_lazy_dict and column in processed_inputs:
+                        processed_inputs.pop(column)
+            if check_same_num_examples:
+                input_num_examples = len(pa_inputs)
+                processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
+                if input_num_examples != processed_inputs_num_examples:
+                    raise NumExamplesMismatchError()
+            if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
+                # The .map() transform *updates* the dataset:
+                # the output dictionary contains both the input data and the output data.
+                # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
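+                # Note: on key collisions, the values returned by `function` take
+                # precedence over the original column values (dict-merge semantics).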
+                return {**inputs_to_merge, **processed_inputs}
+            else:
+                return processed_inputs
+
+        def init_buffer_and_writer():
+            # Prepare output buffer and batched writer in memory or on file if we update the table
+            writer_features = features
+            if writer_features is None:
+                writer_features = shard.features
+                update_features = True
+            else:
+                update_features = False
+            if keep_in_memory or cache_file_name is None:
+                buf_writer = pa.BufferOutputStream()
+                tmp_file = None
+                writer = ArrowWriter(
+                    features=writer_features,
+                    stream=buf_writer,
+                    writer_batch_size=writer_batch_size,
+                    update_features=update_features,
+                    fingerprint=new_fingerprint,
+                    disable_nullable=disable_nullable,
+                )
+            else:
+                buf_writer = None
+                logger.info(f"Caching processed dataset at {cache_file_name}")
+                tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False)
+                writer = ArrowWriter(
+                    features=writer_features,
+                    path=tmp_file.name,
+                    writer_batch_size=writer_batch_size,
+                    update_features=update_features,
+                    fingerprint=new_fingerprint,
+                    disable_nullable=disable_nullable,
+                )
+            return buf_writer, writer, tmp_file
+
+        num_examples_progress_update = 0
+        # If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
+        buf_writer, writer, tmp_file = None, None, None
+
+        # Optionally initialize the writer as a context manager
+        with contextlib.ExitStack() as stack:
+            try:
+                arrow_formatted_shard = shard.with_format("arrow")
+
+                # Loop over single examples or batches and write to buffer/file if examples are to be updated
+                if not batched:
+                    shard_iterable = enumerate(arrow_formatted_shard)
+                else:
+                    num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
+                    shard_iterable = zip(
+                        range(0, num_rows, batch_size),
+                        arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
+                    )
+                if not batched:
+                    _time = time.time()
+                    for i, example in shard_iterable:
+                        example = apply_function_on_filtered_inputs(example, i, offset=offset)
+                        if update_data:
+                            if i == 0:
+                                buf_writer, writer, tmp_file = init_buffer_and_writer()
+                                stack.enter_context(writer)
+                            if isinstance(example, pa.Table):
+                                writer.write_row(example)
+                            elif isinstance(example, pd.DataFrame):
+                                writer.write_row(pa.Table.from_pandas(example))
+                            else:
+                                writer.write(example)
+                        num_examples_progress_update += 1
+                        if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+                            _time = time.time()
+                            yield rank, False, num_examples_progress_update
+                            num_examples_progress_update = 0
+                else:
+                    _time = time.time()
+                    for i, batch in shard_iterable:
+                        num_examples_in_batch = len(batch)
+                        indices = list(
+                            range(*(slice(i, i + batch_size).indices(shard.num_rows)))
+                        )  # Something simpler?
+                        try:
+                            batch = apply_function_on_filtered_inputs(
+                                batch,
+                                indices,
+                                check_same_num_examples=len(shard.list_indexes()) > 0,
+                                offset=offset,
+                            )
+                        except NumExamplesMismatchError:
+                            raise DatasetTransformationNotAllowedError(
+                                "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index()` to remove your index and then re-add it."
+ ) from None + if update_data: + if i == 0: + buf_writer, writer, tmp_file = init_buffer_and_writer() + stack.enter_context(writer) + if isinstance(batch, pa.Table): + writer.write_table(batch) + elif isinstance(batch, pd.DataFrame): + writer.write_table(pa.Table.from_pandas(batch)) + else: + writer.write_batch(batch) + num_examples_progress_update += num_examples_in_batch + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield rank, False, num_examples_progress_update + num_examples_progress_update = 0 + if update_data and writer is not None: + writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file + except (Exception, KeyboardInterrupt): + yield rank, False, num_examples_progress_update + if update_data: + if writer is not None: + writer.finalize() + if tmp_file is not None: + tmp_file.close() + if os.path.exists(tmp_file.name): + os.remove(tmp_file.name) + raise + + yield rank, False, num_examples_progress_update + if update_data and tmp_file is not None: + tmp_file.close() + shutil.move(tmp_file.name, cache_file_name) + umask = os.umask(0o666) + os.umask(umask) + os.chmod(cache_file_name, 0o666 & ~umask) + + if update_data: + # Create new Dataset from buffer or file + info = shard.info.copy() + info.features = writer._features + info.task_templates = None + if buf_writer is None: + yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split) + else: + yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split) + else: + yield rank, True, shard + + @transmit_format + @fingerprint_transform( + inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1" + ) + def filter( + self, + function: Optional[Callable] = None, + with_indices=False, + input_columns: Optional[Union[str, List[str]]] = None, + batched: bool = False, + batch_size: Optional[int] = 1000, + keep_in_memory: bool = False, + load_from_cache_file: Optional[bool] = None, + cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + fn_kwargs: Optional[dict] = None, + num_proc: Optional[int] = None, + suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", + new_fingerprint: Optional[str] = None, + desc: Optional[str] = None, + ) -> "Dataset": + """Apply a filter function to all the elements in the table in batches + and update the table so that the dataset only includes examples according to the filter function. + + Args: + function (`Callable`): Callable with one of the following signatures: + + - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` + - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` + - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` + - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True` + + If no function is provided, defaults to an always `True` function: `lambda x: True`. + with_indices (`bool`, defaults to `False`): + Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. + input_columns (`str` or `List[str]`, *optional*): + The columns to be passed into `function` as + positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. + batched (`bool`, defaults to `False`): + Provide batch of examples to `function`. 
+ batch_size (`int`, *optional*, defaults to `1000`): + Number of examples per batch provided to `function` if + `batched = True`. If `batched = False`, one example per batch is passed to `function`. + If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. + keep_in_memory (`bool`, defaults to `False`): + Keep the dataset in memory instead of writing it to a cache file. + load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): + If a cache file storing the current computation from `function` + can be identified, use it instead of recomputing. + cache_file_name (`str`, *optional*): + Provide the name of a path for the cache file. It is used to store the + results of the computation instead of the automatically generated cache file name. + writer_batch_size (`int`, defaults to `1000`): + Number of rows per write operation for the cache file writer. + This value is a good trade-off between memory usage during the processing, and processing speed. + Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. + fn_kwargs (`dict`, *optional*): + Keyword arguments to be passed to `function`. + num_proc (`int`, *optional*): + Number of processes for multiprocessing. By default it doesn't + use multiprocessing. + suffix_template (`str`): + If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. + For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`, + the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default + `_{rank:05d}_of_{num_proc:05d}`). + new_fingerprint (`str`, *optional*): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + desc (`str`, *optional*, defaults to `None`): + Meaningful description to be displayed alongside with the progress bar while filtering examples. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.filter(lambda x: x["label"] == 1) + Dataset({ + features: ['text', 'label'], + num_rows: 533 + }) + ``` + """ + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError( + "Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index()` to remove your index and then re-add it."
+            )
+
+        if function is None:
+            function = lambda x: True  # noqa: E731
+
+        if len(self) == 0:
+            return self
+
+        indices = self.map(
+            function=partial(
+                get_indices_from_mask_function, function, batched, with_indices, input_columns, self._indices
+            ),
+            with_indices=True,
+            features=Features({"indices": Value("uint64")}),
+            batched=True,
+            batch_size=batch_size,
+            remove_columns=self.column_names,
+            keep_in_memory=keep_in_memory,
+            load_from_cache_file=load_from_cache_file,
+            cache_file_name=cache_file_name,
+            writer_batch_size=writer_batch_size,
+            fn_kwargs=fn_kwargs,
+            num_proc=num_proc,
+            suffix_template=suffix_template,
+            new_fingerprint=new_fingerprint,
+            input_columns=input_columns,
+            desc=desc or "Filter",
+        )
+        new_dataset = copy.deepcopy(self)
+        new_dataset._indices = indices.data
+        new_dataset._fingerprint = new_fingerprint
+        return new_dataset
+
+    @transmit_format
+    @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
+    def flatten_indices(
+        self,
+        keep_in_memory: bool = False,
+        cache_file_name: Optional[str] = None,
+        writer_batch_size: Optional[int] = 1000,
+        features: Optional[Features] = None,
+        disable_nullable: bool = False,
+        num_proc: Optional[int] = None,
+        new_fingerprint: Optional[str] = None,
+    ) -> "Dataset":
+        """Create and cache a new Dataset by flattening the indices mapping.
+
+        Args:
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the dataset in memory instead of writing it to a cache file.
+            cache_file_name (`str`, *optional*, default `None`):
+                Provide the name of a path for the cache file. It is used to store the
+                results of the computation instead of the automatically generated cache file name.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`.
+            features (`Optional[datasets.Features]`, defaults to `None`):
+                Use a specific [`Features`] to store the cache file
+                instead of the automatically generated one.
+            disable_nullable (`bool`, defaults to `False`):
+                Disallow null values in the table.
+            num_proc (`int`, *optional*, defaults to `None`):
+                Max number of processes when generating cache. Already cached shards are loaded sequentially.
+            new_fingerprint (`str`, *optional*, defaults to `None`):
+                The new fingerprint of the dataset after transform.
+                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+        """
+
+        return self.map(
+            batched=True,  # for speed
+            keep_in_memory=keep_in_memory,
+            cache_file_name=cache_file_name,
+            writer_batch_size=writer_batch_size,
+            features=features,
+            disable_nullable=disable_nullable,
+            new_fingerprint=new_fingerprint,
+            desc="Flattening the indices",
+            num_proc=num_proc,
+        )
+
+    def _new_dataset_with_indices(
+        self,
+        indices_cache_file_name: Optional[str] = None,
+        indices_buffer: Optional[pa.Buffer] = None,
+        fingerprint: Optional[str] = None,
+    ) -> "Dataset":
+        """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
+        current Dataset.
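+
+        At least one of `indices_cache_file_name` and `indices_buffer` must be provided,
+        together with a `fingerprint` for the resulting dataset.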
+ """ + + if indices_cache_file_name is None and indices_buffer is None: + raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") + + if fingerprint is None: + raise ValueError("please specify a fingerprint for the dataset with indices") + + if indices_cache_file_name is not None: + indices_table = MemoryMappedTable.from_file(indices_cache_file_name) + else: + indices_table = InMemoryTable.from_buffer(indices_buffer) + + # Return new Dataset object + # don't forget to copy the objects + return Dataset( + self._data, + info=self.info.copy(), + split=self.split, + indices_table=indices_table, + fingerprint=fingerprint, + ) + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"]) + def select( + self, + indices: Iterable, + keep_in_memory: bool = False, + indices_cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + new_fingerprint: Optional[str] = None, + ) -> "Dataset": + """Create a new dataset with rows selected following the list/array of indices. + + Args: + indices (`range`, `list`, `iterable`, `ndarray` or `Series`): + Range, list or 1D-array of integer indices for indexing. + If the indices correspond to a contiguous range, the Arrow table is simply sliced. + However passing a list of indices that are not contiguous creates indices mapping, which is much less efficient, + but still faster than recreating an Arrow table made of the requested rows. + keep_in_memory (`bool`, defaults to `False`): + Keep the indices mapping in memory instead of writing it to a cache file. + indices_cache_file_name (`str`, *optional*, defaults to `None`): + Provide the name of a path for the cache file. It is used to store the + indices mapping instead of the automatically generated cache file name. + writer_batch_size (`int`, defaults to `1000`): + Number of rows per write operation for the cache file writer. + This value is a good trade-off between memory usage during the processing, and processing speed. + Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. + new_fingerprint (`str`, *optional*, defaults to `None`): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.select(range(4)) + Dataset({ + features: ['text', 'label'], + num_rows: 4 + }) + ``` + """ + if keep_in_memory and indices_cache_file_name is not None: + raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") + + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError( + "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
+ ) + + # If the array is empty we do nothing + if len(self) == 0: + return self + + # If indices is a PyArrow array, we convert to NumPy + if isinstance(indices, (pa.Array, pa.ChunkedArray)): + indices = indices.to_numpy().astype(np.int64) + + # Convert generator objects to lists + if isinstance(indices, Iterator): + indices = list(indices) + + # If the indices are contiguous, simply slice the arrow table + if isinstance(indices, range): + if _is_range_contiguous(indices) and indices.start >= 0: + start, length = indices.start, indices.stop - indices.start + return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) + else: + try: + start = next(iter(indices)) + except StopIteration: + # if `indices` is an empty iterable, we return an empty dataset + return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) + if start >= 0: + counter_from_start = itertools.count(start=start) + if all(i == j for i, j in zip(indices, counter_from_start)): + length = next(counter_from_start) - start + return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) + + # If not contiguous, we need to create a new indices mapping + return self._select_with_indices_mapping( + indices, + keep_in_memory=keep_in_memory, + indices_cache_file_name=indices_cache_file_name, + writer_batch_size=writer_batch_size, + new_fingerprint=new_fingerprint, + ) + + @transmit_format + @fingerprint_transform(inplace=False) + def _select_contiguous( + self, + start: int, + length: int, + new_fingerprint: Optional[str] = None, + ) -> "Dataset": + """Create a new dataset with rows from a contiguous slice of data. + The slice is defined by that start index and its length. + + Args: + start (`int`): start index. + length (`int`): length of the slice to select. + new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds._select_contiguous(0, 4) + Dataset({ + features: ['text', 'label'], + num_rows: 4 + }) + ``` + """ + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError( + "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." + ) + + # If the array is empty we do nothing + if len(self) == 0: + return self + + _check_valid_indices_value(start, len(self)) + _check_valid_indices_value(start + length - 1, len(self)) + if self._indices is None or length == 0: + return Dataset( + self.data.slice(start, length), + info=self.info.copy(), + split=self.split, + fingerprint=new_fingerprint, + ) + else: + return Dataset( + self.data, + info=self.info.copy(), + split=self.split, + indices_table=self._indices.slice(start, length), + fingerprint=new_fingerprint, + ) + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"]) + def _select_with_indices_mapping( + self, + indices: Iterable, + keep_in_memory: bool = False, + indices_cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + new_fingerprint: Optional[str] = None, + ) -> "Dataset": + """Create a new dataset with rows selected following the list/array of indices. + The new dataset is made by creating a new indices mapping on top of the main arrow table. 
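+        For instance (a sketch of the semantics, not the storage layout), selecting rows
+        `[7, 4, 4]` copies no data: it only writes a small arrow table of indices that is
+        looked up on top of the original table at access time:
+
+        ```py
+        >>> picked = ds._select_with_indices_mapping([7, 4, 4])
+        >>> picked._indices is not None
+        True
+        ```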
+ + Args: + indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing. + keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file. + indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the + indices mapping instead of the automatically generated cache file name. + writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. + This value is a good trade-off between memory usage during the processing, and processing speed. + Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. + new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds._select_with_indices_mapping(range(4)) + Dataset({ + features: ['text', 'label'], + num_rows: 4 + }) + ``` + """ + if keep_in_memory and indices_cache_file_name is not None: + raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") + + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError( + "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." + ) + + # If the array is empty we do nothing + if len(self) == 0: + return self + + # Prepare the writer for our indices arrow table + if keep_in_memory or indices_cache_file_name is None: + buf_writer = pa.BufferOutputStream() + tmp_file = None + writer = ArrowWriter( + stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" + ) + else: + buf_writer = None + logger.info(f"Caching indices mapping at {indices_cache_file_name}") + tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False) + writer = ArrowWriter( + path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices" + ) + + indices = indices if isinstance(indices, list) else list(indices) + + size = len(self) + if indices: + _check_valid_indices_value(int(max(indices)), size=size) + _check_valid_indices_value(int(min(indices)), size=size) + else: + return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) + + indices_array = pa.array(indices, type=pa.uint64()) + # Check if we need to convert indices + if self._indices is not None: + indices_array = self._indices.column(0).take(indices_array) + + indices_table = pa.Table.from_arrays([indices_array], names=["indices"]) + + with writer: + try: + writer.write_table(indices_table) + writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file + except (Exception, KeyboardInterrupt): + if tmp_file is not None: + tmp_file.close() + if os.path.exists(tmp_file.name): + os.remove(tmp_file.name) + raise + + if tmp_file is not None: + tmp_file.close() + shutil.move(tmp_file.name, indices_cache_file_name) + umask = os.umask(0o666) + os.umask(umask) + os.chmod(indices_cache_file_name, 0o666 & ~umask) + + # Return new Dataset object + if buf_writer is None: + return self._new_dataset_with_indices( + 
indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint + ) + else: + return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]) + def sort( + self, + column_names: Union[str, Sequence_[str]], + reverse: Union[bool, Sequence_[bool]] = False, + kind="deprecated", + null_placement: str = "at_end", + keep_in_memory: bool = False, + load_from_cache_file: Optional[bool] = None, + indices_cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + new_fingerprint: Optional[str] = None, + ) -> "Dataset": + """Create a new dataset sorted according to a single or multiple columns. + + Args: + column_names (`Union[str, Sequence[str]]`): + Column name(s) to sort by. + reverse (`Union[bool, Sequence[bool]]`, defaults to `False`): + If `True`, sort by descending order rather than ascending. If a single bool is provided, + the value is applied to the sorting of all column names. Otherwise a list of bools with the + same length and order as column_names must be provided. + kind (`str`, *optional*): + Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`, + The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general, + the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility. + + + `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0. + + + null_placement (`str`, defaults to `at_end`): + Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last` + + + keep_in_memory (`bool`, defaults to `False`): + Keep the sorted indices in memory instead of writing it to a cache file. + load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): + If a cache file storing the sorted indices + can be identified, use it instead of recomputing. + indices_cache_file_name (`str`, *optional*, defaults to `None`): + Provide the name of a path for the cache file. It is used to store the + sorted indices instead of the automatically generated cache file name. + writer_batch_size (`int`, defaults to `1000`): + Number of rows per write operation for the cache file writer. + Higher value gives smaller cache files, lower value consume less temporary memory. + new_fingerprint (`str`, *optional*, defaults to `None`): + The new fingerprint of the dataset after transform. + If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset('rotten_tomatoes', split='validation') + >>> ds['label'][:10] + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + >>> sorted_ds = ds.sort('label') + >>> sorted_ds['label'][:10] + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False]) + >>> another_sorted_ds['label'][:10] + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + ``` + """ + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError( + "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it." 
+ ) + # If the array is empty we do nothing + if len(self) == 0: + return self + + # Deprecation warning + if kind != "deprecated": + warnings.warn( + "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.", + category=FutureWarning, + ) + + # Check proper format of and for duplicates in column_names + if isinstance(column_names, str): + column_names = [column_names] + + # Check proper format and length of reverse + if not isinstance(reverse, bool): + if len(reverse) != len(column_names): + raise ValueError( + "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'." + ) + else: + reverse = [reverse] * len(column_names) + + # Check whether column name(s) exist in dataset + for column in column_names: + if not isinstance(column, str) or column not in self._data.column_names: + raise ValueError( + f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}" + ) + + # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatability + if null_placement not in ["at_start", "at_end"]: + if null_placement == "first": + null_placement = "at_start" + elif null_placement == "last": + null_placement = "at_end" + else: + raise ValueError( + f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'." + ) + + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + + # Check if we've already cached this computation (indexed by a hash) + if self.cache_files: + if indices_cache_file_name is None: + # we create a unique hash from the function, current dataset file and the mapping args + indices_cache_file_name = self._get_cache_file_path(new_fingerprint) + if os.path.exists(indices_cache_file_name) and load_from_cache_file: + logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}") + return self._new_dataset_with_indices( + fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name + ) + + sort_table = query_table( + table=self._data, + key=slice(0, len(self)), + indices=self._indices if self._indices is not None else None, + ) + + sort_keys = [ + (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse) + ] + + indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement) + + return self.select( + indices=indices, + keep_in_memory=keep_in_memory, + indices_cache_file_name=indices_cache_file_name, + writer_batch_size=writer_batch_size, + new_fingerprint=new_fingerprint, + ) + + @transmit_format + @fingerprint_transform( + inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"] + ) + def shuffle( + self, + seed: Optional[int] = None, + generator: Optional[np.random.Generator] = None, + keep_in_memory: bool = False, + load_from_cache_file: Optional[bool] = None, + indices_cache_file_name: Optional[str] = None, + writer_batch_size: Optional[int] = 1000, + new_fingerprint: Optional[str] = None, + ) -> "Dataset": + """Create a new Dataset where the rows are shuffled. + + Currently shuffling uses numpy random generators. + You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64). + + Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping. 
+        However, as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
+        This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
+        To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
+        This may take a lot of time depending on the size of your dataset though:
+
+        ```python
+        my_dataset[0]  # fast
+        my_dataset = my_dataset.shuffle(seed=42)
+        my_dataset[0]  # up to 10x slower
+        my_dataset = my_dataset.flatten_indices()  # rewrite the shuffled dataset on disk as contiguous chunks of data
+        my_dataset[0]  # fast again
+        ```
+
+        In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
+        It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
+
+        ```python
+        my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
+        for example in my_iterable_dataset:  # fast
+            pass
+
+        shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
+
+        for example in shuffled_iterable_dataset:  # as fast as before
+            pass
+        ```
+
+        Args:
+            seed (`int`, *optional*):
+                A seed to initialize the default BitGenerator if `generator=None`.
+                If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+                If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+            generator (`numpy.random.Generator`, *optional*):
+                Numpy random Generator to use to compute the permutation of the dataset rows.
+                If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+            keep_in_memory (`bool`, default `False`):
+                Keep the shuffled indices in memory instead of writing them to a cache file.
+            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+                If a cache file storing the shuffled indices
+                can be identified, use it instead of recomputing.
+            indices_cache_file_name (`str`, *optional*):
+                Provide the name of a path for the cache file. It is used to store the
+                shuffled indices instead of the automatically generated cache file name.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+            new_fingerprint (`str`, *optional*, defaults to `None`):
+                The new fingerprint of the dataset after transform.
+                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="validation")
+        >>> ds['label'][:10]
+        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+        # set a seed
+        >>> shuffled_ds = ds.shuffle(seed=42)
+        >>> shuffled_ds['label'][:10]
+        [1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
+        ```
+        """
+        if len(self.list_indexes()) > 0:
+            raise DatasetTransformationNotAllowedError(
+                "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+            )
+        # If the array is empty we do nothing
+        if len(self) == 0:
+            return self
+
+        if keep_in_memory and indices_cache_file_name is not None:
+            raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+        if seed is not None and generator is not None:
+            raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
+
+        if generator is not None and not isinstance(generator, np.random.Generator):
+            raise ValueError("The provided generator must be an instance of numpy.random.Generator")
+
+        load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+        if generator is None:
+            if seed is None:
+                _, seed, pos, *_ = np.random.get_state()
+                seed = seed[pos] if pos < 624 else seed[0]
+                _ = np.random.random()  # do 1 step of rng
+            generator = np.random.default_rng(seed)
+
+        # Check if we've already cached this computation (indexed by a hash)
+        if self.cache_files:
+            if indices_cache_file_name is None:
+                # we create a unique hash from the function, current dataset file and the mapping args
+                indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+            if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+                logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
+                return self._new_dataset_with_indices(
+                    fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+                )
+
+        permutation = generator.permutation(len(self))
+
+        return self.select(
+            indices=permutation,
+            keep_in_memory=keep_in_memory,
+            indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
+            writer_batch_size=writer_batch_size,
+            new_fingerprint=new_fingerprint,
+        )
+
+    @transmit_format
+    @fingerprint_transform(
+        inplace=False,
+        randomized_function=True,
+        fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
+        ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
+    )
+    def train_test_split(
+        self,
+        test_size: Union[float, int, None] = None,
+        train_size: Union[float, int, None] = None,
+        shuffle: bool = True,
+        stratify_by_column: Optional[str] = None,
+        seed: Optional[int] = None,
+        generator: Optional[np.random.Generator] = None,
+        keep_in_memory: bool = False,
+        load_from_cache_file: Optional[bool] = None,
+        train_indices_cache_file_name: Optional[str] = None,
+        test_indices_cache_file_name: Optional[str] = None,
+        writer_batch_size: Optional[int] = 1000,
+        train_new_fingerprint: Optional[str] = None,
+        test_new_fingerprint: Optional[str] = None,
+    ) -> "DatasetDict":
+        """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
+        Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
+
+        This method is similar to scikit-learn `train_test_split`.
+
+        Args:
+            test_size (`int` or `float`, *optional*):
+                Size of the test split.
+                If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
+                If `int`, represents the absolute number of test samples.
+                If `None`, the value is set to the complement of the train size.
+                If `train_size` is also `None`, it will be set to `0.25`.
+            train_size (`int` or `float`, *optional*):
+                Size of the train split.
+                If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
+                If `int`, represents the absolute number of train samples.
+                If `None`, the value is automatically set to the complement of the test size.
+            shuffle (`bool`, *optional*, defaults to `True`):
+                Whether or not to shuffle the data before splitting.
+            stratify_by_column (`str`, *optional*, defaults to `None`):
+                The column name of labels to be used to perform stratified split of data.
+            seed (`int`, *optional*):
+                A seed to initialize the default BitGenerator if `generator=None`.
+                If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+                If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+            generator (`numpy.random.Generator`, *optional*):
+                Numpy random Generator to use to compute the permutation of the dataset rows.
+                If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the splits indices in memory instead of writing them to a cache file.
+            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+                If a cache file storing the splits indices
+                can be identified, use it instead of recomputing.
+            train_indices_cache_file_name (`str`, *optional*):
+                Provide the name of a path for the cache file. It is used to store the
+                train split indices instead of the automatically generated cache file name.
+            test_indices_cache_file_name (`str`, *optional*):
+                Provide the name of a path for the cache file. It is used to store the
+                test split indices instead of the automatically generated cache file name.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+            train_new_fingerprint (`str`, *optional*, defaults to `None`):
+                The new fingerprint of the train set after transform.
+                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+            test_new_fingerprint (`str`, *optional*, defaults to `None`):
+                The new fingerprint of the test set after transform.
+                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="validation")
+        >>> ds = ds.train_test_split(test_size=0.2, shuffle=True)
+        DatasetDict({
+            train: Dataset({
+                features: ['text', 'label'],
+                num_rows: 852
+            })
+            test: Dataset({
+                features: ['text', 'label'],
+                num_rows: 214
+            })
+        })
+
+        # set a seed
+        >>> ds = ds.train_test_split(test_size=0.2, seed=42)
+
+        # stratified split
+        >>> ds = load_dataset("imdb", split="train")
+        Dataset({
+            features: ['text', 'label'],
+            num_rows: 25000
+        })
+        >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
+        DatasetDict({
+            train: Dataset({
+                features: ['text', 'label'],
+                num_rows: 20000
+            })
+            test: Dataset({
+                features: ['text', 'label'],
+                num_rows: 5000
+            })
+        })
+        ```
+        """
+        from .dataset_dict import DatasetDict  # import here because of circular dependency
+
+        if len(self.list_indexes()) > 0:
+            raise DatasetTransformationNotAllowedError(
+                "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ ) + # If the array is empty we do nothing + if len(self) == 0: + return DatasetDict({"train": self, "test": self}) + + if test_size is None and train_size is None: + test_size = 0.25 + + # Safety checks similar to scikit-learn's ones. + # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750) + n_samples = len(self) + if ( + isinstance(test_size, int) + and (test_size >= n_samples or test_size <= 0) + or isinstance(test_size, float) + and (test_size <= 0 or test_size >= 1) + ): + raise ValueError( + f"test_size={test_size} should be either positive and smaller " + f"than the number of samples {n_samples} or a float in the (0, 1) range" + ) + + if ( + isinstance(train_size, int) + and (train_size >= n_samples or train_size <= 0) + or isinstance(train_size, float) + and (train_size <= 0 or train_size >= 1) + ): + raise ValueError( + f"train_size={train_size} should be either positive and smaller " + f"than the number of samples {n_samples} or a float in the (0, 1) range" + ) + + if train_size is not None and not isinstance(train_size, (int, float)): + raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}") + if test_size is not None and not isinstance(test_size, (int, float)): + raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") + + if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1: + raise ValueError( + f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)" + " range. Reduce test_size and/or train_size." + ) + + if isinstance(test_size, float): + n_test = ceil(test_size * n_samples) + elif isinstance(test_size, int): + n_test = float(test_size) + + if isinstance(train_size, float): + n_train = floor(train_size * n_samples) + elif isinstance(train_size, int): + n_train = float(train_size) + + if train_size is None: + n_train = n_samples - n_test + elif test_size is None: + n_test = n_samples - n_train + + if n_train + n_test > n_samples: + raise ValueError( + f"The sum of train_size and test_size = {n_train + n_test}, " + "should be smaller than the number of " + f"samples {n_samples}. Reduce test_size and/or " + "train_size." + ) + + n_train, n_test = int(n_train), int(n_test) + + if n_train == 0: + raise ValueError( + f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the " + "resulting train set will be empty. Adjust any of the " + "aforementioned parameters." 
+ ) + + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + + if generator is None and shuffle is True: + if seed is None: + _, seed, pos, *_ = np.random.get_state() + seed = seed[pos] if pos < 624 else seed[0] + _ = np.random.random() # do 1 step of rng + generator = np.random.default_rng(seed) + + # Check if we've already cached this computation (indexed by a hash) + if self.cache_files: + if train_indices_cache_file_name is None or test_indices_cache_file_name is None: + # we create a unique hash from the function, current dataset file and the mapping args + + if train_indices_cache_file_name is None: + train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) + if test_indices_cache_file_name is None: + test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) + if ( + os.path.exists(train_indices_cache_file_name) + and os.path.exists(test_indices_cache_file_name) + and load_from_cache_file + ): + logger.info( + f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}" + ) + return DatasetDict( + { + "train": self._new_dataset_with_indices( + fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name + ), + "test": self._new_dataset_with_indices( + fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name + ), + } + ) + if not shuffle: + if stratify_by_column is not None: + raise ValueError("Stratified train/test split is not implemented for `shuffle=False`") + train_indices = np.arange(n_train) + test_indices = np.arange(n_train, n_train + n_test) + else: + # stratified partition + if stratify_by_column is not None: + if stratify_by_column not in self._info.features.keys(): + raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}") + if not isinstance(self._info.features[stratify_by_column], ClassLabel): + raise ValueError( + f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}." + ) + try: + train_indices, test_indices = next( + stratified_shuffle_split_generate_indices( + self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator + ) + ) + except Exception as error: + if str(error) == "Minimum class count error": + raise ValueError( + f"The least populated class in {stratify_by_column} column has only 1" + " member, which is too few. The minimum" + " number of groups for any class cannot" + " be less than 2." 
+                    )
+                else:
+                    raise error
+
+            # random partition
+            else:
+                permutation = generator.permutation(len(self))
+                test_indices = permutation[:n_test]
+                train_indices = permutation[n_test : (n_test + n_train)]
+
+        train_split = self.select(
+            indices=train_indices,
+            keep_in_memory=keep_in_memory,
+            indices_cache_file_name=train_indices_cache_file_name,
+            writer_batch_size=writer_batch_size,
+            new_fingerprint=train_new_fingerprint,
+        )
+        test_split = self.select(
+            indices=test_indices,
+            keep_in_memory=keep_in_memory,
+            indices_cache_file_name=test_indices_cache_file_name,
+            writer_batch_size=writer_batch_size,
+            new_fingerprint=test_new_fingerprint,
+        )
+
+        return DatasetDict({"train": train_split, "test": test_split})
+
+    def shard(
+        self,
+        num_shards: int,
+        index: int,
+        contiguous: bool = False,
+        keep_in_memory: bool = False,
+        indices_cache_file_name: Optional[str] = None,
+        writer_batch_size: Optional[int] = 1000,
+    ) -> "Dataset":
+        """Return the `index`-nth shard from dataset split into `num_shards` pieces.
+
+        This shards deterministically. `dset.shard(n, i)` will contain all elements of dset whose
+        index mod `n = i`.
+
+        `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks,
+        so it can be easily concatenated back together after processing. If `len(dset) % n == l`, then the
+        first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.
+        `datasets.concatenate_datasets([dset.shard(n, i, contiguous=True) for i in range(n)])` will return
+        a dataset with the same order as the original.
+
+        Be sure to shard before using any randomizing operator (such as `shuffle`).
+        It is best if the shard operator is used early in the dataset pipeline.
+
+        Args:
+            num_shards (`int`):
+                How many shards to split the dataset into.
+            index (`int`):
+                Which shard to select and return.
+            contiguous (`bool`, defaults to `False`):
+                Whether to select contiguous blocks of indices for shards.
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the dataset in memory instead of writing it to a cache file.
+            indices_cache_file_name (`str`, *optional*):
+                Provide the name of a path for the cache file. It is used to store the
+                indices of each shard instead of the automatically generated cache file name.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
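+
+        As a sketch of the contiguous case, chaining the shards back together restores the
+        original dataset order (using the public `concatenate_datasets` helper):
+
+        ```py
+        >>> from datasets import concatenate_datasets
+        >>> shards = [ds.shard(num_shards=4, index=i, contiguous=True) for i in range(4)]
+        >>> reassembled = concatenate_datasets(shards)  # same rows, same order as ds
+        ```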
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds + Dataset({ + features: ['text', 'label'], + num_rows: 1066 + }) + >>> ds.shard(num_shards=2, index=0) + Dataset({ + features: ['text', 'label'], + num_rows: 533 + }) + ``` + """ + if not 0 <= index < num_shards: + raise ValueError("index should be in [0, num_shards-1]") + if contiguous: + div = len(self) // num_shards + mod = len(self) % num_shards + start = div * index + min(index, mod) + end = start + div + (1 if index < mod else 0) + indices = range(start, end) + else: + indices = np.arange(index, len(self), num_shards) + + return self.select( + indices=indices, + keep_in_memory=keep_in_memory, + indices_cache_file_name=indices_cache_file_name, + writer_batch_size=writer_batch_size, + ) + + @deprecated() + def export( + self, + filename: str, + format: str = "tfrecord", + ): + """Writes the Arrow dataset to a TFRecord file. + + The dataset must already be in tensorflow format. The records will be written with + keys from `dataset._format_columns`. + + Args: + filename (`str`): The filename, including the `.tfrecord` extension, to write to. + format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as + TFRecords are the only option. This enables a more flexible function signature later. + """ + try: + import tensorflow as tf # noqa: F401 + except ImportError: + logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.") + + # From https://www.tensorflow.org/tutorials/load_data/tfrecord + def _bytes_feature(values): + """Returns a bytes_list from a list of string / byte.""" + return tf.train.Feature(bytes_list=tf.train.BytesList(value=values)) + + def _float_feature(values): + """Returns a float_list from a list of float / double.""" + return tf.train.Feature(float_list=tf.train.FloatList(value=values)) + + def _int64_feature(values): + """Returns an int64_list from a list of bool / enum / int / uint.""" + return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) + + def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature": + """Typechecks `values` and returns the corresponding tf.train.Feature.""" + if isinstance(values, list): + if values and isinstance(values[0], str): + return _bytes_feature([v.encode() for v in values]) + else: + raise ValueError(f"values={values} is empty or contains items that cannot be serialized") + elif isinstance(values, np.ndarray): + if values.dtype == np.dtype(float): + return _float_feature(values) + elif values.dtype == np.int64: + return _int64_feature(values) + elif values.dtype == np.dtype(str) or ( + values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str) + ): + return _bytes_feature([v.encode() for v in values]) + else: + raise ValueError( + f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized" + ) + elif hasattr(values, "dtype"): + if np.issubdtype(values.dtype, np.floating): + return _float_feature([values.item()]) + elif np.issubdtype(values.dtype, np.integer): + return _int64_feature([values.item()]) + elif np.issubdtype(values.dtype, str): + return _bytes_feature([values.item().encode()]) + else: + raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized") + else: + raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized") + + def 
serialize_example(ex):
+            feature = {key: _feature(value) for key, value in ex.items()}
+            example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
+            return example_proto.SerializeToString()
+
+        def tf_serialize_example(ex):
+            tf_string = tf.py_function(serialize_example, (ex,), tf.string)
+            return tf.reshape(tf_string, ())
+
+        def generator():
+            for ex in self:
+                yield serialize_example(ex)
+
+        if self._format_type != "numpy":
+            raise ValueError("Dataset format must be numpy before exporting")
+        if not filename.endswith(".tfrecord"):
+            raise ValueError(f"filename {filename} must end with .tfrecord")
+        tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
+        writer = tf.data.experimental.TFRecordWriter(filename)
+        logger.info(f"Writing TFRecord to {filename}")
+        writer.write(tf_dataset)
+        logger.info(f"Finished writing TFRecord to {filename}")
+        self = None  # delete the dataset reference used by tf_dataset
+
+    def to_csv(
+        self,
+        path_or_buf: Union[PathLike, BinaryIO],
+        batch_size: Optional[int] = None,
+        num_proc: Optional[int] = None,
+        **to_csv_kwargs,
+    ) -> int:
+        """Exports the dataset to CSV.
+
+        Args:
+            path_or_buf (`PathLike` or `FileOrBuffer`):
+                Either a path to a file or a BinaryIO.
+            batch_size (`int`, *optional*):
+                Size of the batch to load in memory and write at once.
+                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+            num_proc (`int`, *optional*):
+                Number of processes for multiprocessing. By default it doesn't
+                use multiprocessing. `batch_size` in this case defaults to
+                `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+                value if you have sufficient compute power.
+            **to_csv_kwargs (additional keyword arguments):
+                Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
+
+
+                Now, `index` defaults to `False` if not specified.
+
+                If you would like to write the index, pass `index=True` and also set a name for the index column by
+                passing `index_label`.
+
+
+        Returns:
+            `int`: The number of characters or bytes written.
+
+        Example:
+
+        ```py
+        >>> ds.to_csv("path/to/dataset/directory")
+        ```
+        """
+        # Dynamic import to avoid circular dependency
+        from .io.csv import CsvDatasetWriter
+
+        return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write()
+
+    def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]:
+        """Returns the dataset as a Python dict. Can also return a generator for large datasets.
+
+        Args:
+            batched (`bool`):
+                Set to `True` to return a generator that yields the dataset as batches
+                of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+
+
+                Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.
+
+
+            batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
+                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+        Returns:
+            `dict` or `Iterator[dict]`
+
+        Example:
+
+        ```py
+        >>> ds.to_dict()
+        ```
+        """
+        if batched != "deprecated":
+            warnings.warn(
+                "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.",
+                FutureWarning,
+            )
+        else:
+            batched = False
+
+        if not batched:
+            return query_table(
+                table=self._data,
+                key=slice(0, len(self)),
+                indices=self._indices if self._indices is not None else None,
+            ).to_pydict()
+        else:
+            batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+            return (
+                query_table(
+                    table=self._data,
+                    key=slice(offset, offset + batch_size),
+                    indices=self._indices if self._indices is not None else None,
+                ).to_pydict()
+                for offset in range(0, len(self), batch_size)
+            )
+
+    def to_list(self) -> list:
+        """Returns the dataset as a Python list.
+
+        Returns:
+            `list`
+
+        Example:
+
+        ```py
+        >>> ds.to_list()
+        ```
+        """
+        return query_table(
+            table=self._data,
+            key=slice(0, len(self)),
+            indices=self._indices if self._indices is not None else None,
+        ).to_pylist()
+
+    def to_json(
+        self,
+        path_or_buf: Union[PathLike, BinaryIO],
+        batch_size: Optional[int] = None,
+        num_proc: Optional[int] = None,
+        **to_json_kwargs,
+    ) -> int:
+        """Export the dataset to JSON Lines or JSON.
+
+        Args:
+            path_or_buf (`PathLike` or `FileOrBuffer`):
+                Either a path to a file or a BinaryIO.
+            batch_size (`int`, *optional*):
+                Size of the batch to load in memory and write at once.
+                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+            num_proc (`int`, *optional*):
+                Number of processes for multiprocessing. By default it doesn't
+                use multiprocessing. `batch_size` in this case defaults to
+                `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+                value if you have sufficient compute power.
+            **to_json_kwargs (additional keyword arguments):
+                Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
+
+
+
+                Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`.
+
+                If you would like to write the index, pass `index=True`.
+
+
+
+        Returns:
+            `int`: The number of characters or bytes written.
+
+        Example:
+
+        ```py
+        >>> ds.to_json("path/to/dataset/directory")
+        ```
+        """
+        # Dynamic import to avoid circular dependency
+        from .io.json import JsonDatasetWriter
+
+        return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write()
+
+    def to_pandas(
+        self, batch_size: Optional[int] = None, batched: bool = False
+    ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
+        """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
+
+        Args:
+            batched (`bool`):
+                Set to `True` to return a generator that yields the dataset as batches
+                of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+            batch_size (`int`, *optional*):
+                The size (number of rows) of the batches if `batched` is `True`.
+                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
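+
+        For example, the batched form yields one `DataFrame` per batch of rows, which keeps
+        memory bounded on large datasets:
+
+        ```py
+        >>> for df in ds.to_pandas(batched=True, batch_size=1000):
+        ...     pass  # each df has at most 1000 rows
+        ```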
+
+        Returns:
+            `pandas.DataFrame` or `Iterator[pandas.DataFrame]`
+
+        Example:
+
+        ```py
+        >>> ds.to_pandas()
+        ```
+        """
+        if not batched:
+            return query_table(
+                table=self._data,
+                key=slice(0, len(self)),
+                indices=self._indices if self._indices is not None else None,
+            ).to_pandas(types_mapper=pandas_types_mapper)
+        else:
+            batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+            return (
+                query_table(
+                    table=self._data,
+                    key=slice(offset, offset + batch_size),
+                    indices=self._indices if self._indices is not None else None,
+                ).to_pandas(types_mapper=pandas_types_mapper)
+                for offset in range(0, len(self), batch_size)
+            )
+
+    def to_parquet(
+        self,
+        path_or_buf: Union[PathLike, BinaryIO],
+        batch_size: Optional[int] = None,
+        **parquet_writer_kwargs,
+    ) -> int:
+        """Exports the dataset to Parquet.
+
+        Args:
+            path_or_buf (`PathLike` or `FileOrBuffer`):
+                Either a path to a file or a BinaryIO.
+            batch_size (`int`, *optional*):
+                Size of the batch to load in memory and write at once.
+                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+            **parquet_writer_kwargs (additional keyword arguments):
+                Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
+
+        Returns:
+            `int`: The number of characters or bytes written.
+
+        Example:
+
+        ```py
+        >>> ds.to_parquet("path/to/dataset/directory")
+        ```
+        """
+        # Dynamic import to avoid circular dependency
+        from .io.parquet import ParquetDatasetWriter
+
+        return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write()
+
+    def to_sql(
+        self,
+        name: str,
+        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+        batch_size: Optional[int] = None,
+        **sql_writer_kwargs,
+    ) -> int:
+        """Exports the dataset to a SQL database.
+
+        Args:
+            name (`str`):
+                Name of SQL table.
+            con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+                A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
+            batch_size (`int`, *optional*):
+                Size of the batch to load in memory and write at once.
+                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+            **sql_writer_kwargs (additional keyword arguments):
+                Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
+
+
+                Now, `index` defaults to `False` if not specified.
+
+                If you would like to write the index, pass `index=True` and also set a name for the index column by
+                passing `index_label`.
+
+
+        Returns:
+            `int`: The number of records written.
+
+        Example:
+
+        ```py
+        >>> # con provided as a connection URI string
+        >>> ds.to_sql("data", "sqlite:///my_own_db.sql")
+        >>> # con provided as a sqlite3 connection object
+        >>> import sqlite3
+        >>> con = sqlite3.connect("my_own_db.sql")
+        >>> with con:
+        ...     
ds.to_sql("data", con) + ``` + """ + # Dynamic import to avoid circular dependency + from .io.sql import SqlDatasetWriter + + return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write() + + def _estimate_nbytes(self) -> int: + dataset_nbytes = self.data.nbytes + + # Find decodable columns, because if there are any, we need to + # adjust the dataset size computation (needed for sharding) to account for possible external files + decodable_columns = [ + k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True) + ] + + if decodable_columns: + # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples + extra_nbytes = 0 + + def extra_nbytes_visitor(array, feature): + nonlocal extra_nbytes + if isinstance(feature, (Audio, Image)): + for x in array.to_pylist(): + if x is not None and x["bytes"] is None and x["path"] is not None: + size = xgetsize(x["path"]) + extra_nbytes += size + extra_nbytes -= array.field("path").nbytes + + table = self.with_format("arrow")[:1000] + table_visitor(table, extra_nbytes_visitor) + + extra_nbytes = extra_nbytes * len(self.data) / len(table) + dataset_nbytes = dataset_nbytes + extra_nbytes + + if self._indices is not None: + dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data) + return dataset_nbytes + + @staticmethod + def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int): + for shard_idx, shard in enumerate(shards): + for pa_table in shard.with_format("arrow").iter(batch_size): + yield shard_idx, pa_table + + @staticmethod + def _generate_tables_from_cache_file(filename: str): + for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)): + yield batch_idx, pa.Table.from_batches([batch]) + + def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset": + """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`]. + This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files. + + Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop). + Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets. + All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset. + + Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`]. + This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough. + + To get the best speed performance, make sure your dataset doesn't have an indices mapping. + If this is the case, the data are not read contiguously, which can be slow sometimes. + You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset. + + Args: + num_shards (`int`, default to `1`): + Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly, + and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example. + Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk. 
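+
+        As a sketch of the semantics: iterating the result with `num_shards=4` yields the
+        same examples, in the same order, as iterating the four contiguous shards one after
+        the other:
+
+        ```py
+        >>> ids = ds.to_iterable_dataset(num_shards=4)
+        >>> # equivalent order: ds.shard(4, 0, contiguous=True), then ds.shard(4, 1, ...), etc.
+        ```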
+
+        Returns:
+            [`datasets.IterableDataset`]
+
+        Example:
+
+        Basic usage:
+        ```python
+        >>> ids = ds.to_iterable_dataset()
+        >>> for example in ids:
+        ...     pass
+        ```
+
+        With lazy filtering and processing:
+        ```python
+        >>> ids = ds.to_iterable_dataset()
+        >>> ids = ids.filter(filter_fn).map(process_fn)  # will filter and process on-the-fly when you start iterating over the iterable dataset
+        >>> for example in ids:
+        ...     pass
+        ```
+
+        With sharding to enable efficient shuffling:
+        ```python
+        >>> ids = ds.to_iterable_dataset(num_shards=64)  # the dataset is split into 64 shards to be iterated over
+        >>> ids = ids.shuffle(buffer_size=10_000)  # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
+        >>> for example in ids:
+        ...     pass
+        ```
+
+        With a PyTorch DataLoader:
+        ```python
+        >>> import torch
+        >>> ids = ds.to_iterable_dataset(num_shards=64)
+        >>> ids = ids.filter(filter_fn).map(process_fn)
+        >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4)  # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
+        >>> for example in ids:
+        ...     pass
+        ```
+
+        With a PyTorch DataLoader and shuffling:
+        ```python
+        >>> import torch
+        >>> ids = ds.to_iterable_dataset(num_shards=64)
+        >>> ids = ids.shuffle(buffer_size=10_000)  # will shuffle the shards order and use a shuffle buffer when you start iterating
+        >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4)  # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
+        >>> for example in ids:
+        ...     pass
+        ```
+
+        In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling:
+        ```python
+        >>> from datasets.distributed import split_dataset_by_node
+        >>> ids = ds.to_iterable_dataset(num_shards=512)
+        >>> ids = ids.shuffle(buffer_size=10_000)  # will shuffle the shards order and use a shuffle buffer when you start iterating
+        >>> ids = split_dataset_by_node(ids, world_size=8, rank=0)  # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating
+        >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4)  # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
+        >>> for example in ids:
+        ...     pass
+        ```
+
+        With shuffling and multiple epochs:
+        ```python
+        >>> ids = ds.to_iterable_dataset(num_shards=64)
+        >>> ids = ids.shuffle(buffer_size=10_000, seed=42)  # will shuffle the shards order and use a shuffle buffer when you start iterating
+        >>> for epoch in range(n_epochs):
+        ...     ids.set_epoch(epoch)  # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
+        ...     for example in ids:
+        ...         pass
+        ```
+        Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
+        """
+        from .iterable_dataset import ArrowExamplesIterable, IterableDataset
+
+        if self._format_type is not None:
+            raise NotImplementedError(
+                "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
+            )
+        if num_shards > len(self):
+            raise ValueError(
+                f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
+ ) + if self._indices is not None: + logger.info( + "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. " + "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed." + ) + shards = ( + [copy.deepcopy(self)] + if num_shards == 1 + else [ + self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards) + ] + ) + ex_iterable = ArrowExamplesIterable( + Dataset._generate_tables_from_shards, + kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE}, + ) + return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features)) + + def _push_parquet_shards_to_hub( + self, + repo_id: str, + data_dir: str = "data", + split: Optional[str] = None, + token: Optional[str] = None, + revision: Optional[str] = None, + create_pr: Optional[bool] = False, + max_shard_size: Optional[Union[int, str]] = None, + num_shards: Optional[int] = None, + embed_external_files: bool = True, + ) -> Tuple[str, str, int, int, List[str], int]: + """Pushes the dataset shards as Parquet files to the hub. + + Returns: + additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards + uploaded_size (`int`): number of uploaded bytes to the repository + dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset afer uncompression + """ + # Find decodable columns, because if there are any, we need to: + # embed the bytes from the files in the shards + decodable_columns = ( + [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)] + if embed_external_files + else [] + ) + + dataset_nbytes = self._estimate_nbytes() + + if num_shards is None: + max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) + num_shards = int(dataset_nbytes / max_shard_size) + 1 + num_shards = max(num_shards, 1) + + shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) + + if decodable_columns: + + def shards_with_embedded_external_files(shards): + for shard in shards: + format = shard.format + shard = shard.with_format("arrow") + shard = shard.map( + embed_table_storage, + batched=True, + batch_size=1000, + keep_in_memory=True, + ) + shard = shard.with_format(**format) + yield shard + + shards = shards_with_embedded_external_files(shards) + + api = HfApi(endpoint=config.HF_ENDPOINT, token=token) + + uploaded_size = 0 + additions = [] + for index, shard in hf_tqdm( + enumerate(shards), + desc="Uploading the dataset shards", + total=num_shards, + ): + shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet" + buffer = BytesIO() + shard.to_parquet(buffer) + uploaded_size += buffer.tell() + shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer) + _retry( + api.preupload_lfs_files, + func_kwargs={ + "repo_id": repo_id, + "additions": [shard_addition], + "token": token, + "repo_type": "dataset", + "revision": revision, + "create_pr": create_pr, + }, + exceptions=HTTPError, + status_codes=[504], + base_wait_time=2.0, + max_retries=5, + max_wait_time=20.0, + ) + additions.append(shard_addition) + + return additions, uploaded_size, dataset_nbytes + + def push_to_hub( + self, + repo_id: str, + config_name: str = "default", + split: Optional[str] = None, + commit_message: Optional[str] = None, + private: Optional[bool] = False, + token: Optional[str] = None, + revision: Optional[str] = None, 
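+        # note: "deprecated" is a sentinel default; any other value triggers a FutureWarning
+        # and is forwarded to `revision` (see the deprecation handling in the body)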
+ branch="deprecated", + create_pr: Optional[bool] = False, + max_shard_size: Optional[Union[int, str]] = None, + num_shards: Optional[int] = None, + embed_external_files: bool = True, + ): + """Pushes the dataset to the hub as a Parquet dataset. + The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed. + + The resulting Parquet files are self-contained by default. If your dataset contains [`Image`] or [`Audio`] + data, the Parquet files will store the bytes of your images or audio files. + You can disable this by setting `embed_external_files` to `False`. + + Args: + repo_id (`str`): + The ID of the repository to push to in the following format: `/` or + `/`. Also accepts ``, which will default to the namespace + of the logged-in user. + config_name (`str`, defaults to "default"): + The configuration name (or subset) of a dataset. Defaults to "default". + split (`str`, *optional*): + The name of the split that will be given to that dataset. Defaults to `self.split`. + commit_message (`str`, *optional*): + Message to commit while pushing. Will default to `"Upload dataset"`. + private (`bool`, *optional*, defaults to `False`): + Whether the dataset repository should be set to private or not. Only affects repository creation: + a repository that already exists will not be affected by that parameter. + token (`str`, *optional*): + An optional authentication token for the Hugging Face Hub. If no token is passed, will default + to the token saved locally when logging in with `huggingface-cli login`. Will raise an error + if no token is passed and the user is not logged-in. + revision (`str`, *optional*): + Branch to push the uploaded files to. Defaults to the `"main"` branch. + + + branch (`str`, *optional*): + The git branch on which to push the dataset. This defaults to the default branch as specified + in your repository, which defaults to `"main"`. + + + + `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0. + + + create_pr (`bool`, *optional*, defaults to `False`): + Whether or not to create a PR with the uploaded files or directly commit. + + + max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): + The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by + a unit (like `"5MB"`). + num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size`. + + + embed_external_files (`bool`, defaults to `True`): + Whether to embed file bytes in the shards. + In particular, this will do the following before the push for the fields of type: + + - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files. + + Example: + + ```python + >>> dataset.push_to_hub("/") + >>> dataset_dict.push_to_hub("/", private=True) + >>> dataset.push_to_hub("/", max_shard_size="1GB") + >>> dataset.push_to_hub("/", num_shards=1024) + ``` + + If your dataset has multiple splits (e.g. train/validation/test): + + ```python + >>> train_dataset.push_to_hub("/", split="train") + >>> val_dataset.push_to_hub("/", split="validation") + >>> # later + >>> dataset = load_dataset("/") + >>> train_dataset = dataset["train"] + >>> val_dataset = dataset["validation"] + ``` + + If you want to add a new configuration (or subset) to a dataset (e.g. 
if the dataset has multiple tasks/versions/languages):
+
+        ```python
+        >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
+        >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
+        >>> # later
+        >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
+        >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
+        ```
+        """
+        if config_name == "data":
+            raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
+
+        if max_shard_size is not None and num_shards is not None:
+            raise ValueError(
+                "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+            )
+
+        if split is None:
+            split = str(self.split) if self.split is not None else "train"
+
+        if not re.match(_split_re, split):
+            raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+        if branch != "deprecated":
+            warnings.warn(
+                "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+                f"You can remove this warning by passing 'revision={branch}' instead.",
+                FutureWarning,
+            )
+            revision = branch
+
+        api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+        repo_url = api.create_repo(
+            repo_id,
+            token=token,
+            repo_type="dataset",
+            private=private,
+            exist_ok=True,
+        )
+        repo_id = repo_url.repo_id
+
+        if revision is not None:
+            api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+        data_dir = config_name if config_name != "default" else "data"  # for backward compatibility
+
+        additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
+            repo_id=repo_id,
+            data_dir=data_dir,
+            split=split,
+            token=token,
+            revision=revision,
+            max_shard_size=max_shard_size,
+            num_shards=num_shards,
+            create_pr=create_pr,
+            embed_external_files=embed_external_files,
+        )
+
+        # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+        # and delete old split shards (if they exist)
+        repo_with_dataset_card, repo_with_dataset_infos = False, False
+        deletions, deleted_size = [], 0
+        repo_splits = []  # use a list to keep the order of the splits
+        repo_files_to_add = [addition.path_in_repo for addition in additions]
+        for repo_file in api.list_files_info(repo_id, revision=revision, repo_type="dataset", token=token):
+            if repo_file.rfilename == "README.md":
+                repo_with_dataset_card = True
+            elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+                repo_with_dataset_infos = True
+            elif (
+                repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
+            ):
+                deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+                deleted_size += repo_file.size
+            elif fnmatch.fnmatch(
+                repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+            ):
+                repo_split = string_to_dict(
+                    repo_file.rfilename,
+                    glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+                )["split"]
+                if repo_split not in repo_splits:
+                    repo_splits.append(repo_split)
+
+        organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
+        info_to_dump = self.info.copy()
+        info_to_dump.download_checksums = None
+        info_to_dump.download_size = uploaded_size
+        info_to_dump.dataset_size = dataset_nbytes
+        info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
+        info_to_dump.config_name = config_name
+        info_to_dump.splits = SplitDict(
+            {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self),
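+                # `dataset_name` is the repo name parsed from `repo_id` a few lines above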
dataset_name=dataset_name)} + ) + # get the info from the README to update them + if repo_with_dataset_card: + dataset_card_path = api.hf_hub_download(repo_id, "README.md", repo_type="dataset", revision=revision) + dataset_card = DatasetCard.load(Path(dataset_card_path)) + dataset_card_data = dataset_card.data + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data) + if dataset_infos and config_name in dataset_infos: + repo_info = dataset_infos[config_name] + else: + repo_info = None + # get the deprecated dataset_infos.json to update them + elif repo_with_dataset_infos: + dataset_card = None + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs() + dataset_infos_path = api.hf_hub_download( + repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision + ) + with open(dataset_infos_path, encoding="utf-8") as f: + dataset_infos: dict = json.load(f) + dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None + repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None + else: + dataset_card = None + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs() + repo_info = None + # update the total info to dump from existing info + if repo_info is not None: + logger.info("Updating downloaded metadata with the new split.") + if repo_info.splits and list(repo_info.splits) != [split]: + if self._info.features != repo_info.features: + raise ValueError( + f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}" + ) + + if split in repo_info.splits: + repo_info.download_size -= deleted_size + repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0 + + repo_info.download_checksums = None + repo_info.download_size = (repo_info.download_size or 0) + uploaded_size + repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes + repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size + repo_info.splits.pop(split, None) + repo_info.splits[split] = SplitInfo( + split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name + ) + info_to_dump = repo_info + # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed + if not metadata_configs and repo_splits: + default_metadata_configs_to_dump = { + "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits] + } + MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) + # update the metadata configs + if config_name in metadata_configs: + metadata_config = metadata_configs[config_name] + if "data_files" in metadata_config: + data_files_to_dump = sanitize_patterns(metadata_config["data_files"]) + else: + data_files_to_dump = {} + # add the new split + data_files_to_dump[split] = [f"{data_dir}/{split}-*"] + metadata_config_to_dump = { + "data_files": [ + { + "split": _split, + "path": _pattern[0] if len(_pattern) == 1 else _pattern, + } + for _split, _pattern in data_files_to_dump.items() + ] + } + else: + metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]} + # push to the deprecated dataset_infos.json + if repo_with_dataset_infos: + dataset_infos_path = api.hf_hub_download( + repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", 
revision=revision + ) + with open(dataset_infos_path, encoding="utf-8") as f: + dataset_infos: dict = json.load(f) + dataset_infos[config_name] = asdict(info_to_dump) + buffer = BytesIO() + buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) + additions.append( + CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) + ) + # push to README + DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) + MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) + dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card + additions.append(CommitOperationAdd(path_in_repo="README.md", path_or_fileobj=str(dataset_card).encode())) + + commit_message = commit_message if commit_message is not None else "Upload dataset" + if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: + api.create_commit( + repo_id, + operations=additions + deletions, + commit_message=commit_message, + token=token, + repo_type="dataset", + revision=revision, + create_pr=create_pr, + ) + else: + logger.info( + f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits." + ) + num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) + for i in range(0, num_commits): + operations = additions[ + i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT + ] + (deletions if i == 0 else []) + api.create_commit( + repo_id, + operations=operations, + commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", + token=token, + repo_type="dataset", + revision=revision, + create_pr=create_pr, + ) + logger.info( + f"Commit #{i+1} completed" + + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "") + + "." + ) + + @transmit_format + @fingerprint_transform(inplace=False) + def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): + """Add column to Dataset. + + + + Args: + name (`str`): + Column name. + column (`list` or `np.array`): + Column data to be added. + + Returns: + [`Dataset`] + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> more_text = ds["text"] + >>> ds.add_column(name="text_2", column=more_text) + Dataset({ + features: ['text', 'label', 'text_2'], + num_rows: 1066 + }) + ``` + """ + column_table = InMemoryTable.from_pydict({name: column}) + _check_column_names(self._data.column_names + column_table.column_names) + dataset = self.flatten_indices() if self._indices is not None else self + # Concatenate tables horizontally + table = concat_tables([dataset._data, column_table], axis=1) + # Update features + info = dataset.info.copy() + info.features.update(Features.from_arrow_schema(column_table.schema)) + table = update_metadata_with_features(table, info.features) + return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint) + + def add_faiss_index( + self, + column: str, + index_name: Optional[str] = None, + device: Optional[int] = None, + string_factory: Optional[str] = None, + metric_type: Optional[int] = None, + custom_index: Optional["faiss.Index"] = None, # noqa: F821 + batch_size: int = 1000, + train_size: Optional[int] = None, + faiss_verbose: bool = False, + dtype=np.float32, + ): + """Add a dense index using Faiss for fast retrieval. 
+        By default the index is done over the vectors of the specified column.
+        You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+        You can find more information about Faiss here:
+
+        - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+        Args:
+            column (`str`):
+                The column of the vectors to add to the index.
+            index_name (`str`, *optional*):
+                The `index_name`/identifier of the index.
+                This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+                By default it corresponds to `column`.
+            device (`Union[int, List[int]]`, *optional*):
+                If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+                If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+            string_factory (`str`, *optional*):
+                This is passed to the index factory of Faiss to create the index.
+                Default index class is `IndexFlat`.
+            metric_type (`int`, *optional*):
+                Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+            custom_index (`faiss.Index`, *optional*):
+                Custom Faiss index that you already have instantiated and configured for your needs.
+            batch_size (`int`):
+                Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+            train_size (`int`, *optional*):
+                If the index needs a training step, specifies how many vectors will be used to train the index.
+            faiss_verbose (`bool`, defaults to `False`):
+                Enable the verbosity of the Faiss index.
+            dtype (`data-type`):
+                The dtype of the numpy arrays that are indexed.
+                Default is `np.float32`.
+
+        Example:
+
+        ```python
+        >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+        >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
+        >>> ds_with_embeddings.add_faiss_index(column='embeddings')
+        >>> # query
+        >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
+        >>> # save index
+        >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
+
+        >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+        >>> # load index
+        >>> ds.load_faiss_index('embeddings', 'my_index.faiss')
+        >>> # query
+        >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
+        ```
+        """
+        with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
+            super().add_faiss_index(
+                column=column,
+                index_name=index_name,
+                device=device,
+                string_factory=string_factory,
+                metric_type=metric_type,
+                custom_index=custom_index,
+                batch_size=batch_size,
+                train_size=train_size,
+                faiss_verbose=faiss_verbose,
+            )
+        return self
+
+    def add_faiss_index_from_external_arrays(
+        self,
+        external_arrays: np.array,
+        index_name: str,
+        device: Optional[int] = None,
+        string_factory: Optional[str] = None,
+        metric_type: Optional[int] = None,
+        custom_index: Optional["faiss.Index"] = None,  # noqa: F821
+        batch_size: int = 1000,
+        train_size: Optional[int] = None,
+        faiss_verbose: bool = False,
+        dtype=np.float32,
+    ):
+        """Add a dense index using Faiss for fast retrieval.
+        The index is created using the vectors of `external_arrays`.
+        You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here: + + - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory) + + Args: + external_arrays (`np.array`): + If you want to use arrays from outside the lib for the index, you can set `external_arrays`. + It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. + index_name (`str`): + The `index_name`/identifier of the index. + This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`]. + device (Optional `Union[int, List[int]]`, *optional*): + If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. + If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. + string_factory (`str`, *optional*): + This is passed to the index factory of Faiss to create the index. + Default index class is `IndexFlat`. + metric_type (`int`, *optional*): + Type of metric. Ex: `faiss.faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. + custom_index (`faiss.Index`, *optional*): + Custom Faiss index that you already have instantiated and configured for your needs. + batch_size (`int`, *optional*): + Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. + + train_size (`int`, *optional*): + If the index needs a training step, specifies how many vectors will be used to train the index. + faiss_verbose (`bool`, defaults to False): + Enable the verbosity of the Faiss index. + dtype (`numpy.dtype`): + The dtype of the numpy arrays that are indexed. Default is np.float32. + """ + super().add_faiss_index_from_external_arrays( + external_arrays=external_arrays.astype(dtype), + index_name=index_name, + device=device, + string_factory=string_factory, + metric_type=metric_type, + custom_index=custom_index, + batch_size=batch_size, + train_size=train_size, + faiss_verbose=faiss_verbose, + ) + + def add_elasticsearch_index( + self, + column: str, + index_name: Optional[str] = None, + host: Optional[str] = None, + port: Optional[int] = None, + es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 + es_index_name: Optional[str] = None, + es_index_config: Optional[dict] = None, + ): + """Add a text index using ElasticSearch for fast retrieval. This is done in-place. + + Args: + column (`str`): + The column of the documents to add to the index. + index_name (`str`, *optional*): + The `index_name`/identifier of the index. + This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`Dataset.search`]. + By default it corresponds to `column`. + host (`str`, *optional*, defaults to `localhost`): + Host of where ElasticSearch is running. + port (`str`, *optional*, defaults to `9200`): + Port of where ElasticSearch is running. + es_client (`elasticsearch.Elasticsearch`, *optional*): + The elasticsearch client used to create the index if host and port are `None`. + es_index_name (`str`, *optional*): + The elasticsearch index name used to create the index. + es_index_config (`dict`, *optional*): + The configuration of the elasticsearch index. 
+ Default config is: + ``` + { + "settings": { + "number_of_shards": 1, + "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, + }, + "mappings": { + "properties": { + "text": { + "type": "text", + "analyzer": "standard", + "similarity": "BM25" + }, + } + }, + } + ``` + Example: + + ```python + >>> es_client = elasticsearch.Elasticsearch() + >>> ds = datasets.load_dataset('crime_and_punish', split='train') + >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") + >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10) + ``` + """ + with self.formatted_as(type=None, columns=[column]): + super().add_elasticsearch_index( + column=column, + index_name=index_name, + host=host, + port=port, + es_client=es_client, + es_index_name=es_index_name, + es_index_config=es_index_config, + ) + return self + + @transmit_format + @fingerprint_transform(inplace=False) + def add_item(self, item: dict, new_fingerprint: str): + """Add item to Dataset. + + + + Args: + item (`dict`): + Item data to be added. + + Returns: + [`Dataset`] + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} + >>> ds = ds.add_item(new_review) + >>> ds[-1] + {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} + ``` + """ + item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()}) + # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe" + dset_features, item_features = _align_features( + [self._info.features, Features.from_arrow_schema(item_table.schema)] + ) + # Cast to align the schemas of the tables and concatenate the tables + table = concat_tables( + [ + self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data, + item_table.cast(item_features.arrow_schema), + ] + ) + if self._indices is None: + indices_table = None + else: + item_indices_array = pa.array([len(self._data)], type=pa.uint64()) + item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) + indices_table = concat_tables([self._indices, item_indices_table]) + info = self.info.copy() + info.features.update(item_features) + table = update_metadata_with_features(table, info.features) + return Dataset( + table, + info=info, + split=self.split, + indices_table=indices_table, + fingerprint=new_fingerprint, + ) + + def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": + """Align the dataset's label ID and label name mapping to match an input `label2id` mapping. + This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. + The alignment in done using the lowercase label names. + + Args: + label2id (`dict`): + The label name to ID mapping to align the dataset with. + label_column (`str`): + The column name of labels to align on. 
+ + Example: + + ```python + >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2} + >>> ds = load_dataset("glue", "mnli", split="train") + >>> # mapping to align with + >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2} + >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label") + ``` + + """ + # Sanity checks + if label_column not in self._data.column_names: + raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") + + label_feature = self._info.features[label_column] + if not ( + isinstance(label_feature, ClassLabel) + or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel)) + ): + raise ValueError( + f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}." + ) + + # Sort input mapping by ID value to ensure the label names are aligned + label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) + label_names = list(label2id.keys()) + # Some label mappings use uppercase label names so we lowercase them during alignment + label2id = {k.lower(): v for k, v in label2id.items()} + int2str_function = ( + label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str + ) + + if isinstance(label_feature, ClassLabel): + + def process_label_ids(batch): + dset_label_names = [ + int2str_function(label_id).lower() if label_id is not None else None + for label_id in batch[label_column] + ] + batch[label_column] = [ + label2id[label_name] if label_name is not None else None for label_name in dset_label_names + ] + return batch + + else: + + def process_label_ids(batch): + dset_label_names = [ + [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq] + for seq in batch[label_column] + ] + batch[label_column] = [ + [label2id[label_name] if label_name is not None else None for label_name in seq] + for seq in dset_label_names + ] + return batch + + features = self.features + features[label_column] = ( + ClassLabel(num_classes=len(label_names), names=label_names) + if isinstance(label_feature, ClassLabel) + else Sequence(ClassLabel(num_classes=len(label_names), names=label_names)) + ) + return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels") + + +def _concatenate_map_style_datasets( + dsets: List[Dataset], + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + axis: int = 0, +): + """ + Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`. + When you concatenate on axis 0, missing data are filled with None values. + + Args: + dsets (`List[datasets.Dataset]`): List of Datasets to concatenate. + info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc. + split (:class:`NamedSplit`, optional): Name of the dataset split. + axis (``{0, 1}``, default ``0``, meaning over rows): + Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns + (horizontally). 
+ + *New in version 1.6.0* + + Example: + + ```py + >>> ds3 = _concatenate_map_style_datasets([ds1, ds2]) + ``` + """ + # Ignore datasets with no rows + if any(dset.num_rows > 0 for dset in dsets): + dsets = [dset for dset in dsets if dset.num_rows > 0] + else: + # Return first dataset if all datasets are empty + return dsets[0] + + # Perform checks (and a potentional cast if axis=0) + if axis == 0: + _check_if_features_can_be_aligned([dset.features for dset in dsets]) + else: + if not all(dset.num_rows == dsets[0].num_rows for dset in dsets): + raise ValueError("Number of rows must match for all datasets") + _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names]) + + # Find common format or reset format + format = dsets[0].format + if any(dset.format != format for dset in dsets): + format = {} + logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.") + + def apply_offset_to_indices_table(table, offset): + if offset == 0: + return table + else: + array = table["indices"] + new_array = pc.add(array, pa.scalar(offset, type=pa.uint64())) + return InMemoryTable.from_arrays([new_array], names=["indices"]) + + # Concatenate indices if they exist + if any(dset._indices is not None for dset in dsets): + if axis == 0: + # Datasets with no indices tables are replaced with a dataset with an indices table in memory. + # Applying an offset to an indices table also brings the table in memory. + indices_tables = [] + for i in range(len(dsets)): + if dsets[i]._indices is None: + dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i]))) + indices_tables.append(dsets[i]._indices) + + # An offset needs to be applied to the indices before concatenating + offset = 0 + for i in range(len(dsets)): + indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset) + offset += len(dsets[i]._data) + + # Concatenate indices + indices_tables = [t for t in indices_tables if len(t) > 0] + if indices_tables: + indices_table = concat_tables(indices_tables) + else: + indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()})) + else: + if len(dsets) == 1: + indices_table = dsets[0]._indices + else: + for i in range(len(dsets)): + dsets[i] = dsets[i].flatten_indices() + indices_table = None + else: + indices_table = None + + table = concat_tables([dset._data for dset in dsets], axis=axis) + if axis == 0: + features_list = _align_features([dset.features for dset in dsets]) + else: + features_list = [dset.features for dset in dsets] + table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()}) + + # Concatenate infos + if info is None: + info = DatasetInfo.from_merge([dset.info for dset in dsets]) + fingerprint = update_fingerprint( + "".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split} + ) + + # Make final concatenated dataset + concatenated_dataset = Dataset( + table, + info=info, + split=split, + indices_table=indices_table, + fingerprint=fingerprint, + ) + concatenated_dataset.set_format(**format) + return concatenated_dataset + + +def _interleave_map_style_datasets( + datasets: List["Dataset"], + probabilities: Optional[List[float]] = None, + seed: Optional[int] = None, + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", + **kwargs, +) -> "Dataset": + """ + 
Interleave several map-style datasets (sources) into a single map-style dataset.
+    The new dataset is constructed by alternating between the sources to get the examples.
+    If `probabilities = None` (default) the new dataset is constructed by cycling between each source to get the examples.
+    If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+    Args:
+        datasets (`List[Dataset]`): list of datasets to interleave
+        probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
+            examples from one source at a time according to these probabilities.
+        seed (`int`, optional, default None): The random seed used to choose a source for each example.
+        info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+        split (:class:`NamedSplit`, optional): Name of the dataset split.
+        stopping_strategy (`str`, defaults to `first_exhausted`):
+            Two strategies are proposed right now.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+            - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+        **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.
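+
+    Example (an illustrative sketch of the default cycling behaviour; the values are assumptions):
+
+    ```python
+    d1 = Dataset.from_dict({"a": [0, 1, 2]})
+    d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+    dataset = _interleave_map_style_datasets([d1, d2])
+    assert dataset["a"] == [0, 10, 1, 11, 2, 12]  # d1 is exhausted first
+    ```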
+
+    Output:
+        :class:`datasets.Dataset`
+    """
+    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+        raise ValueError(
+            f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
+        )
+
+    # To interleave the datasets, we concatenate them and then we re-order the indices
+    concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)
+
+    # Let's now build the indices to pass to .select()
+    lengths = [len(dset) for dset in datasets]
+    offsets = np.cumsum([0] + lengths[:-1])
+
+    # if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
+    oversampling = stopping_strategy == "all_exhausted"
+
+    if probabilities is None and not oversampling:
+        # Undersampling situation with cycling between the sources
+        # Example: if lengths of the datasets are [3, 4, 5]
+        # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
+        # Note that we only have 3 examples per dataset since the first dataset ran out of examples
+
+        # Reasoning behind the following operation: keeping the min_length first indices of each dataset
+        # while offsetting in order to correspond to the right indices of the concatenated dataset
+        # and flattening to effectively interleave the datasets
+        indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
+    elif probabilities is None:
+        # Oversampling situation with cycling between the sources
+        # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
+        # Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples
+
+        # Reasoning behind the following operation: for each dataset indices (i.e. column) repeat the indices to have max_length indices per dataset
+        # For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
+        indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))
+
+        # We have to keep the indices to their respective dataset offsets and to flatten to effectively interleave the datasets
+        indices = (indices + offsets).flatten().tolist()
+
+    else:
+        # boolean array indicating whether the dataset at index i has been fully exhausted
+        is_exhausted = np.full(len(lengths), False)
+
+        # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+        bool_strategy_func = np.all if oversampling else np.any
+
+        def iter_random_indices():
+            """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+            rng = np.random.default_rng(seed)
+            while True:
+                yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))
+
+        current_index = [0] * len(datasets)
+        indices = []
+        for source_idx in iter_random_indices():
+            # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
+            # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
+            if bool_strategy_func(is_exhausted):
+                # the stopping condition was reached, let's stop
+                break
+
+            # let's add the example at the current index of the `source_idx`-th dataset
+            indices.append(current_index[source_idx] + offsets[source_idx])
+            current_index[source_idx] += 1
+
+            # we've run
out of examples for the current dataset, let's update our boolean array and bring the current_index back to 0 + if current_index[source_idx] >= lengths[source_idx]: + is_exhausted[source_idx] = True + current_index[source_idx] = 0 + + return concatenated_datasets.select(indices, **kwargs) + + +def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset: + """ + Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`. + Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. + To maximize data loading throughput, chunks are made of contiguous data on disk if possible. + + Args: + dataset ([`Dataset`]): + The dataset to split by node. + rank (`int`): + Rank of the current node. + world_size (`int`): + Total number of nodes. + + Returns: + [`Dataset`]: The dataset to be used on the node at rank `rank`. + """ + return dataset.shard(num_shards=world_size, index=rank, contiguous=True) + + +# This is outside Dataset.filter as it needs to be picklable for multiprocessing + + +def get_indices_from_mask_function( + function: Callable, + batched: bool, + with_indices: bool, + input_columns: Optional[Union[str, List[str]]], + indices_mapping: Optional[Table] = None, + *args, + **fn_kwargs, +): + if batched: + # we extract indices from args + *inputs, indices = args + if with_indices: + mask = function(*inputs, indices, **fn_kwargs) + else: + mask = function(*inputs, **fn_kwargs) + else: + # we get batched data (to do less look-ups) but `function` only accepts one example + # therefore we need to call `function` on each example of the batch to get the mask + *inputs, indices = args + mask = [] + if input_columns is None: + # inputs only contains a batch of examples + batch: dict = inputs[0] + num_examples = len(batch[next(iter(batch.keys()))]) + for i in range(num_examples): + example = {key: batch[key][i] for key in batch} + mask.append( + function(example, indices[i], **fn_kwargs) if with_indices else function(example, **fn_kwargs) + ) + else: + # inputs is a list of columns + columns: List[List] = inputs + num_examples = len(columns[0]) + for i in range(num_examples): + input = [column[i] for column in columns] + mask.append( + function(*input, indices[i], **fn_kwargs) if with_indices else function(*input, **fn_kwargs) + ) + indices_array = [i for i, to_keep in zip(indices, mask) if to_keep] + if indices_mapping is not None: + indices_array = pa.array(indices_array, type=pa.uint64()) + indices_array = indices_mapping.column(0).take(indices_array) + indices_array = indices_array.to_pylist() + return {"indices": indices_array} diff --git a/testbed/huggingface__datasets/src/datasets/arrow_reader.py b/testbed/huggingface__datasets/src/datasets/arrow_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..fbafe0c8ab56e84b6b279ff9b0594131507002e3 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/arrow_reader.py @@ -0,0 +1,653 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" Arrow ArrowReader.""" + +import copy +import math +import os +import re +import shutil +from dataclasses import dataclass +from pathlib import Path +from typing import TYPE_CHECKING, List, Optional, Union + +import pyarrow as pa +import pyarrow.parquet as pq + +from .download.download_config import DownloadConfig +from .naming import _split_re, filenames_for_dataset_split +from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables +from .utils import logging +from .utils.file_utils import cached_path + + +if TYPE_CHECKING: + from .info import DatasetInfo # noqa: F401 + from .splits import Split, SplitInfo # noqa: F401 + + +logger = logging.get_logger(__name__) + +HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets" + +_SUB_SPEC_RE = re.compile( + rf""" +^ + (?P{_split_re[1:-1]}) + (\[ + ((?P-?\d+) + (?P%)?)? + : + ((?P-?\d+) + (?P%)?)? + \])?(\((?P[^\)]*)\))? +$ +""", # remove ^ and $ + re.X, +) + +_ADDITION_SEP_RE = re.compile(r"\s*\+\s*") + + +class DatasetNotOnHfGcsError(ConnectionError): + """When you can't get the dataset from the Hf google cloud storage""" + + pass + + +class MissingFilesOnHfGcsError(ConnectionError): + """When some files are missing on the Hf oogle cloud storage""" + + pass + + +@dataclass(frozen=True) +class FileInstructions: + """The file instructions associated with a split ReadInstruction. + + Attributes: + num_examples: `int`, The total number of examples + file_instructions: List[dict(filename, skip, take)], the files information. + The filenames contains the relative path, not absolute. + skip/take indicates which example read in the file: `ds.slice(skip, take)` + """ + + num_examples: int + file_instructions: List[dict] + + +def make_file_instructions( + name: str, + split_infos: List["SplitInfo"], + instruction: Union[str, "ReadInstruction"], + filetype_suffix: Optional[str] = None, + prefix_path: Optional[str] = None, +) -> FileInstructions: + """Returns instructions of the split dict. + + Args: + name (`str`): Name of the dataset. + split_infos (`list` of `[SplitInfo]`): Dataset splits information. + instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset. + filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'. + prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name. 
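+
+    Example (an illustrative sketch; the dataset name, cache path and split sizes are assumptions):
+
+    ```python
+    file_instructions = make_file_instructions(
+        "my_dataset",
+        split_infos,  # e.g. [SplitInfo(name="train", num_examples=100)]
+        "train[:50%]",
+        filetype_suffix="arrow",
+        prefix_path="/cache/my_dataset",
+    )
+    assert file_instructions.num_examples == 50
+    ```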
+ + Returns: + [`FileInstructions`] + """ + if not isinstance(name, str): + raise TypeError(f"Expected str 'name', but got: {type(name).__name__}") + elif not name: + raise ValueError("Expected non-empty str 'name'") + name2len = {info.name: info.num_examples for info in split_infos} + name2shard_lengths = {info.name: info.shard_lengths for info in split_infos} + name2filenames = { + info.name: filenames_for_dataset_split( + path=prefix_path, + dataset_name=name, + split=info.name, + filetype_suffix=filetype_suffix, + shard_lengths=name2shard_lengths[info.name], + ) + for info in split_infos + } + if not isinstance(instruction, ReadInstruction): + instruction = ReadInstruction.from_spec(instruction) + # Create the absolute instruction (per split) + absolute_instructions = instruction.to_absolute(name2len) + + # For each split, return the files instruction (skip/take) + file_instructions = [] + num_examples = 0 + for abs_instr in absolute_instructions: + split_length = name2len[abs_instr.splitname] + filenames = name2filenames[abs_instr.splitname] + shard_lengths = name2shard_lengths[abs_instr.splitname] + from_ = 0 if abs_instr.from_ is None else abs_instr.from_ + to = split_length if abs_instr.to is None else abs_instr.to + if shard_lengths is None: # not sharded + for filename in filenames: + num_examples += to - from_ + file_instructions.append({"filename": filename, "skip": from_, "take": to - from_}) + else: # sharded + index_start = 0 # Beginning (included) of moving window. + index_end = 0 # End (excluded) of moving window. + for filename, shard_length in zip(filenames, shard_lengths): + index_end += shard_length + if from_ < index_end and to > index_start: # There is something to take. + skip = from_ - index_start if from_ > index_start else 0 + take = to - index_start - skip if to < index_end else -1 + if take == 0: + continue + file_instructions.append({"filename": filename, "skip": skip, "take": take}) + num_examples += shard_length - skip if take == -1 else take + index_start += shard_length + return FileInstructions( + num_examples=num_examples, + file_instructions=file_instructions, + ) + + +class BaseReader: + """ + Build a Dataset object out of Instruction instance(s). + """ + + def __init__(self, path: str, info: Optional["DatasetInfo"]): + """Initializes ArrowReader. + + Args: + path (str): path where tfrecords are stored. + info (DatasetInfo): info about the dataset. + """ + self._path: str = path + self._info: Optional["DatasetInfo"] = info + self._filetype_suffix: Optional[str] = None + + def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table: + """Returns a Dataset instance from given (filename, skip, take).""" + raise NotImplementedError + + def _read_files(self, files, in_memory=False) -> Table: + """Returns Dataset for given file instructions. + + Args: + files: List[dict(filename, skip, take)], the files information. + The filenames contain the absolute path, not relative. + skip/take indicates which example read in the file: `ds.slice(skip, take)` + in_memory (bool, default False): Whether to copy the data in-memory. 
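+
+        A `files` argument might look like this (the filenames are illustrative):
+
+        ```python
+        files = [
+            {"filename": "my_dataset-train-00000-of-00002.arrow", "skip": 0, "take": -1},
+            {"filename": "my_dataset-train-00001-of-00002.arrow", "skip": 0, "take": 500},
+        ]
+        ```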
+ """ + if len(files) == 0 or not all(isinstance(f, dict) for f in files): + raise ValueError("please provide valid file informations") + pa_tables = [] + files = copy.deepcopy(files) + for f in files: + f["filename"] = os.path.join(self._path, f["filename"]) + for f_dict in files: + pa_table: Table = self._get_table_from_filename(f_dict, in_memory=in_memory) + pa_tables.append(pa_table) + pa_tables = [t for t in pa_tables if len(t) > 0] + if not pa_tables and (self._info is None or self._info.features is None): + raise ValueError( + "Tried to read an empty table. Please specify at least info.features to create an empty table with the right type." + ) + pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))] + pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0] + return pa_table + + def get_file_instructions(self, name, instruction, split_infos): + """Return list of dict {'filename': str, 'skip': int, 'take': int}""" + file_instructions = make_file_instructions( + name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path + ) + files = file_instructions.file_instructions + return files + + def read( + self, + name, + instructions, + split_infos, + in_memory=False, + ): + """Returns Dataset instance(s). + + Args: + name (str): name of the dataset. + instructions (ReadInstruction): instructions to read. + Instruction can be string and will then be passed to the Instruction + constructor as it. + split_infos (list of SplitInfo proto): the available splits for dataset. + in_memory (bool, default False): Whether to copy the data in-memory. + + Returns: + kwargs to build a single Dataset instance. + """ + + files = self.get_file_instructions(name, instructions, split_infos) + if not files: + msg = f'Instruction "{instructions}" corresponds to no data!' + raise ValueError(msg) + return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) + + def read_files( + self, + files: List[dict], + original_instructions: Union[None, "ReadInstruction", "Split"] = None, + in_memory=False, + ): + """Returns single Dataset instance for the set of file instructions. + + Args: + files: List[dict(filename, skip, take)], the files information. + The filenames contains the relative path, not absolute. + skip/take indicates which example read in the file: `ds.skip().take()` + original_instructions: store the original instructions used to build the dataset split in the dataset. + in_memory (bool, default False): Whether to copy the data in-memory. + + Returns: + kwargs to build a Dataset instance. + """ + # Prepend path to filename + pa_table = self._read_files(files, in_memory=in_memory) + # If original_instructions is not None, convert it to a human-readable NamedSplit + if original_instructions is not None: + from .splits import Split # noqa + + split = Split(str(original_instructions)) + else: + split = None + dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split} + return dataset_kwargs + + def download_from_hf_gcs(self, download_config: DownloadConfig, relative_data_dir): + """ + Download the dataset files from the Hf GCS + + Args: + dl_cache_dir: `str`, the local cache directory used to download files + relative_data_dir: `str`, the relative directory of the remote files from + the `datasets` directory on GCS. 
+ + """ + remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/") + try: + remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json") + downloaded_dataset_info = cached_path(remote_dataset_info.replace(os.sep, "/")) + shutil.move(downloaded_dataset_info, os.path.join(self._path, "dataset_info.json")) + if self._info is not None: + self._info.update(self._info.from_directory(self._path)) + except FileNotFoundError as err: + raise DatasetNotOnHfGcsError(err) from None + try: + for split in self._info.splits: + file_instructions = self.get_file_instructions( + name=self._info.builder_name, + instruction=split, + split_infos=self._info.splits.values(), + ) + for file_instruction in file_instructions: + file_to_download = str(Path(file_instruction["filename"]).relative_to(self._path)) + remote_prepared_filename = os.path.join(remote_cache_dir, file_to_download) + downloaded_prepared_filename = cached_path( + remote_prepared_filename.replace(os.sep, "/"), download_config=download_config + ) + shutil.move(downloaded_prepared_filename, file_instruction["filename"]) + except FileNotFoundError as err: + raise MissingFilesOnHfGcsError(err) from None + + +class ArrowReader(BaseReader): + """ + Build a Dataset object out of Instruction instance(s). + This Reader uses either memory mapping or file descriptors (in-memory) on arrow files. + """ + + def __init__(self, path: str, info: Optional["DatasetInfo"]): + """Initializes ArrowReader. + + Args: + path (str): path where Arrow files are stored. + info (DatasetInfo): info about the dataset. + """ + super().__init__(path, info) + self._filetype_suffix = "arrow" + + def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table: + """Returns a Dataset instance from given (filename, skip, take).""" + filename, skip, take = ( + filename_skip_take["filename"], + filename_skip_take["skip"] if "skip" in filename_skip_take else None, + filename_skip_take["take"] if "take" in filename_skip_take else None, + ) + table = ArrowReader.read_table(filename, in_memory=in_memory) + if take == -1: + take = len(table) - skip + # here we don't want to slice an empty table, or it may segfault + if skip is not None and take is not None and not (skip == 0 and take == len(table)): + table = table.slice(skip, take) + return table + + @staticmethod + def read_table(filename, in_memory=False) -> Table: + """ + Read table from file. + + Args: + filename (str): File name of the table. + in_memory (bool, default=False): Whether to copy the data in-memory. + + Returns: + pyarrow.Table + """ + table_cls = InMemoryTable if in_memory else MemoryMappedTable + return table_cls.from_file(filename) + + +class ParquetReader(BaseReader): + """ + Build a Dataset object out of Instruction instance(s). + This Reader uses memory mapping on parquet files. + """ + + def __init__(self, path: str, info: Optional["DatasetInfo"]): + """Initializes ParquetReader. + + Args: + path (str): path where tfrecords are stored. + info (DatasetInfo): info about the dataset. 
+ """ + super().__init__(path, info) + self._filetype_suffix = "parquet" + + def _get_table_from_filename(self, filename_skip_take, **kwargs): + """Returns a Dataset instance from given (filename, skip, take).""" + filename, skip, take = ( + filename_skip_take["filename"], + filename_skip_take["skip"] if "skip" in filename_skip_take else None, + filename_skip_take["take"] if "take" in filename_skip_take else None, + ) + # Parquet read_table always loads data in memory, independently of memory_map + pa_table = pq.read_table(filename, memory_map=True) + # here we don't want to slice an empty table, or it may segfault + if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)): + pa_table = pa_table.slice(skip, take) + return pa_table + + +@dataclass(frozen=True) +class _AbsoluteInstruction: + """A machine friendly slice: defined absolute positive boundaries.""" + + splitname: str + from_: int # uint (starting index). + to: int # uint (ending index). + + +@dataclass(frozen=True) +class _RelativeInstruction: + """Represents a single parsed slicing instruction, can use % and negatives.""" + + splitname: str + from_: Optional[int] = None # int (starting index) or None if no lower boundary. + to: Optional[int] = None # int (ending index) or None if no upper boundary. + unit: Optional[str] = None + rounding: Optional[str] = None + + def __post_init__(self): + if self.unit is not None and self.unit not in ["%", "abs"]: + raise ValueError("unit must be either % or abs") + if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]: + raise ValueError("rounding must be either closest or pct1_dropremainder") + if self.unit != "%" and self.rounding is not None: + raise ValueError("It is forbidden to specify rounding if not using percent slicing.") + if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100: + raise ValueError("Percent slice boundaries must be > -100 and < 100.") + if self.unit == "%" and self.to is not None and abs(self.to) > 100: + raise ValueError("Percent slice boundaries must be > -100 and < 100.") + # Update via __dict__ due to instance being "frozen" + self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding + + +def _str_to_read_instruction(spec): + """Returns ReadInstruction for given string.""" + res = _SUB_SPEC_RE.match(spec) + if not res: + raise ValueError(f"Unrecognized instruction format: {spec}") + unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs" + return ReadInstruction( + split_name=res.group("split"), + rounding=res.group("rounding"), + from_=int(res.group("from")) if res.group("from") else None, + to=int(res.group("to")) if res.group("to") else None, + unit=unit, + ) + + +def _pct_to_abs_pct1(boundary, num_examples): + # Using math.trunc here, since -99.5% should give -99%, not -100%. + if num_examples < 100: + msg = ( + 'Using "pct1_dropremainder" rounding on a split with less than 100 ' + "elements is forbidden: it always results in an empty dataset." + ) + raise ValueError(msg) + return boundary * math.trunc(num_examples / 100.0) + + +def _pct_to_abs_closest(boundary, num_examples): + return int(round(boundary * num_examples / 100.0)) + + +def _rel_to_abs_instr(rel_instr, name2len): + """Returns _AbsoluteInstruction instance for given RelativeInstruction. + + Args: + rel_instr: RelativeInstruction instance. + name2len: dict {split_name: num_examples}. 
+ """ + pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1 + split = rel_instr.splitname + if split not in name2len: + raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.') + num_examples = name2len[split] + from_ = rel_instr.from_ + to = rel_instr.to + if rel_instr.unit == "%": + from_ = 0 if from_ is None else pct_to_abs(from_, num_examples) + to = num_examples if to is None else pct_to_abs(to, num_examples) + else: + from_ = 0 if from_ is None else from_ + to = num_examples if to is None else to + if abs(from_) > num_examples or abs(to) > num_examples: + msg = f'Requested slice [{from_ or ""}:{to or ""}] incompatible with {num_examples} examples.' + raise ValueError(msg) + if from_ < 0: + from_ = num_examples + from_ + elif from_ == 0: + from_ = None + if to < 0: + to = num_examples + to + elif to == num_examples: + to = None + return _AbsoluteInstruction(split, from_, to) + + +class ReadInstruction: + """Reading instruction for a dataset. + + Examples:: + + # The following lines are equivalent: + ds = datasets.load_dataset('mnist', split='test[:33%]') + ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]')) + ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%')) + ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction( + 'test', from_=0, to=33, unit='%')) + + # The following lines are equivalent: + ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]') + ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec( + 'test[:33%]+train[1:-1]')) + ds = datasets.load_dataset('mnist', split=( + datasets.ReadInstruction('test', to=33, unit='%') + + datasets.ReadInstruction('train', from_=1, to=-1, unit='abs'))) + + # The following lines are equivalent: + ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)') + ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec( + 'test[:33%](pct1_dropremainder)')) + ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction( + 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder")) + + # 10-fold validation: + tests = datasets.load_dataset( + 'mnist', + [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%') + for k in range(0, 100, 10)]) + trains = datasets.load_dataset( + 'mnist', + [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%') + for k in range(0, 100, 10)]) + + """ + + def _init(self, relative_instructions): + # Private initializer. + self._relative_instructions = relative_instructions + + @classmethod + def _read_instruction_from_relative_instructions(cls, relative_instructions): + """Returns ReadInstruction obj initialized with relative_instructions.""" + # Use __new__ to bypass __init__ used by public API and not conveniant here. + result = cls.__new__(cls) + result._init(relative_instructions) # pylint: disable=protected-access + return result + + def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None): + """Initialize ReadInstruction. + + Args: + split_name (str): name of the split to read. Eg: 'train'. + rounding (str, optional): The rounding behaviour to use when percent slicing is + used. Ignored when slicing with absolute indices. + Possible values: + - 'closest' (default): The specified percentages are rounded to the + closest value. Use this if you want specified percents to be as + much exact as possible. 
+ - 'pct1_dropremainder': the specified percentages are treated as + multiple of 1%. Use this option if you want consistency. Eg: + len(5%) == 5 * len(1%). + Using this option, one might not be able to use the full set of + examples, if the number of those is not a multiple of 100. + from_ (int): + to (int): alternative way of specifying slicing boundaries. If any of + {from_, to, unit} argument is used, slicing cannot be specified as + string. + unit (str): optional, one of: + '%': to set the slicing unit as percents of the split size. + 'abs': to set the slicing unit as absolute numbers. + """ + # This constructor is not always called. See factory method + # `_read_instruction_from_relative_instructions`. Common init instructions + # MUST be placed in the _init method. + self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)]) + + @classmethod + def from_spec(cls, spec): + """Creates a `ReadInstruction` instance out of a string spec. + + Args: + spec (`str`): + Split(s) + optional slice(s) to read + optional rounding + if percents are used as the slicing unit. A slice can be specified, + using absolute numbers (`int`) or percentages (`int`). + + Examples: + + ``` + test: test split. + test + validation: test split + validation split. + test[10:]: test split, minus its first 10 records. + test[:10%]: first 10% records of test split. + test[:20%](pct1_dropremainder): first 10% records, rounded with the pct1_dropremainder rounding. + test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train. + ``` + + Returns: + ReadInstruction instance. + """ + spec = str(spec) # Need to convert to str in case of NamedSplit instance. + subs = _ADDITION_SEP_RE.split(spec) + if not subs: + raise ValueError(f"No instructions could be built out of {spec}") + instruction = _str_to_read_instruction(subs[0]) + return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction) + + def to_spec(self): + rel_instr_specs = [] + for rel_instr in self._relative_instructions: + rel_instr_spec = rel_instr.splitname + if rel_instr.from_ is not None or rel_instr.to is not None: + from_ = rel_instr.from_ + to = rel_instr.to + unit = rel_instr.unit + rounding = rel_instr.rounding + unit = unit if unit == "%" else "" + from_ = str(from_) + unit if from_ is not None else "" + to = str(to) + unit if to is not None else "" + slice_str = f"[{from_}:{to}]" + rounding_str = ( + f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else "" + ) + rel_instr_spec += slice_str + rounding_str + rel_instr_specs.append(rel_instr_spec) + return "+".join(rel_instr_specs) + + def __add__(self, other): + """Returns a new ReadInstruction obj, result of appending other to self.""" + if not isinstance(other, ReadInstruction): + msg = "ReadInstruction can only be added to another ReadInstruction obj." 
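+            # e.g. ReadInstruction("train") + "test[:10]" raises TypeError;
+            # wrap the string first: ReadInstruction.from_spec("test[:10]")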
+            raise TypeError(msg)
+        self_ris = self._relative_instructions
+        other_ris = other._relative_instructions  # pylint: disable=protected-access
+        if (
+            self_ris[0].unit != "abs"
+            and other_ris[0].unit != "abs"
+            and self._relative_instructions[0].rounding != other_ris[0].rounding
+        ):
+            raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
+        return self._read_instruction_from_relative_instructions(self_ris + other_ris)
+
+    def __str__(self):
+        return self.to_spec()
+
+    def __repr__(self):
+        return f"ReadInstruction({self._relative_instructions})"
+
+    def to_absolute(self, name2len):
+        """Translate instruction into a list of absolute instructions.
+
+        Those absolute instructions are then to be added together.
+
+        Args:
+            name2len (`dict`):
+                Associating split names to number of examples.
+
+        Returns:
+            list of _AbsoluteInstruction instances (corresponds to the + in spec).
+        """
+        return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
diff --git a/testbed/huggingface__datasets/src/datasets/arrow_writer.py b/testbed/huggingface__datasets/src/datasets/arrow_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6d8141f6d313f28139d3f0a1017d578fdd549c
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/arrow_writer.py
@@ -0,0 +1,744 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""To write records into Arrow and Parquet files."""
+import errno
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
+
+import fsspec
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+from . import config
+from .features import Features, Image, Value
+from .features.features import (
+    FeatureType,
+    _ArrayXDExtensionType,
+    cast_to_python_objects,
+    generate_from_arrow_type,
+    get_nested_type,
+    list_of_np_array_to_pyarrow_listarray,
+    numpy_to_pyarrow_listarray,
+    to_pyarrow_listarray,
+)
+from .filesystems import is_remote_filesystem
+from .info import DatasetInfo
+from .keyhash import DuplicatedKeysError, KeyHasher
+from .table import array_cast, array_concat, cast_array_to_feature, embed_table_storage, table_cast
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import hash_url_to_filename
+from .utils.py_utils import asdict, first_non_null_value
+
+
+logger = logging.get_logger(__name__)
+
+type_ = type  # keep python's type function
+
+
+class SchemaInferenceError(ValueError):
+    pass
+
+
+class TypedSequence:
+    """
+    This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
+
+    More specifically it adds several features:
+    - Support extension types like ``datasets.features.Array2DExtensionType``:
+        By default pyarrow arrays don't return extension arrays.
One has to call
+        ``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
+        in order to get an extension array.
+    - Support for ``try_type`` parameter that can be used instead of ``type``:
+        When an array is transformed, we'd like to keep the same type as before if possible.
+        For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
+        of each column by default.
+    - Better error message when a pyarrow array overflows.
+
+    Example::
+
+        from datasets.features import Array2D, Array2DExtensionType, Value
+        from datasets.arrow_writer import TypedSequence
+        import pyarrow as pa
+
+        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
+        assert arr.type == pa.int32()
+
+        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
+        assert arr.type == pa.int32()
+
+        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
+        assert arr.type == pa.string()
+
+        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
+        assert arr.type == Array2DExtensionType((1, 3), "int64")
+
+        table = pa.Table.from_pydict({
+            "image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
+        })
+        assert table["image"].type == Array2DExtensionType((1, 3), "int64")
+
+    """
+
+    def __init__(
+        self,
+        data: Iterable,
+        type: Optional[FeatureType] = None,
+        try_type: Optional[FeatureType] = None,
+        optimized_int_type: Optional[FeatureType] = None,
+    ):
+        # type and try_type are mutually exclusive
+        if type is not None and try_type is not None:
+            raise ValueError("You cannot specify both type and try_type")
+        # set attributes
+        self.data = data
+        self.type = type
+        self.try_type = try_type  # is ignored if it doesn't match the data
+        self.optimized_int_type = optimized_int_type
+        # when trying a type (is ignored if data is not compatible)
+        self.trying_type = self.try_type is not None
+        self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
+        # used to get back the inferred type after __arrow_array__() is called once
+        self._inferred_type = None
+
+    def get_inferred_type(self) -> FeatureType:
+        """Return the inferred feature type.
+        This is done by converting the sequence to an Arrow array, and getting the corresponding
+        feature type.
+
+        Since building the Arrow array can be expensive, the value of the inferred type is cached
+        as soon as pa.array is called on the typed sequence.
+
+        Returns:
+            FeatureType: inferred feature type of the sequence.
+        """
+        if self._inferred_type is None:
+            self._inferred_type = generate_from_arrow_type(pa.array(self).type)
+        return self._inferred_type
+
+    @staticmethod
+    def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
+        """Implement type inference for custom objects like PIL.Image.Image -> Image type.
+
+        This function is only used for custom python objects that can't be directly passed to build
+        an Arrow array. In such cases it infers the feature type to use, and it encodes the data so
+        that it can be passed to an Arrow array.
+
+        Args:
+            data (Iterable): array of data to infer the type, e.g. a list of PIL images.
+
+        Returns:
+            Tuple[Iterable, Optional[FeatureType]]: a tuple with:
+                - the (possibly encoded) array, if the inferred feature type requires encoding
+                - the inferred feature type if the array is made of supported custom objects like
+                    PIL images, else None.
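+
+        Example (an illustrative sketch, not from the library docs; assumes Pillow is installed)::
+
+            import PIL.Image
+
+            data = [PIL.Image.new("RGB", (2, 2)), None]
+            encoded, inferred = TypedSequence._infer_custom_type_and_encode(data)
+            # inferred is Image(); encoded contains encoded examples that pa.array can consume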
+ """ + if config.PIL_AVAILABLE and "PIL" in sys.modules: + import PIL.Image + + non_null_idx, non_null_value = first_non_null_value(data) + if isinstance(non_null_value, PIL.Image.Image): + return [Image().encode_example(value) if value is not None else None for value in data], Image() + return data, None + + def __arrow_array__(self, type: Optional[pa.DataType] = None): + """This function is called when calling pa.array(typed_sequence)""" + + if type is not None: + raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)") + del type # make sure we don't use it + data = self.data + # automatic type inference for custom objects + if self.type is None and self.try_type is None: + data, self._inferred_type = self._infer_custom_type_and_encode(data) + if self._inferred_type is None: + type = self.try_type if self.trying_type else self.type + else: + type = self._inferred_type + pa_type = get_nested_type(type) if type is not None else None + optimized_int_pa_type = ( + get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None + ) + trying_cast_to_python_objects = False + try: + # custom pyarrow types + if isinstance(pa_type, _ArrayXDExtensionType): + storage = to_pyarrow_listarray(data, pa_type) + return pa.ExtensionArray.from_storage(pa_type, storage) + + # efficient np array to pyarrow array + if isinstance(data, np.ndarray): + out = numpy_to_pyarrow_listarray(data) + elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray): + out = list_of_np_array_to_pyarrow_listarray(data) + else: + trying_cast_to_python_objects = True + out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True)) + # use smaller integer precisions if possible + if self.trying_int_optimization: + if pa.types.is_int64(out.type): + out = out.cast(optimized_int_pa_type) + elif pa.types.is_list(out.type): + if pa.types.is_int64(out.type.value_type): + out = array_cast(out, pa.list_(optimized_int_pa_type)) + elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type): + out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type))) + # otherwise we can finally use the user's type + elif type is not None: + # We use cast_array_to_feature to support casting to custom types like Audio and Image + # Also, when trying type "string", we don't want to convert integers or floats to "string". + # We only do it if trying_type is False - since this is what the user asks for. + out = cast_array_to_feature(out, type, allow_number_to_str=not self.trying_type) + return out + except ( + TypeError, + pa.lib.ArrowInvalid, + pa.lib.ArrowNotImplementedError, + ) as e: # handle type errors and overflows + # Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise + if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError): + raise + + if self.trying_type: + try: # second chance + if isinstance(data, np.ndarray): + return numpy_to_pyarrow_listarray(data) + elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data): + return list_of_np_array_to_pyarrow_listarray(data) + else: + trying_cast_to_python_objects = True + return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True)) + except pa.lib.ArrowInvalid as e: + if "overflow" in str(e): + raise OverflowError( + f"There was an overflow with type {type_(data)}. 
Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})" + ) from None + elif self.trying_int_optimization and "not in range" in str(e): + optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name + logger.info( + f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64." + ) + return out + elif trying_cast_to_python_objects and "Could not convert" in str(e): + out = pa.array( + cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False) + ) + if type is not None: + out = cast_array_to_feature(out, type, allow_number_to_str=True) + return out + else: + raise + elif "overflow" in str(e): + raise OverflowError( + f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})" + ) from None + elif self.trying_int_optimization and "not in range" in str(e): + optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name + logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.") + return out + elif trying_cast_to_python_objects and "Could not convert" in str(e): + out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)) + if type is not None: + out = cast_array_to_feature(out, type, allow_number_to_str=True) + return out + else: + raise + + +class OptimizedTypedSequence(TypedSequence): + def __init__( + self, + data, + type: Optional[FeatureType] = None, + try_type: Optional[FeatureType] = None, + col: Optional[str] = None, + optimized_int_type: Optional[FeatureType] = None, + ): + optimized_int_type_by_col = { + "attention_mask": Value("int8"), # binary tensor + "special_tokens_mask": Value("int8"), + "input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M) + "token_type_ids": Value( + "int8" + ), # binary mask; some (XLNetModel) use an additional token represented by a 2 + } + if type is None and try_type is None: + optimized_int_type = optimized_int_type_by_col.get(col, None) + super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type) + + +class ArrowWriter: + """Shuffles and writes Examples to Arrow files.""" + + _WRITER_CLASS = pa.RecordBatchStreamWriter + + def __init__( + self, + schema: Optional[pa.Schema] = None, + features: Optional[Features] = None, + path: Optional[str] = None, + stream: Optional[pa.NativeFile] = None, + fingerprint: Optional[str] = None, + writer_batch_size: Optional[int] = None, + hash_salt: Optional[str] = None, + check_duplicates: Optional[bool] = False, + disable_nullable: bool = False, + update_features: bool = False, + with_metadata: bool = True, + unit: str = "examples", + embed_local_files: bool = False, + storage_options: Optional[dict] = None, + ): + if path is None and stream is None: + raise ValueError("At least one of path and stream must be provided.") + if features is not None: + self._features = features + self._schema = None + elif schema is not None: + self._schema: pa.Schema = schema + self._features = Features.from_arrow_schema(self._schema) + else: + self._features = None + self._schema = None + + if hash_salt is not None: + # Create KeyHasher instance using split name as hash salt + self._hasher = KeyHasher(hash_salt) + else: + self._hasher = KeyHasher("") + + self._check_duplicates = check_duplicates + self._disable_nullable = disable_nullable + + if stream is None: + fs_token_paths = fsspec.get_fs_token_paths(path, 
storage_options=storage_options)
+            self._fs: fsspec.AbstractFileSystem = fs_token_paths[0]
+            self._path = (
+                fs_token_paths[2][0]
+                if not is_remote_filesystem(self._fs)
+                else self._fs.unstrip_protocol(fs_token_paths[2][0])
+            )
+            self.stream = self._fs.open(fs_token_paths[2][0], "wb")
+            self._closable_stream = True
+        else:
+            self._fs = None
+            self._path = None
+            self.stream = stream
+            self._closable_stream = False
+
+        self.fingerprint = fingerprint
+        self.disable_nullable = disable_nullable
+        self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE
+        self.update_features = update_features
+        self.with_metadata = with_metadata
+        self.unit = unit
+        self.embed_local_files = embed_local_files
+
+        self._num_examples = 0
+        self._num_bytes = 0
+        self.current_examples: List[Tuple[Dict[str, Any], str]] = []
+        self.current_rows: List[pa.Table] = []
+        self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
+        self.hkey_record = []
+
+    def __len__(self):
+        """Return the number of written and staged examples"""
+        return self._num_examples + len(self.current_examples) + len(self.current_rows)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+
+    def close(self):
+        # Try closing if opened; if already closed, pyarrow raises ArrowInvalid: Invalid operation on closed file
+        if self.pa_writer:  # it might be None
+            try:
+                self.pa_writer.close()
+            except Exception:  # pyarrow.lib.ArrowInvalid, OSError
+                pass
+        if self._closable_stream and not self.stream.closed:
+            self.stream.close()  # This also closes self.pa_writer if it is opened
+
+    def _build_writer(self, inferred_schema: pa.Schema):
+        schema = self.schema
+        inferred_features = Features.from_arrow_schema(inferred_schema)
+        if self._features is not None:
+            if self.update_features:  # keep original features if they match, or update them
+                fields = {field.name: field for field in self._features.type}
+                for inferred_field in inferred_features.type:
+                    name = inferred_field.name
+                    if name in fields:
+                        if inferred_field == fields[name]:
+                            inferred_features[name] = self._features[name]
+                self._features = inferred_features
+                schema: pa.Schema = inferred_schema
+        else:
+            self._features = inferred_features
+            schema: pa.Schema = inferred_features.arrow_schema
+        if self.disable_nullable:
+            schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
+        if self.with_metadata:
+            schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
+        else:
+            schema = schema.with_metadata({})
+        self._schema = schema
+        self.pa_writer = self._WRITER_CLASS(self.stream, schema)
+
+    @property
+    def schema(self):
+        _schema = (
+            self._schema
+            if self._schema is not None
+            else (pa.schema(self._features.type) if self._features is not None else None)
+        )
+        if self._disable_nullable and _schema is not None:
+            _schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
+        return _schema if _schema is not None else []
+
+    @staticmethod
+    def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
+        info_keys = ["features"]  # we can add support for more DatasetInfo keys in the future
+        info_as_dict = asdict(info)
+        metadata = {}
+        metadata["info"] = {key: info_as_dict[key] for key in info_keys}
+        if fingerprint is not None:
+            metadata["fingerprint"] = fingerprint
+        return {"huggingface": json.dumps(metadata)}
+
+    def write_examples_on_file(self):
+        """Write stored examples from the write-pool
of examples. It makes a table out of the examples and writes it."""
+        if not self.current_examples:
+            return
+
+        # order the columns properly
+        cols = (
+            [col for col in self.schema.names if col in self.current_examples[0][0]]
+            + [col for col in self.current_examples[0][0].keys() if col not in self.schema.names]
+            if self.schema
+            else self.current_examples[0][0].keys()
+        )
+        batch_examples = {}
+        for col in cols:
+            # We use row[0][col] since current_examples contains (example, key) tuples.
+            # Moreover, examples could be Arrow arrays of 1 element.
+            # This can happen in `.map()` when we want to re-write the same Arrow data
+            if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
+                arrays = [row[0][col] for row in self.current_examples]
+                batch_examples[col] = array_concat(arrays)
+            else:
+                batch_examples[col] = [
+                    row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
+                    for row in self.current_examples
+                ]
+        self.write_batch(batch_examples=batch_examples)
+        self.current_examples = []
+
+    def write_rows_on_file(self):
+        """Write stored rows from the write-pool of rows. It concatenates the single-row tables and writes the resulting table."""
+        if not self.current_rows:
+            return
+        table = pa.concat_tables(self.current_rows)
+        self.write_table(table)
+        self.current_rows = []
+
+    def write(
+        self,
+        example: Dict[str, Any],
+        key: Optional[Union[str, int, bytes]] = None,
+        writer_batch_size: Optional[int] = None,
+    ):
+        """Add a given (Example, Key) pair to the write-pool of examples which is written to file.
+
+        Args:
+            example: the Example to add.
+            key: Optional, a unique identifier (str, int or bytes) associated with each example.
+        """
+        # Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
+        if self._check_duplicates:
+            # Create unique hash from key and store as (key, example) pairs
+            hash = self._hasher.hash(key)
+            self.current_examples.append((example, hash))
+            # Maintain record of keys and their respective hashes for checking duplicates
+            self.hkey_record.append((hash, key))
+        else:
+            # Store example as a tuple so as to keep the structure of `self.current_examples` uniform
+            self.current_examples.append((example, ""))
+
+        if writer_batch_size is None:
+            writer_batch_size = self.writer_batch_size
+        if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
+            if self._check_duplicates:
+                self.check_duplicate_keys()
+                # Re-initializing to empty list for next batch
+                self.hkey_record = []
+
+            self.write_examples_on_file()
+
+    def check_duplicate_keys(self):
+        """Raises error if duplicates found in a batch"""
+        tmp_record = set()
+        for hash, key in self.hkey_record:
+            if hash in tmp_record:
+                duplicate_key_indices = [
+                    str(self._num_examples + index)
+                    for index, (duplicate_hash, _) in enumerate(self.hkey_record)
+                    if duplicate_hash == hash
+                ]
+
+                raise DuplicatedKeysError(key, duplicate_key_indices)
+            else:
+                tmp_record.add(hash)
+
+    def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
+        """Add a given single-row Table to the write-pool of rows which is written to file.
+
+        Args:
+            row: the row to add.
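+
+        Example (an illustrative sketch; any single-row pyarrow table works)::
+
+            writer.write_row(pa.table({"text": ["hello"]}))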
+ """ + if len(row) != 1: + raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.") + self.current_rows.append(row) + if writer_batch_size is None: + writer_batch_size = self.writer_batch_size + if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size: + self.write_rows_on_file() + + def write_batch( + self, + batch_examples: Dict[str, List], + writer_batch_size: Optional[int] = None, + ): + """Write a batch of Example to file. + Ignores the batch if it appears to be empty, + preventing a potential schema update of unknown types. + + Args: + batch_examples: the batch of examples to add. + """ + if batch_examples and len(next(iter(batch_examples.values()))) == 0: + return + features = None if self.pa_writer is None and self.update_features else self._features + try_features = self._features if self.pa_writer is None and self.update_features else None + arrays = [] + inferred_features = Features() + cols = ( + [col for col in self.schema.names if col in batch_examples] + + [col for col in batch_examples.keys() if col not in self.schema.names] + if self.schema + else batch_examples.keys() + ) + for col in cols: + col_values = batch_examples[col] + col_type = features[col] if features else None + if isinstance(col_values, (pa.Array, pa.ChunkedArray)): + array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values + arrays.append(array) + inferred_features[col] = generate_from_arrow_type(col_values.type) + else: + col_try_type = try_features[col] if try_features is not None and col in try_features else None + typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col) + arrays.append(pa.array(typed_sequence)) + inferred_features[col] = typed_sequence.get_inferred_type() + schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema + pa_table = pa.Table.from_arrays(arrays, schema=schema) + self.write_table(pa_table, writer_batch_size) + + def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None): + """Write a Table to file. + + Args: + example: the Table to add. + """ + if writer_batch_size is None: + writer_batch_size = self.writer_batch_size + if self.pa_writer is None: + self._build_writer(inferred_schema=pa_table.schema) + pa_table = pa_table.combine_chunks() + pa_table = table_cast(pa_table, self._schema) + if self.embed_local_files: + pa_table = embed_table_storage(pa_table) + self._num_bytes += pa_table.nbytes + self._num_examples += pa_table.num_rows + self.pa_writer.write_table(pa_table, writer_batch_size) + + def finalize(self, close_stream=True): + self.write_rows_on_file() + # In case current_examples < writer_batch_size, but user uses finalize() + if self._check_duplicates: + self.check_duplicate_keys() + # Re-intializing to empty list for next batch + self.hkey_record = [] + self.write_examples_on_file() + # If schema is known, infer features even if no examples were written + if self.pa_writer is None and self.schema: + self._build_writer(self.schema) + if self.pa_writer is not None: + self.pa_writer.close() + self.pa_writer = None + if close_stream: + self.stream.close() + else: + if close_stream: + self.stream.close() + raise SchemaInferenceError("Please pass `features` or at least one example when writing data") + logger.debug( + f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}." 
+ ) + return self._num_examples, self._num_bytes + + +class ParquetWriter(ArrowWriter): + _WRITER_CLASS = pq.ParquetWriter + + +class BeamWriter: + """ + Shuffles and writes Examples to Arrow files. + The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines. + """ + + def __init__( + self, + features: Optional[Features] = None, + schema: Optional[pa.Schema] = None, + path: Optional[str] = None, + namespace: Optional[str] = None, + cache_dir: Optional[str] = None, + ): + if features is None and schema is None: + raise ValueError("At least one of features and schema must be provided.") + if path is None: + raise ValueError("Path must be provided.") + + if features is not None: + self._features: Features = features + self._schema: pa.Schema = features.arrow_schema + else: + self._schema: pa.Schema = schema + self._features: Features = Features.from_arrow_schema(schema) + + self._path = path + self._parquet_path = os.path.splitext(path)[0] # remove extension + self._namespace = namespace or "default" + self._num_examples = None + self._cache_dir = cache_dir or config.HF_DATASETS_CACHE + + def write_from_pcollection(self, pcoll_examples): + """Add the final steps of the beam pipeline: write to parquet files.""" + import apache_beam as beam + + def inc_num_examples(example): + beam.metrics.Metrics.counter(self._namespace, "num_examples").inc() + + # count examples + _ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples) + + # save dataset + return ( + pcoll_examples + | "Get values" >> beam.Values() + | "Save to parquet" + >> beam.io.parquetio.WriteToParquet( + self._parquet_path, self._schema, shard_name_template="-SSSSS-of-NNNNN.parquet" + ) + ) + + def finalize(self, metrics_query_result: dict): + """ + Run after the pipeline has finished. + It converts the resulting parquet files to arrow and it completes the info from the pipeline metrics. + + Args: + metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure + that the filter keeps only the metrics for the considered split, under the namespace `split_name`. 
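+
+        Example (an illustrative sketch; `pipeline_results` and `split_name` are assumed
+        to come from the surrounding Beam pipeline code)::
+
+            from apache_beam.metrics.metric import MetricsFilter
+
+            m_filter = MetricsFilter().with_namespace(split_name)
+            metrics_query_result = pipeline_results.metrics().query(m_filter)
+            num_examples, num_bytes = writer.finalize(metrics_query_result)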
+ """ + import apache_beam as beam + + from .utils import beam_utils + + # Beam FileSystems require the system's path separator in the older versions + fs, _, [parquet_path] = fsspec.get_fs_token_paths(self._parquet_path) + parquet_path = str(Path(parquet_path)) if not is_remote_filesystem(fs) else fs.unstrip_protocol(parquet_path) + + shards_metadata = list(beam.io.filesystems.FileSystems.match([parquet_path + "*.parquet"])[0].metadata_list) + shards = [metadata.path for metadata in shards_metadata] + num_bytes = sum([metadata.size_in_bytes for metadata in shards_metadata]) + shard_lengths = get_parquet_lengths(shards) + + # Convert to arrow + if self._path.endswith(".arrow"): + logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}") + shards = [ + metadata.path + for metadata in beam.io.filesystems.FileSystems.match([parquet_path + "*.parquet"])[0].metadata_list + ] + try: # stream conversion + num_bytes = 0 + for shard in hf_tqdm(shards, unit="shards"): + with beam.io.filesystems.FileSystems.open(shard) as source: + with beam.io.filesystems.FileSystems.create( + shard.replace(".parquet", ".arrow") + ) as destination: + shard_num_bytes, _ = parquet_to_arrow(source, destination) + num_bytes += shard_num_bytes + except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead + if e.errno != errno.EPIPE: # not a broken pipe + raise + logger.warning( + "Broken Pipe during stream conversion from parquet to arrow. Using local convert instead" + ) + local_convert_dir = os.path.join(self._cache_dir, "beam_convert") + os.makedirs(local_convert_dir, exist_ok=True) + num_bytes = 0 + for shard in hf_tqdm(shards, unit="shards"): + local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet") + beam_utils.download_remote_to_local(shard, local_parquet_path) + local_arrow_path = local_parquet_path.replace(".parquet", ".arrow") + shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path) + num_bytes += shard_num_bytes + remote_arrow_path = shard.replace(".parquet", ".arrow") + beam_utils.upload_local_to_remote(local_arrow_path, remote_arrow_path) + + # Save metrics + counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]} + self._num_examples = counters_dict["num_examples"] + self._num_bytes = num_bytes + self._shard_lengths = shard_lengths + return self._num_examples, self._num_bytes + + +def get_parquet_lengths(sources) -> List[int]: + shard_lengths = [] + for source in hf_tqdm(sources, unit="parquet files"): + parquet_file = pa.parquet.ParquetFile(source) + shard_lengths.append(parquet_file.metadata.num_rows) + return shard_lengths + + +def parquet_to_arrow(source, destination) -> List[int]: + """Convert parquet file to arrow file. 
Inputs can be str paths or file-like objects"""
+    stream = None if isinstance(destination, str) else destination
+    with ArrowWriter(path=destination, stream=stream) as writer:
+        parquet_file = pa.parquet.ParquetFile(source)
+        for record_batch in parquet_file.iter_batches():
+            pa_table = pa.Table.from_batches([record_batch])
+            writer.write_table(pa_table)
+        num_examples, num_bytes = writer.finalize()  # finalize() returns (num_examples, num_bytes)
+    return num_bytes, num_examples
diff --git a/testbed/huggingface__datasets/src/datasets/builder.py b/testbed/huggingface__datasets/src/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..a078cb4c2c878cf1930634f1204ad7d0018515a9
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/builder.py
@@ -0,0 +1,2203 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetBuilder base class."""
+
+import abc
+import contextlib
+import copy
+import inspect
+import os
+import posixpath
+import shutil
+import textwrap
+import time
+import urllib
+import warnings
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import Dict, Iterable, Mapping, Optional, Tuple, Union
+
+import fsspec
+import pyarrow as pa
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from .
import config, utils +from .arrow_dataset import Dataset +from .arrow_reader import ( + HF_GCP_BASE_URL, + ArrowReader, + DatasetNotOnHfGcsError, + MissingFilesOnHfGcsError, + ReadInstruction, +) +from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError +from .data_files import DataFilesDict, sanitize_patterns +from .dataset_dict import DatasetDict, IterableDatasetDict +from .download.download_config import DownloadConfig +from .download.download_manager import DownloadManager, DownloadMode +from .download.mock_download_manager import MockDownloadManager +from .download.streaming_download_manager import StreamingDownloadManager, xopen +from .features import Features +from .filesystems import ( + is_remote_filesystem, + rename, +) +from .fingerprint import Hasher +from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo +from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset +from .keyhash import DuplicatedKeysError +from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase +from .splits import Split, SplitDict, SplitGenerator, SplitInfo +from .streaming import extend_dataset_builder_for_streaming +from .utils import logging +from .utils import tqdm as hf_tqdm +from .utils._filelock import FileLock +from .utils.file_utils import cached_path, is_remote_url +from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits +from .utils.py_utils import ( + classproperty, + convert_file_size_to_int, + has_sufficient_disk_space, + iflatmap_unordered, + map_nested, + memoize, + size_str, + temporary_assignment, +) +from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs + + +logger = logging.get_logger(__name__) + + +class InvalidConfigName(ValueError): + pass + + +class DatasetBuildError(Exception): + pass + + +class ManualDownloadError(DatasetBuildError): + pass + + +class DatasetGenerationError(DatasetBuildError): + pass + + +class FileFormatError(DatasetBuildError): + pass + + +@dataclass +class BuilderConfig: + """Base class for `DatasetBuilder` data configuration. + + `DatasetBuilder` subclasses with data configuration options should subclass + `BuilderConfig` and add their own properties. + + Attributes: + name (`str`, defaults to `default`): + The name of the configuration. + version (`Version` or `str`, defaults to `0.0.0`): + The version of the configuration. + data_dir (`str`, *optional*): + Path to the directory containing the source data. + data_files (`str` or `Sequence` or `Mapping`, *optional*): + Path(s) to source data file(s). + description (`str`, *optional*): + A human description of the configuration. + """ + + name: str = "default" + version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0") + data_dir: Optional[str] = None + data_files: Optional[DataFilesDict] = None + description: Optional[str] = None + + def __post_init__(self): + # The config name is used to name the cache directory. + for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH: + if invalid_char in self.name: + raise InvalidConfigName( + f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. " + f"They could create issues when creating a directory for this config on Windows filesystem." 
+                )
+        if self.data_files is not None and not isinstance(self.data_files, DataFilesDict):
+            raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
+
+    def __eq__(self, o):
+        # we need to override the default dataclass __eq__ since it doesn't check for
+        # other attributes than the ones of the signature.
+        if set(self.__dict__.keys()) != set(o.__dict__.keys()):
+            return False
+        return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
+
+    def create_config_id(
+        self,
+        config_kwargs: dict,
+        custom_features: Optional[Features] = None,
+    ) -> str:
+        """
+        The config id is used to build the cache directory.
+        By default it is equal to the config name.
+        However the name of a config is not sufficient to have a unique identifier for the dataset being generated
+        since it doesn't take into account:
+        - the config kwargs that can be used to overwrite attributes
+        - the custom features used to write the dataset
+        - the data_files for json/text/csv/pandas datasets
+
+        Therefore the config id is just the config name with an optional suffix based on these.
+        """
+        # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
+        suffix: Optional[str] = None
+        config_kwargs_to_add_to_suffix = config_kwargs.copy()
+        # name and version are already used to build the cache directory
+        config_kwargs_to_add_to_suffix.pop("name", None)
+        config_kwargs_to_add_to_suffix.pop("version", None)
+        # data dir handling (when specified it points to the manually downloaded data):
+        # it was previously ignored before the introduction of config id because we didn't want
+        # to change the config name. Now it's fine to take it into account for the config id.
+        # config_kwargs_to_add_to_suffix.pop("data_dir", None)
+        if "data_dir" in config_kwargs_to_add_to_suffix:
+            if config_kwargs_to_add_to_suffix["data_dir"] is None:
+                config_kwargs_to_add_to_suffix.pop("data_dir", None)
+            else:
+                # canonicalize the data dir to avoid two paths to the same location having different
+                # hashes
+                data_dir = config_kwargs_to_add_to_suffix["data_dir"]
+                data_dir = os.path.normpath(data_dir)
+                config_kwargs_to_add_to_suffix["data_dir"] = data_dir
+        if config_kwargs_to_add_to_suffix:
+            # we don't care about the order of the kwargs
+            config_kwargs_to_add_to_suffix = {
+                k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
+            }
+            if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
+                suffix = ",".join(
+                    str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
+                )
+                if len(suffix) > 32:  # hash if too long
+                    suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+            else:
+                suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+
+        if custom_features is not None:
+            m = Hasher()
+            if suffix:
+                m.update(suffix)
+            m.update(custom_features)
+            suffix = m.hexdigest()
+
+        if suffix:
+            config_id = self.name + "-" + suffix
+            if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
+                config_id = self.name + "-" + Hasher.hash(suffix)
+            return config_id
+        else:
+            return self.name
+
+
+class DatasetBuilder:
+    """Abstract base class for all datasets.
+
+    `DatasetBuilder` has 3 key methods:
+
+        - [`DatasetBuilder.info`]: Documents the dataset, including feature
+          names, types, shapes, version, splits, citation, etc.
+        - [`DatasetBuilder.download_and_prepare`]: Downloads the source data
+          and writes it to disk.
+        - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
+
+    Some `DatasetBuilder`s expose multiple variants of the
+    dataset by defining a [`BuilderConfig`] subclass and accepting a
+    config object (or name) on construction. Configurable datasets expose a
+    pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
+
+    Args:
+        cache_dir (`str`, *optional*):
+            Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
+        dataset_name (`str`, *optional*):
+            Name of the dataset, if different from the builder name. Useful for packaged builders
+            like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
+            that use the same packaged builder.
+        config_name (`str`, *optional*):
+            Name of the dataset configuration.
+            It affects the data generated on disk. Different configurations will have their own subdirectories and
+            versions.
+            If not provided, the default configuration is used (if it exists).
+
+            <Deprecated version="2.3.0">
+
+            Parameter `name` was renamed to `config_name`.
+
+            </Deprecated>
+
+        hash (`str`, *optional*):
+            Hash specific to the dataset code. Used to update the caching directory when the
+            dataset loading script code is updated (to avoid reusing old data).
+            The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
+        base_path (`str`, *optional*):
+            Base path for relative paths that are used to download files.
+            This can be a remote URL.
+        features ([`Features`], *optional*):
+            Features types to use with this dataset.
+            It can be used to change the [`Features`] types of a dataset, for example.
+        token (`str` or `bool`, *optional*):
+            String or boolean to use as Bearer token for remote files on the
+            Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
+        repo_id (`str`, *optional*):
+            ID of the dataset repository.
+            Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
+            and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
+        data_files (`str` or `Sequence` or `Mapping`, *optional*):
+            Path(s) to source data file(s).
+            For builders like "csv" or "json" that need the user to specify data files. They can be either
+            local or remote files. For convenience, you can use a `DataFilesDict`.
+        data_dir (`str`, *optional*):
+            Path to directory containing source data file(s).
+            Use only if `data_files` is not passed, in which case it is equivalent to passing
+            `os.path.join(data_dir, "**")` as `data_files`.
+            For builders that require manual download, it must be the path to the local directory containing the
+            manually downloaded data.
+        storage_options (`dict`, *optional*):
+            Key/value pairs to be passed on to the dataset file-system backend, if any.
+        writer_batch_size (`int`, *optional*):
+            Batch size used by the ArrowWriter.
+            It defines the number of samples that are kept in memory before writing them
+            and also the length of the arrow chunks.
+            None means that the ArrowWriter will use its default value.
+        name (`str`): Configuration name for the dataset.
+
+            <Deprecated version="2.3.0">
+
+            Use `config_name` instead.
+
+            </Deprecated>
+
+        **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
+            configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
+            configuration class is [`BuilderConfig`] or a subclass of it.
+    """
+
+    # Default version
+    VERSION = None  # Default version set in BuilderConfig
+
+    # Class for the builder config.
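+    # An illustrative (hypothetical) override for a builder with custom options:
+    #
+    #     @dataclass
+    #     class MyConfig(BuilderConfig):
+    #         language: str = "en"
+    #
+    #     class MyDataset(DatasetBuilder):
+    #         BUILDER_CONFIG_CLASS = MyConfig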
+ BUILDER_CONFIG_CLASS = BuilderConfig + + # Named configurations that modify the data generated by download_and_prepare. + BUILDER_CONFIGS = [] + + # Optional default config name to be used when name is None + DEFAULT_CONFIG_NAME = None + + # Default batch size used by the ArrowWriter + # It defines the number of samples that are kept in memory before writing them + # and also the length of the arrow chunks + # None means that the ArrowWriter will use its default value + DEFAULT_WRITER_BATCH_SIZE = None + + def __init__( + self, + cache_dir: Optional[str] = None, + dataset_name: Optional[str] = None, + config_name: Optional[str] = None, + hash: Optional[str] = None, + base_path: Optional[str] = None, + info: Optional[DatasetInfo] = None, + features: Optional[Features] = None, + token: Optional[Union[bool, str]] = None, + use_auth_token="deprecated", + repo_id: Optional[str] = None, + data_files: Optional[Union[str, list, dict, DataFilesDict]] = None, + data_dir: Optional[str] = None, + storage_options: Optional[dict] = None, + writer_batch_size: Optional[int] = None, + name="deprecated", + **config_kwargs, + ): + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if name != "deprecated": + warnings.warn( + "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.", + category=FutureWarning, + ) + config_name = name + # DatasetBuilder name + self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1]) + self.hash: Optional[str] = hash + self.base_path = base_path + self.token = token + # For backwards compatibility (e.g. 
if accessed in a dataset script)
+        self.use_auth_token = token
+        self.repo_id = repo_id
+        self.storage_options = storage_options or {}
+        self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
+        self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
+
+        if data_files is not None and not isinstance(data_files, DataFilesDict):
+            data_files = DataFilesDict.from_patterns(
+                sanitize_patterns(data_files),
+                base_path=base_path,
+                download_config=DownloadConfig(token=token, storage_options=self.storage_options),
+            )
+
+        # Prepare config: BuilderConfig contains name, version and description but can be extended by each dataset
+        if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
+            config_kwargs["features"] = features
+        if data_files is not None:
+            config_kwargs["data_files"] = data_files
+        if data_dir is not None:
+            config_kwargs["data_dir"] = data_dir
+        self.config, self.config_id = self._create_builder_config(
+            config_name=config_name,
+            custom_features=features,
+            **config_kwargs,
+        )
+
+        # Prepare info: DatasetInfo is a standardized dataclass across all datasets
+        # Prefill datasetinfo
+        if info is None:
+            # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
+            info = self.get_exported_dataset_info()
+            info.update(self._info())
+            info.builder_name = self.name
+            info.dataset_name = self.dataset_name
+            info.config_name = self.config.name
+            info.version = self.config.version
+        self.info = info
+        # update info with user specified infos
+        if features is not None:
+            self.info.features = features
+
+        # Prepare data dirs:
+        # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+        self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
+        self._cache_dir_root = (
+            self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
+        )
+        self._cache_downloaded_dir = (
+            posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
+            if cache_dir
+            else str(config.DOWNLOADED_DATASETS_PATH)
+        )
+        self._cache_downloaded_dir = (
+            self._cache_downloaded_dir
+            if is_remote_url(self._cache_downloaded_dir)
+            else os.path.expanduser(self._cache_downloaded_dir)
+        )
+        self._cache_dir = self._build_cache_dir()
+        if not is_remote_url(self._cache_dir_root):
+            os.makedirs(self._cache_dir_root, exist_ok=True)
+            lock_path = os.path.join(
+                self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
+            )
+            with FileLock(lock_path):
+                if os.path.exists(self._cache_dir):  # check if data exist
+                    if len(os.listdir(self._cache_dir)) > 0:
+                        if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
+                            logger.info("Overwrite dataset info from restored data version if exists.")
+                            self.info = DatasetInfo.from_directory(self._cache_dir)
+                    else:  # dir exists but no data, remove the empty dir as data aren't available anymore
+                        logger.warning(
+                            f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. 
" + ) + os.rmdir(self._cache_dir) + + # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare + self._output_dir = self._cache_dir + self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file") + + # Set download manager + self.dl_manager = None + + # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value. + self._record_infos = False + + # Set in `.download_and_prepare` once the format of the generated dataset is known + self._file_format = None + + # Enable streaming (e.g. it patches "open" to work with remote files) + extend_dataset_builder_for_streaming(self) + + def __getstate__(self): + return self.__dict__ + + def __setstate__(self, d): + self.__dict__ = d + # Re-enable streaming, since patched functions are not kept when pickling + extend_dataset_builder_for_streaming(self) + + # Must be set for datasets that use 'data_dir' functionality - the ones + # that require users to do additional steps to download the data + # (this is usually due to some external regulations / rules). + # This field should contain a string with user instructions, including + # the list of files that should be present. It will be + # displayed in the dataset documentation. + @property + def manual_download_instructions(self) -> Optional[str]: + return None + + def _has_legacy_cache(self) -> bool: + """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name}""" + if ( + self.__module__.startswith("datasets.") + and not is_remote_url(self._cache_dir_root) + and self.config.name == "default" + ): + namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None + legacy_config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name + legacy_config_id = legacy_config_name + self.config_id[len(self.config.name) :] + legacy_cache_dir = os.path.join( + self._cache_dir_root, + self.name if namespace is None else f"{namespace}___{self.name}", + legacy_config_id, + ) + return os.path.isdir(legacy_cache_dir) + return False + + @classmethod + def get_all_exported_dataset_infos(cls) -> DatasetInfosDict: + """Empty dict if doesn't exist + + Example: + + ```py + >>> from datasets import load_dataset_builder + >>> ds_builder = load_dataset_builder('rotten_tomatoes') + >>> ds_builder.get_all_exported_dataset_infos() + {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. 
This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)} + ``` + """ + return DatasetInfosDict.from_directory(cls.get_imported_module_dir()) + + def get_exported_dataset_info(self) -> DatasetInfo: + """Empty `DatasetInfo` if doesn't exist + + Example: + + ```py + >>> from datasets import load_dataset_builder + >>> ds_builder = load_dataset_builder('rotten_tomatoes') + >>> ds_builder.get_exported_dataset_info() + DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. 
This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231) + ``` + """ + return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo()) + + def _create_builder_config( + self, config_name=None, custom_features=None, **config_kwargs + ) -> Tuple[BuilderConfig, str]: + """Create and validate BuilderConfig object as well as a unique config id for this config. + Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None. + config_kwargs override the defaults kwargs in config + """ + builder_config = None + + # try default config + if config_name is None and self.BUILDER_CONFIGS and not config_kwargs: + if self.DEFAULT_CONFIG_NAME is not None: + builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME) + logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}") + else: + if len(self.BUILDER_CONFIGS) > 1: + example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')" + raise ValueError( + "Config name is missing." + f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}" + + f"\nExample of usage:\n\t`{example_of_usage}`" + ) + builder_config = self.BUILDER_CONFIGS[0] + logger.info( + f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}" + ) + + # try to get config by name + if isinstance(config_name, str): + builder_config = self.builder_configs.get(config_name) + if builder_config is None and self.BUILDER_CONFIGS: + raise ValueError( + f"BuilderConfig '{config_name}' not found. 
Available: {list(self.builder_configs.keys())}"
+                )
+
+        # if not using an existing config, then create a new config on the fly
+        if not builder_config:
+            if config_name is not None:
+                config_kwargs["name"] = config_name
+            elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
+                # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
+                config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
+            if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
+                config_kwargs["version"] = self.VERSION
+            builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
+
+        # otherwise use the config_kwargs to overwrite the attributes
+        else:
+            builder_config = copy.deepcopy(builder_config)
+            for key, value in config_kwargs.items():
+                if value is not None:
+                    if not hasattr(builder_config, key):
+                        raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
+                    setattr(builder_config, key, value)
+
+        if not builder_config.name:
+            raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
+
+        # compute the config id that is going to be used for caching
+        config_id = builder_config.create_config_id(
+            config_kwargs,
+            custom_features=custom_features,
+        )
+        is_custom = (config_id not in self.builder_configs) and config_id != "default"
+        if is_custom:
+            logger.info(f"Using custom data configuration {config_id}")
+        else:
+            if (
+                builder_config.name in self.builder_configs
+                and builder_config != self.builder_configs[builder_config.name]
+            ):
+                raise ValueError(
+                    "Cannot name a custom BuilderConfig the same as an available "
+                    f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
+                )
+            if not builder_config.version:
+                raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
+
+        return builder_config, config_id
+
+    @classproperty
+    @classmethod
+    @memoize()
+    def builder_configs(cls):
+        """Dictionary of pre-defined configurations for this builder class."""
+        configs = {config.name: config for config in cls.BUILDER_CONFIGS}
+        if len(configs) != len(cls.BUILDER_CONFIGS):
+            names = [config.name for config in cls.BUILDER_CONFIGS]
+            raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
+        return configs
+
+    @property
+    def cache_dir(self):
+        return self._cache_dir
+
+    def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
+        """Relative path of this dataset in cache_dir:
+        Will be:
+            self.dataset_name/self.config_id/self.config.version/self.hash/
+        or if a repo_id with a namespace has been specified:
+            self.namespace___self.dataset_name/self.config_id/self.config.version/self.hash/
+        If any of these elements is missing or if ``with_version=False`` the corresponding subfolders are dropped.
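+
+        Example (illustrative values; the hash is hypothetical)::
+
+            squad/plain_text/1.0.0/0ffdb00f4ed5            # repo without a namespace
+            lhoestq___squad/plain_text/1.0.0/0ffdb00f4ed5  # namespaced repo_id "lhoestq/squad"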
+ """ + + # Check for the legacy cache directory template (datasets<3.0.0) + if self._has_legacy_cache(): + # use legacy names + dataset_name = self.name + config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name + config_id = config_name + self.config_id[len(self.config.name) :] + else: + dataset_name = self.dataset_name + config_name = self.config.name + config_id = self.config_id + + namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None + builder_data_dir = dataset_name if namespace is None else f"{namespace}___{dataset_name}" + builder_data_dir = posixpath.join(builder_data_dir, config_id) + if with_version: + builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version)) + if with_hash and self.hash and isinstance(self.hash, str): + builder_data_dir = posixpath.join(builder_data_dir, self.hash) + return builder_data_dir + + def _build_cache_dir(self): + """Return the data directory for the current version.""" + builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False)) + version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True)) + + def _other_versions_on_disk(): + """Returns previous versions on disk.""" + if not os.path.exists(builder_data_dir): + return [] + + version_dirnames = [] + for dir_name in os.listdir(builder_data_dir): + try: + version_dirnames.append((utils.Version(dir_name), dir_name)) + except ValueError: # Invalid version (ex: incomplete data dir) + pass + version_dirnames.sort(reverse=True) + return version_dirnames + + # Check and warn if other versions exist + if not is_remote_url(builder_data_dir): + version_dirs = _other_versions_on_disk() + if version_dirs: + other_version = version_dirs[0][0] + if other_version != self.config.version: + warn_msg = ( + f"Found a different version {str(other_version)} of dataset {self.dataset_name} in " + f"cache_dir {self._cache_dir_root}. Using currently defined version " + f"{str(self.config.version)}." + ) + logger.warning(warn_msg) + + return version_data_dir + + @abc.abstractmethod + def _info(self) -> DatasetInfo: + """Construct the DatasetInfo object. See `DatasetInfo` for details. + + Warning: This function is only called once and the result is cached for all + following .info() calls. + + Returns: + info: (DatasetInfo) The dataset information + """ + raise NotImplementedError + + @classmethod + def get_imported_module_dir(cls): + """Return the path of the module of this class or subclass.""" + return os.path.dirname(inspect.getfile(inspect.getmodule(cls))) + + def _rename(self, src: str, dst: str): + rename(self._fs, src, dst) + + def download_and_prepare( + self, + output_dir: Optional[str] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + verification_mode: Optional[Union[VerificationMode, str]] = None, + ignore_verifications="deprecated", + try_from_hf_gcs: bool = True, + dl_manager: Optional[DownloadManager] = None, + base_path: Optional[str] = None, + use_auth_token="deprecated", + file_format: str = "arrow", + max_shard_size: Optional[Union[int, str]] = None, + num_proc: Optional[int] = None, + storage_options: Optional[dict] = None, + **download_and_prepare_kwargs, + ): + """Downloads and prepares dataset for reading. + + Args: + output_dir (`str`, *optional*): + Output directory for the dataset. 
+ Defaults to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
+
+
+ download_config (`DownloadConfig`, *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, *optional*):
+ Select the download/generate mode, defaults to `REUSE_DATASET_IF_EXISTS`.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+
+ try_from_hf_gcs (`bool`):
+ If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
+ dl_manager (`DownloadManager`, *optional*):
+ Specific `DownloadManager` to use.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files. This can be a remote URL.
+ If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
+ use_auth_token (`Union[str, bool]`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, the token will be read from `~/.huggingface`.
+
+
+
+ Pass `use_auth_token` to `load_dataset_builder` instead.
+
+
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Defaults to the "arrow" format.
+ If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
+
+
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, defaults to "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size`, thanks to Parquet compression for example.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the caching file-system backend, if any.
+
+
+ **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
+ + Example: + + Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`: + + ```py + >>> from datasets import load_dataset_builder + >>> builder = load_dataset_builder("rotten_tomatoes") + >>> builder.download_and_prepare() + ``` + + Download and prepare the dataset as sharded Parquet files locally: + + ```py + >>> from datasets import load_dataset_builder + >>> builder = load_dataset_builder("rotten_tomatoes") + >>> builder.download_and_prepare("./output_dir", file_format="parquet") + ``` + + Download and prepare the dataset as sharded Parquet files in a cloud storage: + + ```py + >>> from datasets import load_dataset_builder + >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} + >>> builder = load_dataset_builder("rotten_tomatoes") + >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet") + ``` + """ + if ignore_verifications != "deprecated": + verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS + warnings.warn( + "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.", + FutureWarning, + ) + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.", + FutureWarning, + ) + token = use_auth_token + else: + token = self.token + + output_dir = output_dir if output_dir is not None else self._cache_dir + # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing) + fs, _, [output_dir] = fsspec.get_fs_token_paths(output_dir, storage_options=storage_options) + self._fs = fs + self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir) + + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS) + base_path = base_path if base_path is not None else self.base_path + + if file_format is not None and file_format not in ["arrow", "parquet"]: + raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'") + self._file_format = file_format + + if self._fs._strip_protocol(self._output_dir) == "": + # We don't support the root directory, because it has no dirname, + # and we need a dirname to use a .incomplete directory + # when the dataset is being written + raise RuntimeError( + f"Unable to download and prepare the dataset at the root {self._output_dir}. " + f"Please specify a subdirectory, e.g. 
'{self._output_dir + self.dataset_name}'" + ) + + if dl_manager is None: + if download_config is None: + download_config = DownloadConfig( + cache_dir=self._cache_downloaded_dir, + force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD, + force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD, + use_etag=False, + num_proc=num_proc, + token=token, + storage_options=self.storage_options, + ) # We don't use etag for data files to speed up the process + + dl_manager = DownloadManager( + dataset_name=self.dataset_name, + download_config=download_config, + data_dir=self.config.data_dir, + base_path=base_path, + record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS), + ) + + is_local = not is_remote_filesystem(self._fs) + + if ( + isinstance(dl_manager, MockDownloadManager) + or not is_local + or file_format != "arrow" + or max_shard_size is not None + ): + try_from_hf_gcs = False + self.dl_manager = dl_manager + + # Prevent parallel local disk operations + if is_local: + # Create parent directory of the output_dir to put the lock file in there + Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True) + lock_path = self._output_dir + "_builder.lock" + + # File locking only with local paths; no file locking on GCS or S3 + with FileLock(lock_path) if is_local else contextlib.nullcontext(): + # Check if the data already exists + data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME)) + if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS: + logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})") + # We need to update the info in case some splits were added in the meantime + # for example when calling load_dataset from multiple workers. + self.info = self._load_info() + self.download_post_processing_resources(dl_manager) + return + + logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})") + if is_local: # if cache dir is local, check for available space + if not has_sufficient_disk_space( + self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent + ): + raise OSError( + f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})" + ) + + @contextlib.contextmanager + def incomplete_dir(dirname): + """Create temporary dir for dirname and rename on exit.""" + if not is_local: + self._fs.makedirs(dirname, exist_ok=True) + yield dirname + else: + tmp_dir = dirname + ".incomplete" + os.makedirs(tmp_dir, exist_ok=True) + try: + yield tmp_dir + if os.path.isdir(dirname): + shutil.rmtree(dirname) + # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory + shutil.move(tmp_dir, dirname) + finally: + if os.path.exists(tmp_dir): + shutil.rmtree(tmp_dir) + + # Print is intentional: we want this to always go to stdout so user has + # information needed to cancel download/preparation if needed. + # This comes right before the progress bar. + if self.info.size_in_bytes: + logger.info( + f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} " + f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, " + f"post-processed: {size_str(self.info.post_processing_size)}, " + f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..." 
+ ) + else: + _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir + logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...") + + self._check_manual_download(dl_manager) + + # Create a tmp dir and rename to self._output_dir on successful exit. + with incomplete_dir(self._output_dir) as tmp_output_dir: + # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward + # it to every sub function. + with temporary_assignment(self, "_output_dir", tmp_output_dir): + # Try to download the already prepared dataset files + downloaded_from_gcs = False + if try_from_hf_gcs: + try: + self._download_prepared_from_hf_gcs(dl_manager.download_config) + downloaded_from_gcs = True + except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError): + logger.info("Dataset not on Hf google storage. Downloading and preparing it from source") + except ConnectionError: + logger.warning("HF google storage unreachable. Downloading and preparing it from source") + if not downloaded_from_gcs: + prepare_split_kwargs = {"file_format": file_format} + if max_shard_size is not None: + prepare_split_kwargs["max_shard_size"] = max_shard_size + if num_proc is not None: + prepare_split_kwargs["num_proc"] = num_proc + self._download_and_prepare( + dl_manager=dl_manager, + verification_mode=verification_mode, + **prepare_split_kwargs, + **download_and_prepare_kwargs, + ) + # Sync info + self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) + self.info.download_checksums = dl_manager.get_recorded_sizes_checksums() + self.info.size_in_bytes = self.info.dataset_size + self.info.download_size + # Save info + self._save_info() + + # Download post processing resources + self.download_post_processing_resources(dl_manager) + + logger.info( + f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. " + f"Subsequent calls will reuse this data." + ) + + def _check_manual_download(self, dl_manager): + if self.manual_download_instructions is not None and dl_manager.manual_dir is None: + raise ManualDownloadError( + textwrap.dedent( + f"""\ + The dataset {self.dataset_name} with config {self.config.name} requires manual data. 
+ Please follow the manual download instructions:
+ {self.manual_download_instructions}
+ Manual data can be loaded with:
+ datasets.load_dataset("{self.dataset_name}", data_dir="<path/to/manual/data>")"""
+ )
+ )
+
+ def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ reader = ArrowReader(self._output_dir, self.info)
+ # use reader instructions to download the right files
+ reader.download_from_hf_gcs(download_config, relative_data_dir)
+ downloaded_info = DatasetInfo.from_directory(self._output_dir)
+ self.info.update(downloaded_info)
+ # download post processing resources
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ for split in self.info.splits:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ try:
+ resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
+ shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
+ except ConnectionError:
+ logger.info(f"Couldn't download resource file {resource_file_name} from Hf google storage.")
+ logger.info("Dataset downloaded from Hf google storage.")
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
+ """Downloads and prepares dataset for reading.
+
+ This is the internal implementation, called by `download_and_prepare`, that
+ subclasses override. It should download all required data and generate
+ the pre-processed dataset files.
+
+ Args:
+ dl_manager ([`DownloadManager`]):
+ `DownloadManager` used to download and cache data.
+ verification_mode ([`VerificationMode`]):
+ if `ALL_CHECKS`, perform all the verifications including checksums.
+ if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
+ if `NO_CHECKS`, do not perform any verification.
+ prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
+ """
+ # Generating data for all splits
+ split_dict = SplitDict(dataset_name=self.dataset_name)
+ split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
+ split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
+
+ # Checksums verification
+ if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
+ verify_checksums(
+ self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
+ )
+
+ # Build splits
+ for split_generator in split_generators:
+ if str(split_generator.split_info.name).lower() == "all":
+ raise ValueError(
+ "`all` is a special split keyword corresponding to the "
+ "union of all splits, so cannot be used as key in "
+ "._split_generators()."
+ )
+
+ logger.info(f"Generating {split_generator.split_info.name} split")
+ split_dict.add(split_generator.split_info)
+
+ try:
+ # Prepare split will record examples associated with the split
+ self._prepare_split(split_generator, **prepare_split_kwargs)
+ except OSError as e:
+ raise OSError(
+ "Cannot find data file. "
+ + (self.manual_download_instructions or "")
+ + "\nOriginal error:\n"
+ + str(e)
+ ) from None
+ # If check_duplicates is set to True, catch DuplicatedKeysError and re-raise it with a fix message
+ except DuplicatedKeysError as e:
+ raise DuplicatedKeysError(
+ e.key,
+ e.duplicate_key_indices,
+ fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
+ ) from None
+ dl_manager.manage_extracted_files()
+
+ if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
+ verify_splits(self.info.splits, split_dict)
+
+ # Update the info object with the splits.
+ self.info.splits = split_dict
+ self.info.download_size = dl_manager.downloaded_size
+
+ def download_post_processing_resources(self, dl_manager):
+ for split in self.info.splits or []:
+ for resource_name, resource_file_name in self._post_processing_resources(split).items():
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resource_path = os.path.join(self._output_dir, resource_file_name)
+ if not os.path.exists(resource_path):
+ downloaded_resource_path = self._download_post_processing_resources(
+ split, resource_name, dl_manager
+ )
+ if downloaded_resource_path:
+ logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
+ shutil.move(downloaded_resource_path, resource_path)
+
+ def _load_info(self) -> DatasetInfo:
+ return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_info(self):
+ file_lock = (
+ FileLock(self._output_dir + "_info.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_infos(self):
+ file_lock = (
+ FileLock(self._output_dir + "_infos.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
+ del prepare_split_kwargs
+ return {}
+
+ def as_dataset(
+ self,
+ split: Optional[Split] = None,
+ run_post_process=True,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ in_memory=False,
+ ) -> Union[Dataset, DatasetDict]:
+ """Return a Dataset for the specified split.
+
+ Args:
+ split (`datasets.Split`):
+ Which subset of the data to return.
+ run_post_process (`bool`, defaults to `True`):
+ Whether to run post-processing dataset transforms and/or add
+ indexes.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the
+ downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Whether to ignore the verifications of the
+ downloaded/processed dataset information (checksums/size/splits/...).
+
+
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ datasets.Dataset
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+ >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 8530
+ })
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if self._file_format is not None and self._file_format != "arrow":
+ raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
+ if not os.path.exists(self._output_dir):
+ raise FileNotFoundError(
+ f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
+ "builder.download_and_prepare(), or use "
+ "datasets.load_dataset() before trying to access the Dataset object."
+ )
+
+ logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
+
+ # By default, return all splits
+ if split is None:
+ split = {s: s for s in self.info.splits}
+
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ partial(
+ self._build_single_dataset,
+ run_post_process=run_post_process,
+ verification_mode=verification_mode,
+ in_memory=in_memory,
+ ),
+ split,
+ map_tuple=True,
+ disable_tqdm=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = DatasetDict(datasets)
+ return datasets
+
+ def _build_single_dataset(
+ self,
+ split: Union[str, ReadInstruction, Split],
+ run_post_process: bool,
+ verification_mode: VerificationMode,
+ in_memory: bool = False,
+ ):
+ """as_dataset for a single split."""
+ if not isinstance(split, ReadInstruction):
+ split = str(split)
+ if split == "all":
+ split = "+".join(self.info.splits.keys())
+ split = Split(split)
+
+ # Build base dataset
+ ds = self._as_dataset(
+ split=split,
+ in_memory=in_memory,
+ )
+ if run_post_process:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resources_paths = {
+ resource_name: os.path.join(self._output_dir, resource_file_name)
+ for resource_name, resource_file_name in self._post_processing_resources(split).items()
+ }
+ post_processed = self._post_process(ds, resources_paths)
+ if post_processed is not None:
+ ds = post_processed
+ recorded_checksums = {}
+ record_checksums = False
+ for resource_name, resource_path in resources_paths.items():
+ size_checksum = get_size_checksum_dict(resource_path)
+ recorded_checksums[resource_name] = size_checksum
+ if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
+ if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
+ expected_checksums = None
+ else:
+ expected_checksums = self.info.post_processed.resources_checksums.get(split)
+ verify_checksums(expected_checksums,
recorded_checksums, "post processing resources") + if self.info.post_processed is None: + self.info.post_processed = PostProcessedInfo() + if self.info.post_processed.resources_checksums is None: + self.info.post_processed.resources_checksums = {} + self.info.post_processed.resources_checksums[str(split)] = recorded_checksums + self.info.post_processing_size = sum( + checksums_dict["num_bytes"] + for split_checksums_dicts in self.info.post_processed.resources_checksums.values() + for checksums_dict in split_checksums_dicts.values() + ) + if self.info.dataset_size is not None and self.info.download_size is not None: + self.info.size_in_bytes = ( + self.info.dataset_size + self.info.download_size + self.info.post_processing_size + ) + self._save_info() + ds._info.post_processed = self.info.post_processed + ds._info.post_processing_size = self.info.post_processing_size + ds._info.size_in_bytes = self.info.size_in_bytes + if self.info.post_processed.features is not None: + if self.info.post_processed.features.type != ds.features.type: + raise ValueError( + f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}" + ) + else: + ds.info.features = self.info.post_processed.features + + return ds + + def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset: + """Constructs a `Dataset`. + + This is the internal implementation to overwrite called when user calls + `as_dataset`. It should read the pre-processed datasets files and generate + the `Dataset` object. + + Args: + split (`datasets.Split`): + which subset of the data to read. + in_memory (`bool`, defaults to `False`): + Whether to copy the data in-memory. + + Returns: + `Dataset` + """ + cache_dir = self._fs._strip_protocol(self._output_dir) + dataset_name = self.dataset_name + if self._has_legacy_cache(): + dataset_name = self.name + dataset_kwargs = ArrowReader(cache_dir, self.info).read( + name=dataset_name, + instructions=split, + split_infos=self.info.splits.values(), + in_memory=in_memory, + ) + fingerprint = self._get_dataset_fingerprint(split) + return Dataset(fingerprint=fingerprint, **dataset_kwargs) + + def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str: + """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs.""" + hasher = Hasher() + hasher.update(Path(self._relative_data_dir()).as_posix()) + hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder) + fingerprint = hasher.hexdigest() + return fingerprint + + def as_streaming_dataset( + self, + split: Optional[str] = None, + base_path: Optional[str] = None, + ) -> Union[Dict[str, IterableDataset], IterableDataset]: + if is_remote_filesystem(self._fs): + raise NotImplementedError( + f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet." 
+ ) + + dl_manager = StreamingDownloadManager( + base_path=base_path or self.base_path, + download_config=DownloadConfig(token=self.token, storage_options=self.storage_options), + dataset_name=self.dataset_name, + data_dir=self.config.data_dir, + ) + self._check_manual_download(dl_manager) + splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)} + # By default, return all splits + if split is None: + splits_generator = splits_generators + elif split in splits_generators: + splits_generator = splits_generators[split] + else: + raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}") + + # Create a dataset for each of the given splits + datasets = map_nested( + self._as_streaming_dataset_single, + splits_generator, + map_tuple=True, + ) + if isinstance(datasets, dict): + datasets = IterableDatasetDict(datasets) + return datasets + + def _as_streaming_dataset_single( + self, + splits_generator, + ) -> IterableDataset: + ex_iterable = self._get_examples_iterable_for_split(splits_generator) + # add auth to be able to access and decode audio/image files from private repositories. + token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {} + return IterableDataset( + ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id + ) + + def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]: + """Run dataset transforms or add indexes""" + return None + + def _post_processing_resources(self, split: str) -> Dict[str, str]: + """Mapping resource_name -> resource_file_name""" + return {} + + def _download_post_processing_resources( + self, split: str, resource_name: str, dl_manager: DownloadManager + ) -> Optional[str]: + """Download the resource using the download manager and return the downloaded path.""" + return None + + @abc.abstractmethod + def _split_generators(self, dl_manager: DownloadManager): + """Specify feature dictionary generators and dataset splits. + + This function returns a list of `SplitGenerator`s defining how to generate + data and what splits to use. + + Example: + + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={'file': 'train_data.zip'}, + ), + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={'file': 'test_data.zip'}, + ), + ] + + The above code will first call `_generate_examples(file='train_data.zip')` + to write the train data, then `_generate_examples(file='test_data.zip')` to + write the test data. + + Datasets are typically split into different subsets to be used at various + stages of training and evaluation. + + Note that for datasets without a `VALIDATION` split, you can use a + fraction of the `TRAIN` data for evaluation as you iterate on your model + so as not to overfit to the `TEST` data. + + For downloads and extractions, use the given `download_manager`. + Note that the `DownloadManager` caches downloads, so it is fine to have each + generator attempt to download the source data. + + A good practice is to download all data in this function, and then + distribute the relevant parts to each split with the `gen_kwargs` argument + + Args: + dl_manager (`DownloadManager`): + Download manager to download the data + + Returns: + `list`. 
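+
+ A minimal sketch combining download and split definition (the URL and file
+ names below are hypothetical placeholders):
+
+ def _split_generators(self, dl_manager):
+ archive_dir = dl_manager.download_and_extract("https://example.com/data.zip")
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={"file": os.path.join(archive_dir, "train.csv")},
+ ),
+ ]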
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Generate the examples and record them on disk.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Defaults to the "arrow" format.
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, defaults to "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size`, thanks to Parquet compression for example.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ **kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
+ beam pipeline)
+ """
+ raise NotImplementedError()
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ """Generate the examples on the fly.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ """
+ raise NotImplementedError()
+
+
+class GeneratorBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on dict generators.
+
+ `GeneratorBasedBuilder` is a convenience class that abstracts away much
+ of the data writing and reading of `DatasetBuilder`. It expects subclasses to
+ implement generators of feature dictionaries across the dataset splits
+ (`_split_generators`). See the method docstrings for details.
+ """
+
+ @abc.abstractmethod
+ def _generate_examples(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+ This function preprocesses the examples from the raw data into the preprocessed
+ dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written to
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+ * Unique: An error will be raised if two examples are yielded with the
+ same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+ The key will be hashed and sorted to shuffle examples deterministically,
+ so that generating the dataset multiple times keeps examples in the
+ same order.
+ example: `dict`, a feature dictionary
+ ready to be encoded and written to disk. The example will be
+ encoded with `self.info.features.encode_example({...})`.
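+
+ A minimal sketch, assuming a plain-text file with one example per line
+ (`filepath` is a hypothetical `gen_kwargs` entry):
+
+ def _generate_examples(self, filepath):
+ with open(filepath, encoding="utf-8") as f:
+ for line_number, line in enumerate(f):
+ # the line number is a unique, deterministic key
+ yield line_number, {"text": line.strip()}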
+ """ + raise NotImplementedError() + + def _prepare_split( + self, + split_generator: SplitGenerator, + check_duplicate_keys: bool, + file_format="arrow", + num_proc: Optional[int] = None, + max_shard_size: Optional[Union[int, str]] = None, + ): + max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) + + if self.info.splits is not None: + split_info = self.info.splits[split_generator.name] + else: + split_info = split_generator.split_info + + SUFFIX = "-JJJJJ-SSSSS-of-NNNNN" + fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}" + fpath = posixpath.join(self._output_dir, fname) + + if num_proc and num_proc > 1: + num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs) + if num_input_shards <= 1: + logger.warning( + f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard." + ) + num_proc = 1 + elif num_input_shards < num_proc: + logger.warning( + f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards." + ) + num_proc = num_input_shards + + pbar = hf_tqdm( + unit=" examples", + total=split_info.num_examples, + desc=f"Generating {split_info.name} split", + ) + + _prepare_split_args = { + "fpath": fpath, + "file_format": file_format, + "max_shard_size": max_shard_size, + "split_info": split_info, + "check_duplicate_keys": check_duplicate_keys, + } + + if num_proc is None or num_proc == 1: + result = None + gen_kwargs = split_generator.gen_kwargs + job_id = 0 + with pbar: + for job_id, done, content in self._prepare_split_single( + gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args + ): + if done: + result = content + else: + pbar.update(content) + # wrapping everything into lists for consistency with the multiprocessed code path + assert result is not None, "Failed to retrieve results from prepare_split" + examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [ + [item] for item in result + ] + else: + kwargs_per_job = [ + {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args} + for job_id, gen_kwargs in enumerate( + _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc) + ) + ] + num_jobs = len(kwargs_per_job) + + examples_per_job = [None] * num_jobs + bytes_per_job = [None] * num_jobs + features_per_job = [None] * num_jobs + shards_per_job = [None] * num_jobs + shard_lengths_per_job = [None] * num_jobs + + with Pool(num_proc) as pool: + with pbar: + for job_id, done, content in iflatmap_unordered( + pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job + ): + if done: + # the content is the result of the job + ( + examples_per_job[job_id], + bytes_per_job[job_id], + features_per_job[job_id], + shards_per_job[job_id], + shard_lengths_per_job[job_id], + ) = content + else: + # the content is the number of examples progress update + pbar.update(content) + + assert ( + None not in examples_per_job + ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results" + + total_shards = sum(shards_per_job) + total_num_examples = sum(examples_per_job) + total_num_bytes = sum(bytes_per_job) + features = features_per_job[0] + + split_generator.split_info.num_examples = total_num_examples + split_generator.split_info.num_bytes = total_num_bytes + + # should rename everything at the end + 
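# NOTE: each job wrote its shards with a JJJJJ (job index) and SSSSS (shard index)
+ # suffix; the block below renames them to a single flat SSSSS-of-NNNNN numbering
+ # that is global across all jobs.
+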
logger.debug(f"Renaming {total_shards} shards.") + if total_shards > 1: + # use the -SSSSS-of-NNNNN pattern + + def _rename_shard(shard_and_job: Tuple[int]): + shard_id, job_id = shard_and_job + global_shard_id = sum(shards_per_job[:job_id]) + shard_id + self._rename( + fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), + ) + + shards_and_jobs = [ + (shard_id, job_id) + for job_id, num_shards in enumerate(shards_per_job) + for shard_id in range(num_shards) + ] + thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64) + + split_generator.split_info.shard_lengths = [ + shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths + ] + else: + # don't use any pattern + shard_id, job_id = 0, 0 + self._rename( + fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + fpath.replace(SUFFIX, ""), + ) + + if self.info.features is None: + self.info.features = features + + def _prepare_split_single( + self, + gen_kwargs: dict, + fpath: str, + file_format: str, + max_shard_size: int, + split_info: SplitInfo, + check_duplicate_keys: bool, + job_id: int, + ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: + generator = self._generate_examples(**gen_kwargs) + writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter + embed_local_files = file_format == "parquet" + shard_lengths = [] + total_num_examples, total_num_bytes = 0, 0 + + shard_id = 0 + num_examples_progress_update = 0 + try: + writer = writer_class( + features=self.info.features, + path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + writer_batch_size=self._writer_batch_size, + hash_salt=split_info.name, + check_duplicates=check_duplicate_keys, + storage_options=self._fs.storage_options, + embed_local_files=embed_local_files, + ) + try: + _time = time.time() + for key, record in generator: + if max_shard_size is not None and writer._num_bytes > max_shard_size: + num_examples, num_bytes = writer.finalize() + writer.close() + shard_lengths.append(num_examples) + total_num_examples += num_examples + total_num_bytes += num_bytes + shard_id += 1 + writer = writer_class( + features=writer._features, + path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + writer_batch_size=self._writer_batch_size, + hash_salt=split_info.name, + check_duplicates=check_duplicate_keys, + storage_options=self._fs.storage_options, + embed_local_files=embed_local_files, + ) + example = self.info.features.encode_example(record) if self.info.features is not None else record + writer.write(example, key) + num_examples_progress_update += 1 + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield job_id, False, num_examples_progress_update + num_examples_progress_update = 0 + finally: + yield job_id, False, num_examples_progress_update + num_shards = shard_id + 1 + num_examples, num_bytes = writer.finalize() + writer.close() + shard_lengths.append(num_examples) + total_num_examples += num_examples + total_num_bytes += num_bytes + except Exception as e: + # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded + if isinstance(e, SchemaInferenceError) and e.__context__ is not None: + e = e.__context__ + raise DatasetGenerationError("An error occurred while generating the 
dataset") from e + + yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths) + + def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs): + super()._download_and_prepare( + dl_manager, + verification_mode, + check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS + or verification_mode == VerificationMode.ALL_CHECKS, + **prepare_splits_kwargs, + ) + + def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: + return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs) + + +class ArrowBasedBuilder(DatasetBuilder): + """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet).""" + + @abc.abstractmethod + def _generate_tables(self, **kwargs): + """Default function generating examples for each `SplitGenerator`. + + This function preprocess the examples from the raw data to the preprocessed + dataset files. + This function is called once for each `SplitGenerator` defined in + `_split_generators`. The examples yielded here will be written on + disk. + + Args: + **kwargs (additional keyword arguments): + Arguments forwarded from the SplitGenerator.gen_kwargs + + Yields: + key: `str` or `int`, a unique deterministic example identification key. + * Unique: An error will be raised if two examples are yield with the + same key. + * Deterministic: When generating the dataset twice, the same example + should have the same key. + Good keys can be the image id, or line number if examples are extracted + from a text file. + The key will be hashed and sorted to shuffle examples deterministically, + such as generating the dataset multiple times keep examples in the + same order. + example: `pyarrow.Table`, a feature table + ready to be encoded and written to disk. + """ + raise NotImplementedError() + + def _prepare_split( + self, + split_generator: SplitGenerator, + file_format: str = "arrow", + num_proc: Optional[int] = None, + max_shard_size: Optional[Union[str, int]] = None, + ): + max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) + + try: + split_info = self.info.splits[split_generator.name] + except Exception: + split_info = split_generator.split_info + + SUFFIX = "-JJJJJ-SSSSS-of-NNNNN" + fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}" + fpath = posixpath.join(self._output_dir, fname) + + if num_proc and num_proc > 1: + num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs) + if num_input_shards <= 1: + logger.warning( + f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard." + ) + num_proc = 1 + elif num_input_shards < num_proc: + logger.warning( + f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards." 
+ ) + num_proc = num_input_shards + + pbar = hf_tqdm( + unit=" examples", + total=split_info.num_examples, + desc=f"Generating {split_info.name} split", + ) + + _prepare_split_args = { + "fpath": fpath, + "file_format": file_format, + "max_shard_size": max_shard_size, + } + + if num_proc is None or num_proc == 1: + result = None + gen_kwargs = split_generator.gen_kwargs + job_id = 0 + with pbar: + for job_id, done, content in self._prepare_split_single( + gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args + ): + if done: + result = content + else: + pbar.update(content) + # wrapping everything into lists for consistency with the multiprocessed code path + assert result is not None, "Failed to retrieve results from prepare_split" + examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [ + [item] for item in result + ] + else: + kwargs_per_job = [ + {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args} + for job_id, gen_kwargs in enumerate( + _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc) + ) + ] + num_jobs = len(kwargs_per_job) + + examples_per_job = [None] * num_jobs + bytes_per_job = [None] * num_jobs + features_per_job = [None] * num_jobs + shards_per_job = [None] * num_jobs + shard_lengths_per_job = [None] * num_jobs + + with Pool(num_proc) as pool: + with pbar: + for job_id, done, content in iflatmap_unordered( + pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job + ): + if done: + # the content is the result of the job + ( + examples_per_job[job_id], + bytes_per_job[job_id], + features_per_job[job_id], + shards_per_job[job_id], + shard_lengths_per_job[job_id], + ) = content + else: + # the content is the number of examples progress update + pbar.update(content) + + assert ( + None not in examples_per_job + ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results" + + total_shards = sum(shards_per_job) + total_num_examples = sum(examples_per_job) + total_num_bytes = sum(bytes_per_job) + features = features_per_job[0] + + split_generator.split_info.num_examples = total_num_examples + split_generator.split_info.num_bytes = total_num_bytes + + # should rename everything at the end + logger.debug(f"Renaming {total_shards} shards.") + if total_shards > 1: + # use the -SSSSS-of-NNNNN pattern + + def _rename_shard(shard_id_and_job: Tuple[int]): + shard_id, job_id = shard_id_and_job + global_shard_id = sum(shards_per_job[:job_id]) + shard_id + self._rename( + fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), + ) + + shard_ids_and_jobs = [ + (shard_id, job_id) + for job_id, num_shards in enumerate(shards_per_job) + for shard_id in range(num_shards) + ] + thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64) + + split_generator.split_info.shard_lengths = [ + shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths + ] + else: + # don't use any pattern + shard_id, job_id = 0, 0 + self._rename( + fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + fpath.replace(SUFFIX, ""), + ) + + if self.info.features is None: + self.info.features = features + + def _prepare_split_single( + self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int + ) -> Iterable[Tuple[int, bool, Union[int, 
tuple]]]: + generator = self._generate_tables(**gen_kwargs) + writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter + embed_local_files = file_format == "parquet" + shard_lengths = [] + total_num_examples, total_num_bytes = 0, 0 + + shard_id = 0 + num_examples_progress_update = 0 + try: + writer = writer_class( + features=self.info.features, + path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + writer_batch_size=self._writer_batch_size, + storage_options=self._fs.storage_options, + embed_local_files=embed_local_files, + ) + try: + _time = time.time() + for _, table in generator: + if max_shard_size is not None and writer._num_bytes > max_shard_size: + num_examples, num_bytes = writer.finalize() + writer.close() + shard_lengths.append(num_examples) + total_num_examples += num_examples + total_num_bytes += num_bytes + shard_id += 1 + writer = writer_class( + features=writer._features, + path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"), + writer_batch_size=self._writer_batch_size, + storage_options=self._fs.storage_options, + embed_local_files=embed_local_files, + ) + writer.write_table(table) + num_examples_progress_update += len(table) + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield job_id, False, num_examples_progress_update + num_examples_progress_update = 0 + finally: + yield job_id, False, num_examples_progress_update + num_shards = shard_id + 1 + num_examples, num_bytes = writer.finalize() + writer.close() + shard_lengths.append(num_examples) + total_num_examples += num_examples + total_num_bytes += num_bytes + except Exception as e: + # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded + if isinstance(e, SchemaInferenceError) and e.__context__ is not None: + e = e.__context__ + raise DatasetGenerationError("An error occurred while generating the dataset") from e + + yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths) + + def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable: + return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs) + + +class MissingBeamOptions(ValueError): + pass + + +class BeamBasedBuilder(DatasetBuilder): + """Beam-based Builder.""" + + def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs): + self._beam_runner = beam_runner + self._beam_options = beam_options + self._beam_writers = {} # {split: beam_writer} mapping. + super().__init__(*args, **kwargs) + + def _make_split_generators_kwargs(self, prepare_split_kwargs): + # Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if + # it's in the call signature of `_split_generators()`. + # This allows for global preprocessing in beam. + split_generators_kwargs = {} + split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys() + if "pipeline" in split_generators_arg_names: + split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"] + return split_generators_kwargs + + @abc.abstractmethod + def _build_pcollection(self, pipeline, **kwargs): + """Build the beam pipeline examples for each `SplitGenerator`. + + This function extracts examples from the raw data with parallel transforms + in a Beam pipeline. It is called once for each `SplitGenerator` defined in + `_split_generators`. 
The examples from the PCollection will be + encoded and written to disk. + + + Warning: When running in a distributed setup, make sure that the data + which will be read (download_dir, manual_dir,...) and written (cache_dir) + can be accessed by the workers jobs. The data should be located in a + shared filesystem, like GCS. + + + Args: + pipeline ([`utils.beam_utils.BeamPipeline`]): + Apache Beam pipeline. + **kwargs (additional keyword arguments): + Arguments forwarded from the SplitGenerator.gen_kwargs. + + Returns: + `beam.PCollection`: Apache Beam PCollection containing the + example to send to `self.info.features.encode_example(...)`. + + Example: + + ``` + def _build_pcollection(pipeline, extracted_dir=None): + return ( + pipeline + | beam.Create(gfile.io.listdir(extracted_dir)) + | beam.Map(_process_file) + ) + ``` + """ + raise NotImplementedError() + + def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs): + # Create the Beam pipeline and forward it to `_prepare_split` + import apache_beam as beam + + import datasets.utils.beam_utils as beam_utils + + beam_runner = self._beam_runner + beam_options = self._beam_options + + if not beam_runner and not beam_options: + usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')" + raise MissingBeamOptions( + "Trying to generate a dataset using Apache Beam, yet no Beam Runner " + "or PipelineOptions() has been provided in `load_dataset` or in the " + "builder arguments. For big datasets it has to run on large-scale data " + "processing tools like Dataflow, Spark, etc. More information about " + "Apache Beam runners at " + "https://beam.apache.org/documentation/runners/capability-matrix/" + "\nIf you really want to run it locally because you feel like the " + "Dataset is small enough, you can use the local beam runner called " + "`DirectRunner` (you may run out of memory). \nExample of usage: " + f"\n\t`{usage_example}`" + ) + if self._writer_batch_size is not None: + logger.warning( + "`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing." + ) + + # Beam type checking assumes transforms multiple outputs are of same type, + # which is not our case. Plus it doesn't handle correctly all types, so we + # are better without it. + pipeline_options = {"pipeline_type_check": False} + if "num_proc" in prepare_splits_kwargs: + num_workers = prepare_splits_kwargs.pop("num_proc") + pipeline_options["direct_num_workers"] = num_workers + pipeline_options["num_workers"] = num_workers + pipeline_options["direct_running_mode"] = "multi_processing" + # TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner + raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing it not supported yet.") + beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options) + # Use a single pipeline for all splits + pipeline = beam_utils.BeamPipeline( + runner=beam_runner, + options=beam_options, + ) + super()._download_and_prepare( + dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs + ) # TODO handle verification_mode in beam datasets + # Run pipeline + pipeline_results = pipeline.run() + pipeline_results.wait_until_finish() + metrics = pipeline_results.metrics() + # Update `info.splits`. 
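+ # Each BeamWriter recorded its split's counters under a Beam metrics namespace equal
+ # to the split name; finalize() consumes the queried metrics to recover the number of
+ # examples and bytes that were written for that split.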
+ split_dict = self.info.splits
+ for split_name, beam_writer in self._beam_writers.items():
+ m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
+ num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
+ split_info = split_dict[split_name]
+ split_info.num_examples = num_examples
+ split_info.num_bytes = num_bytes
+ if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
+ # keep the -SSSSS-of-NNNNN pattern
+ split_info.shard_lengths = beam_writer._shard_lengths
+ else:
+ # don't use any pattern
+ file_format = prepare_splits_kwargs.get("file_format", "arrow")
+ src_fname = f"{self.dataset_name}-{split_name}-00000-of-00001.{file_format}"
+ dst_fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ src_fpath = posixpath.join(self._output_dir, src_fname)
+ dst_fpath = posixpath.join(self._output_dir, dst_fname)
+ self._rename(src_fpath, dst_fpath)
+
+ def _save_info(self):
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(f"{self._output_dir}/{config.DATASET_INFO_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_info(f)
+ if self.info.license:
+ with xopen(f"{self._output_dir}/{config.LICENSE_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_license(f)
+
+ def _prepare_split(
+ self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
+ ):
+ import apache_beam as beam
+
+ if max_shard_size is not None:
+ raise NotImplementedError(
+ "max_shard_size is not supported for Beam datasets. "
+ "Please set it to None to use the default Apache Beam sharding and get the best performance."
+ )
+
+ # To write examples to the filesystem:
+ split_name = split_generator.split_info.name
+ fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+ beam_writer = BeamWriter(
+ features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
+ )
+ self._beam_writers[split_name] = beam_writer
+
+ encode_example = self.info.features.encode_example
+
+ # Note: We need to wrap the pipeline in a PTransform to avoid re-using the
+ # same label names for each split
+ @beam.ptransform_fn
+ def _build_pcollection(pipeline):
+ """PTransformation which builds a single split."""
+ # Encode the PCollection
+ pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
+ pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
+ return beam_writer.write_from_pcollection(pcoll_examples)
+
+ # Add the PCollection to the pipeline
+ _ = pipeline | split_name >> _build_pcollection() # pylint: disable=no-value-for-parameter
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ self._request_info_from_hf_gcs()
+ datasets = {
+ split.name: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split.name)
+ for split in self.info.splits.values()
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+        if isinstance(datasets, dict):
+            datasets = IterableDatasetDict(datasets)
+        return datasets
+
+    def _get_examples_iterable_for_split(self, split: SplitInfo) -> ExamplesIterable:
+        return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split})
+
+    def _generate_examples_from_hf_gcs(self, split: SplitInfo):
+        if split.shard_lengths:
+            num_shards = len(split.shard_lengths)
+            remote_prepared_urls = [
+                f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}-{shard_id:05d}-of-{num_shards:05d}.arrow"
+                for shard_id in range(num_shards)
+            ]
+        else:
+            remote_prepared_urls = [f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}.arrow"]
+        key = 0
+        download_config = (
+            self.dl_manager.download_config
+            if self.dl_manager
+            else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+        )
+        for remote_prepared_url in remote_prepared_urls:
+            with xopen(remote_prepared_url, "rb", download_config=download_config) as f:
+                with pa.ipc.open_stream(f) as reader:
+                    for record_batch in reader:
+                        for record in record_batch.to_pylist():
+                            yield key, record
+                            key += 1
+
+    def _request_info_from_hf_gcs(self):
+        from .download.streaming_download_manager import xopen
+
+        remote_dataset_info = f"{self._remote_cache_dir_from_hf_gcs}/{config.DATASET_INFO_FILENAME}"
+        try:
+            download_config = (
+                self.dl_manager.download_config
+                if self.dl_manager
+                else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+            )
+            with xopen(remote_dataset_info, download_config=download_config) as f:
+                import json
+
+                _info = json.load(f)
+        except FileNotFoundError as err:
+            raise DatasetNotOnHfGcsError(err) from None
+        self.info.update(DatasetInfo.from_dict(_info))
+
+    @property
+    def _remote_cache_dir_from_hf_gcs(self):
+        relative_data_dir = self._relative_data_dir(with_hash=False)
+        return HF_GCP_BASE_URL + "/" + Path(relative_data_dir).as_posix()
diff --git a/testbed/huggingface__datasets/src/datasets/combine.py b/testbed/huggingface__datasets/src/datasets/combine.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2aad87f0cc9278626d0be5111f91b6de49ef935
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/combine.py
@@ -0,0 +1,215 @@
+from typing import List, Optional, TypeVar
+
+from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .info import DatasetInfo
+from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
+from .splits import NamedSplit
+from .utils import logging
+from .utils.py_utils import Literal
+
+
+logger = logging.get_logger(__name__)
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def interleave_datasets(
+    datasets: List[DatasetType],
+    probabilities: Optional[List[float]] = None,
+    seed: Optional[int] = None,
+    info: Optional[DatasetInfo] = None,
+    split: Optional[NamedSplit] = None,
+    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> DatasetType:
+    """
+    Interleave several datasets (sources) into a single dataset.
+    The new dataset is constructed by alternating between the sources to get the examples.
+
+    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
+
+    - If `probabilities` is `None` (default), the new dataset is constructed by cycling between each source to get the examples.
+    - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+    The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy` is `all_exhausted`,
+    in which case the resulting dataset ends when all datasets have run out of examples at least once.
+
+    Note for iterable datasets:
+
+    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
+    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).
+
+    Args:
+        datasets (`List[Dataset]` or `List[IterableDataset]`):
+            List of datasets to interleave.
+        probabilities (`List[float]`, *optional*, defaults to `None`):
+            If specified, the new dataset is constructed by sampling
+            examples from one source at a time according to these probabilities.
+        seed (`int`, *optional*, defaults to `None`):
+            The random seed used to choose a source for each example.
+        info ([`DatasetInfo`], *optional*):
+            Dataset information, like description, citation, etc.
+        split ([`NamedSplit`], *optional*):
+            Name of the dataset split.
+        stopping_strategy (`str`, defaults to `first_exhausted`):
+            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
+    Returns:
+        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
+        parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
+        `IterableDataset`.
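+
+    Note that you cannot mix map-style [`Dataset`] objects and [`IterableDataset`] objects in the
+    same call: all entries must be of the same type, otherwise a `ValueError` is raised.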
+ + Example: + + For regular datasets (map-style): + + ```python + >>> from datasets import Dataset, interleave_datasets + >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) + >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}) + >>> d3 = Dataset.from_dict({"a": [20, 21, 22]}) + >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") + >>> dataset["a"] + [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22] + >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) + >>> dataset["a"] + [10, 0, 11, 1, 2] + >>> dataset = interleave_datasets([d1, d2, d3]) + >>> dataset["a"] + [0, 10, 20, 1, 11, 21, 2, 12, 22] + >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") + >>> dataset["a"] + [0, 10, 20, 1, 11, 21, 2, 12, 22] + >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}) + >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) + >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]}) + >>> dataset = interleave_datasets([d1, d2, d3]) + >>> dataset["a"] + [0, 10, 20, 1, 11, 21, 2, 12, 22] + >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") + >>> dataset["a"] + [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] + >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42) + >>> dataset["a"] + [10, 0, 11, 1, 2] + >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted") + >>> dataset["a"] + [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24] + For datasets in streaming mode (iterable): + + >>> from datasets import load_dataset, interleave_datasets + >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True) + >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True) + >>> dataset = interleave_datasets([d1, d2]) + >>> iterator = iter(dataset) + >>> next(iterator) + {'text': 'Mtendere Village was inspired by the vision...} + >>> next(iterator) + {'text': "Média de débat d'idées, de culture...} + ``` + """ + from .arrow_dataset import Dataset + from .iterable_dataset import IterableDataset + + if not datasets: + raise ValueError("Unable to interleave an empty list of datasets.") + for i, dataset in enumerate(datasets): + if not isinstance(dataset, (Dataset, IterableDataset)): + if isinstance(dataset, (DatasetDict, IterableDatasetDict)): + if not dataset: + raise ValueError( + f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " + "is an empty dataset dictionary." + ) + raise ValueError( + f"Dataset at position {i} has at least one split: {list(dataset)}\n" + f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']" + ) + raise ValueError( + f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}." + ) + if i == 0: + dataset_type, other_type = ( + (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) + ) + elif not isinstance(dataset, dataset_type): + raise ValueError( + f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." 
)
+    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
+    if dataset_type is Dataset:
+        return _interleave_map_style_datasets(
+            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+        )
+    else:
+        return _interleave_iterable_datasets(
+            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+        )
+
+
+def concatenate_datasets(
+    dsets: List[DatasetType],
+    info: Optional[DatasetInfo] = None,
+    split: Optional[NamedSplit] = None,
+    axis: int = 0,
+) -> DatasetType:
+    """
+    Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].
+
+    Args:
+        dsets (`List[datasets.Dataset]`):
+            List of Datasets to concatenate.
+        info (`DatasetInfo`, *optional*):
+            Dataset information, like description, citation, etc.
+        split (`NamedSplit`, *optional*):
+            Name of the dataset split.
+        axis (`{0, 1}`, defaults to `0`):
+            Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+            (horizontally).
+
+    Example:
+
+    ```py
+    >>> ds3 = concatenate_datasets([ds1, ds2])
+    ```
+    """
+
+    if not dsets:
+        raise ValueError("Unable to concatenate an empty list of datasets.")
+    for i, dataset in enumerate(dsets):
+        if not isinstance(dataset, (Dataset, IterableDataset)):
+            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+                if not dataset:
+                    raise ValueError(
+                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+                        "is an empty dataset dictionary."
+                    )
+                raise ValueError(
+                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
+                )
+            raise ValueError(
+                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+            )
+        if i == 0:
+            dataset_type, other_type = (
+                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+            )
+        elif not isinstance(dataset, dataset_type):
+            raise ValueError(
+                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ ) + if dataset_type is Dataset: + return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis) + else: + return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis) diff --git a/testbed/huggingface__datasets/src/datasets/commands/__init__.py b/testbed/huggingface__datasets/src/datasets/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..905e753955a348a8e486302e1b6f5e8f53ec7bf4 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/commands/__init__.py @@ -0,0 +1,13 @@ +from abc import ABC, abstractmethod +from argparse import ArgumentParser + + +class BaseDatasetsCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/testbed/huggingface__datasets/src/datasets/commands/run_beam.py b/testbed/huggingface__datasets/src/datasets/commands/run_beam.py new file mode 100644 index 0000000000000000000000000000000000000000..3843a5568f283a3cc8274f85dc206e42de272074 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/commands/run_beam.py @@ -0,0 +1,165 @@ +import os +from argparse import ArgumentParser +from pathlib import Path +from shutil import copyfile +from typing import List + +from datasets import config +from datasets.builder import DatasetBuilder +from datasets.commands import BaseDatasetsCLICommand +from datasets.download.download_config import DownloadConfig +from datasets.download.download_manager import DownloadMode +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.info_utils import VerificationMode + + +def run_beam_command_factory(args, **kwargs): + return RunBeamCommand( + args.dataset, + args.name, + args.cache_dir, + args.beam_pipeline_options, + args.data_dir, + args.all_configs, + args.save_info or args.save_infos, + args.ignore_verifications, + args.force_redownload, + **kwargs, + ) + + +class RunBeamCommand(BaseDatasetsCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline") + run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download") + run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name") + run_beam_parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="Cache directory where the datasets are stored", + ) + run_beam_parser.add_argument( + "--beam_pipeline_options", + type=str, + default="", + help="Beam pipeline options, separated by commas. 
Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`", + ) + run_beam_parser.add_argument( + "--data_dir", + type=str, + default=None, + help="Can be used to specify a manual directory to get the files from", + ) + run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") + run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file") + run_beam_parser.add_argument( + "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks" + ) + run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") + # aliases + run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info") + run_beam_parser.set_defaults(func=run_beam_command_factory) + + def __init__( + self, + dataset: str, + name: str, + cache_dir: str, + beam_pipeline_options: str, + data_dir: str, + all_configs: bool, + save_infos: bool, + ignore_verifications: bool, + force_redownload: bool, + **config_kwargs, + ): + self._dataset = dataset + self._name = name + self._cache_dir = cache_dir + self._beam_pipeline_options = beam_pipeline_options + self._data_dir = data_dir + self._all_configs = all_configs + self._save_infos = save_infos + self._ignore_verifications = ignore_verifications + self._force_redownload = force_redownload + self._config_kwargs = config_kwargs + + def run(self): + import apache_beam as beam + + if self._name is not None and self._all_configs: + print("Both parameters `name` and `all_configs` can't be used at once.") + exit(1) + path, config_name = self._dataset, self._name + dataset_module = dataset_module_factory(path) + builder_cls = import_main_class(dataset_module.module_path) + builders: List[DatasetBuilder] = [] + if self._beam_pipeline_options: + beam_options = beam.options.pipeline_options.PipelineOptions( + flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt] + ) + else: + beam_options = None + if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0: + for builder_config in builder_cls.BUILDER_CONFIGS: + builders.append( + builder_cls( + config_name=builder_config.name, + data_dir=self._data_dir, + hash=dataset_module.hash, + beam_options=beam_options, + cache_dir=self._cache_dir, + base_path=dataset_module.builder_kwargs.get("base_path"), + ) + ) + else: + builders.append( + builder_cls( + config_name=config_name, + data_dir=self._data_dir, + beam_options=beam_options, + cache_dir=self._cache_dir, + base_path=dataset_module.builder_kwargs.get("base_path"), + **self._config_kwargs, + ) + ) + + for builder in builders: + builder.download_and_prepare( + download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS + if not self._force_redownload + else DownloadMode.FORCE_REDOWNLOAD, + download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH), + verification_mode=VerificationMode.NO_CHECKS + if self._ignore_verifications + else VerificationMode.ALL_CHECKS, + try_from_hf_gcs=False, + ) + if self._save_infos: + builder._save_infos() + + print("Apache beam run successful.") + + # If save_infos=True, the dataset infos file is created next to the loaded module file. + # Let's move it to the original directory of the dataset script, to allow the user to + # upload them on S3 at the same time afterwards. 
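+        # For example (hypothetical invocation, names are illustrative):
+        #   datasets-cli run_beam ./path/to/my_dataset --beam_pipeline_options=runner=DirectRunner --save_info
+        # would leave dataset_infos.json next to my_dataset.py, thanks to the copy below.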
+ if self._save_infos: + dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) + + name = Path(path).name + ".py" + + combined_path = os.path.join(path, name) + if os.path.isfile(path): + dataset_dir = os.path.dirname(path) + elif os.path.isfile(combined_path): + dataset_dir = path + else: # in case of a remote dataset + print(f"Dataset Infos file saved at {dataset_infos_path}") + exit(1) + + # Move datasetinfo back to the user + user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME) + copyfile(dataset_infos_path, user_dataset_infos_path) + print(f"Dataset Infos file saved at {user_dataset_infos_path}") diff --git a/testbed/huggingface__datasets/src/datasets/config.py b/testbed/huggingface__datasets/src/datasets/config.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed96b6334fc76522c6a7f08a19348d68e1be853 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/config.py @@ -0,0 +1,238 @@ +import importlib +import importlib.metadata +import logging +import os +import platform +from pathlib import Path +from typing import Optional + +from packaging import version + + +logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging + +# Datasets +S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets" +CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets" +REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}" + +# Metrics +S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics" +CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric" +REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}" + +# Hub +HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") +HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" +HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}" +HUB_DEFAULT_VERSION = "main" + +PY_VERSION = version.parse(platform.python_version()) + +# General environment variables accepted values for booleans +ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + + +# Imports +DILL_VERSION = version.parse(importlib.metadata.version("dill")) +FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec")) +PANDAS_VERSION = version.parse(importlib.metadata.version("pandas")) +PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow")) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_JAX", "AUTO").upper() + +TORCH_VERSION = "N/A" +TORCH_AVAILABLE = False + +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None + if TORCH_AVAILABLE: + try: + TORCH_VERSION = version.parse(importlib.metadata.version("torch")) + logger.info(f"PyTorch version {TORCH_VERSION} available.") + except importlib.metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling PyTorch because USE_TF is set") + +TF_VERSION = "N/A" +TF_AVAILABLE = False + +if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: + TF_AVAILABLE = 
importlib.util.find_spec("tensorflow") is not None + if TF_AVAILABLE: + # For the metadata, we have to look for both tensorflow and tensorflow-cpu + for package in [ + "tensorflow", + "tensorflow-cpu", + "tensorflow-gpu", + "tf-nightly", + "tf-nightly-cpu", + "tf-nightly-gpu", + "intel-tensorflow", + "tensorflow-rocm", + "tensorflow-macos", + ]: + try: + TF_VERSION = version.parse(importlib.metadata.version(package)) + except importlib.metadata.PackageNotFoundError: + continue + else: + break + else: + TF_AVAILABLE = False + if TF_AVAILABLE: + if TF_VERSION.major < 2: + logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.") + TF_AVAILABLE = False + else: + logger.info(f"TensorFlow version {TF_VERSION} available.") +else: + logger.info("Disabling Tensorflow because USE_TORCH is set") + + +JAX_VERSION = "N/A" +JAX_AVAILABLE = False + +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None + if JAX_AVAILABLE: + try: + JAX_VERSION = version.parse(importlib.metadata.version("jax")) + logger.info(f"JAX version {JAX_VERSION} available.") + except importlib.metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling JAX because USE_JAX is set to False") + + +USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper() +BEAM_VERSION = "N/A" +BEAM_AVAILABLE = False +if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES: + try: + BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam")) + BEAM_AVAILABLE = True + logger.info(f"Apache Beam version {BEAM_VERSION} available.") + except importlib.metadata.PackageNotFoundError: + pass +else: + logger.info("Disabling Apache Beam because USE_BEAM is set to False") + + +# Optional tools for data loading +SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None + +# Optional tools for feature decoding +PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None +IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( + importlib.import_module("soundfile").__libsndfile_version__ +) >= version.parse("1.0.31") +IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse( + importlib.import_module("soundfile").__libsndfile_version__ +) >= version.parse("1.1.0") + +# Optional compression tools +RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None +ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None +LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None +PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None + + +# Cache location +DEFAULT_XDG_CACHE_HOME = "~/.cache" +XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME) +DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface") +HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME)) + +DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets") +HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE)) + +DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics") +HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE)) + +DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules") +HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE)) + +DOWNLOADED_DATASETS_DIR = "downloads" +DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR) 
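+# The default location can be overridden with the HF_DATASETS_DOWNLOADED_DATASETS_PATH environment variable: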
+DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH)) + +EXTRACTED_DATASETS_DIR = "extracted" +DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR) +EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH)) + +# Download count for the website +HF_UPDATE_DOWNLOAD_COUNTS = ( + os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES +) + +# Batch size constants. For more info, see: +# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations) +DEFAULT_MAX_BATCH_SIZE = 1000 + +# Size of the preloaded record batch in `Dataset.__iter__` +ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10 + +# Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare) +MAX_SHARD_SIZE = "500MB" + +# Parquet configuration +PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 +PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 +PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 + +# Offline mode +HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES + +# Here, `True` will disable progress bars globally without possibility of enabling it +# programmatically. `False` will enable them without possibility of disabling them. +# If environment variable is not set (None), then the user is free to enable/disable +# them programmatically. +# TL;DR: env variable has priority over code +__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS") +HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = ( + __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES + if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None + else None +) + +# In-memory +DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled +IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE)) + +# File names +DATASET_ARROW_FILENAME = "dataset.arrow" +DATASET_INDICES_FILENAME = "indices.arrow" +DATASET_STATE_JSON_FILENAME = "state.json" +DATASET_INFO_FILENAME = "dataset_info.json" +DATASETDICT_INFOS_FILENAME = "dataset_infos.json" +LICENSE_FILENAME = "LICENSE" +METRIC_INFO_FILENAME = "metric_info.json" +DATASETDICT_JSON_FILENAME = "dataset_dict.json" +METADATA_CONFIGS_FIELD = "configs" + +MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules" + +MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255 + +# Streaming +STREAMING_READ_MAX_RETRIES = 20 +STREAMING_READ_RETRY_INTERVAL = 5 + +# Datasets without script +DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 +GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10 +ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 + +# Progress bars +PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec + +# Maximum number of uploaded files per commit +UPLOADS_MAX_NUMBER_PER_COMMIT = 50 + +# Backward compatibiliy +MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30 diff --git a/testbed/huggingface__datasets/src/datasets/data_files.py b/testbed/huggingface__datasets/src/datasets/data_files.py new file mode 100644 index 0000000000000000000000000000000000000000..f318e89373801215c066a5e9e0d239fd1296ad42 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/data_files.py @@ -0,0 +1,700 @@ +import os +import re +from functools import partial +from glob import has_magic +from pathlib import Path, PurePath +from typing import Callable, Dict, List, 
Optional, Set, Tuple, Union + +import huggingface_hub +from fsspec import get_fs_token_paths +from fsspec.implementations.http import HTTPFileSystem +from huggingface_hub import HfFileSystem +from packaging import version +from tqdm.contrib.concurrent import thread_map + +from . import config +from .download import DownloadConfig +from .download.streaming_download_manager import _prepare_path_and_storage_options, xbasename, xjoin +from .splits import Split +from .utils import logging +from .utils import tqdm as hf_tqdm +from .utils.file_utils import is_local_path, is_relative_path +from .utils.py_utils import glob_pattern_to_regex, string_to_dict + + +SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN) + + +logger = logging.get_logger(__name__) + + +class Url(str): + pass + + +class EmptyDatasetError(FileNotFoundError): + pass + + +SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*" + +SPLIT_KEYWORDS = { + Split.TRAIN: ["train", "training"], + Split.VALIDATION: ["validation", "valid", "dev", "val"], + Split.TEST: ["test", "testing", "eval", "evaluation"], +} +NON_WORDS_CHARS = "-._ 0-9" +if config.FSSPEC_VERSION < version.parse("2023.9.0"): + KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"] +else: + KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**/*[{sep}/]{keyword}[{sep}/]**"] + +DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST] +DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME = { + split: [ + pattern.format(keyword=keyword, sep=NON_WORDS_CHARS) + for keyword in SPLIT_KEYWORDS[split] + for pattern in KEYWORDS_IN_PATH_NAME_BASE_PATTERNS + ] + for split in DEFAULT_SPLITS +} + +DEFAULT_PATTERNS_ALL = { + Split.TRAIN: ["**"], +} + +ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED] +ALL_DEFAULT_PATTERNS = [ + DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME, + DEFAULT_PATTERNS_ALL, +] +if config.FSSPEC_VERSION < version.parse("2023.9.0"): + METADATA_PATTERNS = [ + "metadata.csv", + "**/metadata.csv", + "metadata.jsonl", + "**/metadata.jsonl", + ] # metadata file for ImageFolder and AudioFolder +else: + METADATA_PATTERNS = [ + "**/metadata.csv", + "**/metadata.jsonl", + ] # metadata file for ImageFolder and AudioFolder +WILDCARD_CHARACTERS = "*[]" +FILES_TO_IGNORE = [ + "README.md", + "config.json", + "dataset_info.json", + "dataset_infos.json", + "dummy_data.zip", + "dataset_dict.json", +] + + +def contains_wildcards(pattern: str) -> bool: + return any(wilcard_character in pattern for wilcard_character in WILDCARD_CHARACTERS) + + +def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]: + """ + Take the data_files patterns from the user, and format them into a dictionary. + Each key is the name of the split, and each value is a list of data files patterns (paths or urls). + The default split is "train". 
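+
+    For example (illustrative values)::
+
+        >>> sanitize_patterns("data/*.csv")
+        {'train': ['data/*.csv']}
+        >>> sanitize_patterns(["train.csv", "test.csv"])
+        {'train': ['train.csv', 'test.csv']}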
+ + Returns: + patterns: dictionary of split_name -> list of patterns + """ + if isinstance(patterns, dict): + return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()} + elif isinstance(patterns, str): + return {SANITIZED_DEFAULT_SPLIT: [patterns]} + elif isinstance(patterns, list): + if any(isinstance(pattern, dict) for pattern in patterns): + for pattern in patterns: + if not ( + isinstance(pattern, dict) + and len(pattern) == 2 + and "split" in pattern + and isinstance(pattern.get("path"), (str, list)) + ): + raise ValueError( + f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}" + ) + splits = [pattern["split"] for pattern in patterns] + if len(set(splits)) != len(splits): + raise ValueError(f"Some splits are duplicated in data_files: {splits}") + return { + str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]] + for pattern in patterns + } + else: + return {SANITIZED_DEFAULT_SPLIT: patterns} + else: + return sanitize_patterns(list(patterns)) + + +def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool: + """ + When a path matches a pattern, we additionnally check if it's inside a special directory + we ignore by default (if it starts with a double underscore). + + Users can still explicitly request a filepath inside such a directory if "__pycache__" is + mentioned explicitly in the requested pattern. + + Some examples: + + base directory: + + ./ + └── __pycache__ + └── b.txt + + >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**") + True + >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt") + True + >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*") + False + >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*") + False + """ + # We just need to check if every special directories from the path is present explicly in the pattern. + # Since we assume that the path matches the pattern, it's equivalent to counting that both + # the parent path and the parent pattern have the same number of special directories. + data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")] + data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")] + return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern) + + +def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool: + """ + When a path matches a pattern, we additionnally check if it's a hidden file or if it's inside + a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot. + + Users can still explicitly request a filepath that is hidden or is inside a hidden directory + if the hidden part is mentioned explicitly in the requested pattern. 
+ + Some examples: + + base directory: + + ./ + └── .hidden_file.txt + + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**") + True + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*") + False + + base directory: + + ./ + └── .hidden_dir + └── a.txt + + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**") + True + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*") + False + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*") + False + + base directory: + + ./ + └── .hidden_dir + └── .hidden_file.txt + + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**") + True + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*") + True + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*") + False + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*") + True + >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*") + False + """ + # We just need to check if every hidden part from the path is present explicly in the pattern. + # Since we assume that the path matches the pattern, it's equivalent to counting that both + # the path and the pattern have the same number of hidden parts. + hidden_directories_in_path = [ + part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."} + ] + hidden_directories_in_pattern = [ + part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."} + ] + return len(hidden_directories_in_path) != len(hidden_directories_in_pattern) + + +def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]: + """ + Get the default pattern from a directory or repository by testing all the supported patterns. + The first patterns to return a non-empty list of data files is returned. + + In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS. 
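+
+    For instance, a directory that only contains ``train.csv`` and ``test.csv`` would resolve
+    (illustratively) to ``{"train": [...], "test": [...]}`` through the keyword-based default patterns.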
+ """ + # first check the split patterns like data/{split}-00000-of-00001.parquet + for split_pattern in ALL_SPLIT_PATTERNS: + pattern = split_pattern.replace("{split}", "*") + try: + data_files = pattern_resolver(pattern) + except FileNotFoundError: + continue + if len(data_files) > 0: + splits: Set[str] = { + string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"] + for p in data_files + } + sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted( + splits - set(DEFAULT_SPLITS) + ) + return {split: [split_pattern.format(split=split)] for split in sorted_splits} + # then check the default patterns based on train/valid/test splits + for patterns_dict in ALL_DEFAULT_PATTERNS: + non_empty_splits = [] + for split, patterns in patterns_dict.items(): + for pattern in patterns: + try: + data_files = pattern_resolver(pattern) + except FileNotFoundError: + continue + if len(data_files) > 0: + non_empty_splits.append(split) + break + if non_empty_splits: + return {split: patterns_dict[split] for split in non_empty_splits} + raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}") + + +def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]: + """ + Get the supported metadata patterns from a directory or repository. + """ + non_empty_patterns = [] + for pattern in METADATA_PATTERNS: + try: + metadata_files = pattern_resolver(pattern) + if len(metadata_files) > 0: + non_empty_patterns.append(pattern) + except FileNotFoundError: + pass + if non_empty_patterns: + return non_empty_patterns + raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}") + + +def resolve_pattern( + pattern: str, + base_path: str, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, +) -> List[str]: + """ + Resolve the paths and URLs of the data files from the pattern passed by the user. + + You can use patterns to resolve multiple local files. Here are a few examples: + - *.csv to match all the CSV files at the first level + - **.csv to match all the CSV files at any level + - data/* to match all the files inside "data" + - data/** to match all the files inside "data" and its subdirectories + + The patterns are resolved using the fsspec glob. + + glob.glob, Path.glob, Path.match or fnmatch do not support ** with a prefix/suffix other than a forward slash /. + For instance, this means **.json is the same as *.json. On the contrary, the fsspec glob has no limits regarding the ** prefix/suffix, + resulting in **.json being equivalent to **/*.json. + + More generally: + - '*' matches any character except a forward-slash (to match just the file or directory name) + - '**' matches any character including a forward-slash / + + Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested. + The same applies to special directories that start with a double underscore like "__pycache__". + You can still include one if the pattern explicilty mentions it: + - to include a hidden file: "*/.hidden.txt" or "*/.*" + - to include a hidden directory: ".hidden/*" or ".*/*" + - to include a special directory: "__special__/*" or "__*/*" + + Example:: + + >>> from datasets.data_files import resolve_pattern + >>> base_path = "." 
+        >>> resolve_pattern("docs/**/*.py", base_path)
+        ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
+
+    Args:
+        pattern (str): Unix pattern or paths or URLs of the data files to resolve.
+            The paths can be absolute or relative to base_path.
+            Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
+        base_path (str): Base path to use when resolving relative paths.
+        allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
+            For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
+    Returns:
+        List[str]: List of paths or URLs to the local or remote files that match the patterns.
+    """
+    if is_relative_path(pattern):
+        pattern = xjoin(base_path, pattern)
+    elif is_local_path(pattern):
+        base_path = os.path.splitdrive(pattern)[0] + os.sep
+    else:
+        base_path = ""
+    pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
+    fs, _, _ = get_fs_token_paths(pattern, storage_options=storage_options)
+    fs_base_path = base_path.split("::")[0].split("://")[-1] or fs.root_marker
+    fs_pattern = pattern.split("::")[0].split("://")[-1]
+    files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
+    protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
+    protocol_prefix = protocol + "://" if protocol != "file" else ""
+    matched_paths = [
+        filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
+        for filepath, info in fs.glob(pattern, detail=True).items()
+        if info["type"] == "file"
+        and (xbasename(filepath) not in files_to_ignore)
+        and not _is_inside_unrequested_special_dir(
+            os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
+        )
+        and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(
+            os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
+        )
+    ]  # ignore .ipynb and __pycache__, but keep /../
+    if allowed_extensions is not None:
+        out = [
+            filepath
+            for filepath in matched_paths
+            if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
+        ]
+        if len(out) < len(matched_paths):
+            invalid_matched_files = list(set(matched_paths) - set(out))
+            logger.info(
+                f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
+            )
+    else:
+        out = matched_paths
+    if not out:
+        error_msg = f"Unable to find '{pattern}'"
+        if allowed_extensions is not None:
+            error_msg += f" with any supported extension {list(allowed_extensions)}"
+        raise FileNotFoundError(error_msg)
+    return out
+
+
+def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
+    """
+    Get the default pattern from a directory by testing all the supported patterns.
+    The first pattern to return a non-empty list of data files is returned.
+ + Some examples of supported patterns: + + Input: + + my_dataset_repository/ + ├── README.md + └── dataset.csv + + Output: + + {"train": ["**"]} + + Input: + + my_dataset_repository/ + ├── README.md + ├── train.csv + └── test.csv + + my_dataset_repository/ + ├── README.md + └── data/ + ├── train.csv + └── test.csv + + my_dataset_repository/ + ├── README.md + ├── train_0.csv + ├── train_1.csv + ├── train_2.csv + ├── train_3.csv + ├── test_0.csv + └── test_1.csv + + Output: + + {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'], + 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]} + + Input: + + my_dataset_repository/ + ├── README.md + └── data/ + ├── train/ + │ ├── shard_0.csv + │ ├── shard_1.csv + │ ├── shard_2.csv + │ └── shard_3.csv + └── test/ + ├── shard_0.csv + └── shard_1.csv + + Output: + + {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'], + 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]} + + Input: + + my_dataset_repository/ + ├── README.md + └── data/ + ├── train-00000-of-00003.csv + ├── train-00001-of-00003.csv + ├── train-00002-of-00003.csv + ├── test-00000-of-00001.csv + ├── random-00000-of-00003.csv + ├── random-00001-of-00003.csv + └── random-00002-of-00003.csv + + Output: + + {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'], + 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'], + 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']} + + In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS. + """ + resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) + try: + return _get_data_files_patterns(resolver) + except FileNotFoundError: + raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None + + +def get_metadata_patterns( + base_path: str, + download_config: Optional[DownloadConfig] = None, +) -> List[str]: + """ + Get the supported metadata patterns from a local directory. 
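+
+    The returned value is a subset of ``METADATA_PATTERNS`` (e.g. ``["**/metadata.csv"]``,
+    illustratively): only the patterns that resolve to at least one file are kept.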
+ """ + resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) + try: + return _get_metadata_files_patterns(resolver) + except FileNotFoundError: + raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None + + +def _get_single_origin_metadata( + data_file: str, + download_config: Optional[DownloadConfig] = None, +) -> Tuple[str]: + data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config) + fs, _, _ = get_fs_token_paths(data_file, storage_options=storage_options) + if isinstance(fs, HfFileSystem): + resolved_path = fs.resolve_path(data_file) + return (resolved_path.repo_id, resolved_path.revision) + elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT): + hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token) + data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) + resolved_path = hffs.resolve_path(data_file) + return (resolved_path.repo_id, resolved_path.revision) + info = fs.info(data_file) + # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime + for key in ["ETag", "etag", "mtime"]: + if key in info: + return (str(info[key]),) + return () + + +def _get_origin_metadata( + data_files: List[str], + max_workers=64, + download_config: Optional[DownloadConfig] = None, +) -> Tuple[str]: + return thread_map( + partial(_get_single_origin_metadata, download_config=download_config), + data_files, + max_workers=max_workers, + tqdm_class=hf_tqdm, + desc="Resolving data files", + disable=len(data_files) <= 16, + ) + + +class DataFilesList(List[str]): + """ + List of data files (absolute local paths or URLs). + It has two construction methods given the user's data files patterns : + - ``from_hf_repo``: resolve patterns inside a dataset repository + - ``from_local_or_remote``: resolve patterns from a local path + + Moreover DataFilesList has an additional attribute ``origin_metadata``. + It can store: + - the last modified time of local files + - ETag of remote files + - commit sha of a dataset repository + + Thanks to this additional attribute, it is possible to hash the list + and get a different hash if and only if at least one file changed. + This is useful for caching Dataset objects that are obtained from a list of data files. 
+ """ + + def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]): + super().__init__(data_files) + self.origin_metadata = origin_metadata + + def __add__(self, other): + return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata) + + @classmethod + def from_hf_repo( + cls, + patterns: List[str], + dataset_info: huggingface_hub.hf_api.DatasetInfo, + base_path: Optional[str] = None, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, + ) -> "DataFilesList": + base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/") + return cls.from_patterns( + patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config + ) + + @classmethod + def from_local_or_remote( + cls, + patterns: List[str], + base_path: Optional[str] = None, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, + ) -> "DataFilesList": + base_path = base_path if base_path is not None else Path().resolve().as_posix() + return cls.from_patterns( + patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config + ) + + @classmethod + def from_patterns( + cls, + patterns: List[str], + base_path: Optional[str] = None, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, + ) -> "DataFilesList": + base_path = base_path if base_path is not None else Path().resolve().as_posix() + data_files = [] + for pattern in patterns: + try: + data_files.extend( + resolve_pattern( + pattern, + base_path=base_path, + allowed_extensions=allowed_extensions, + download_config=download_config, + ) + ) + except FileNotFoundError: + if not has_magic(pattern): + raise + origin_metadata = _get_origin_metadata(data_files, download_config=download_config) + return cls(data_files, origin_metadata) + + def filter_extensions(self, extensions: List[str]) -> "DataFilesList": + pattern = "|".join("\\" + ext for ext in extensions) + pattern = re.compile(f".*({pattern})(\\..+)?$") + return DataFilesList( + [data_file for data_file in self if pattern.match(data_file)], + origin_metadata=self.origin_metadata, + ) + + +class DataFilesDict(Dict[str, DataFilesList]): + """ + Dict of split_name -> list of data files (absolute local paths or URLs). + It has two construction methods given the user's data files patterns : + - ``from_hf_repo``: resolve patterns inside a dataset repository + - ``from_local_or_remote``: resolve patterns from a local path + + Moreover each list is a DataFilesList. It is possible to hash the dictionary + and get a different hash if and only if at least one file changed. + For more info, see ``DataFilesList``. + + This is useful for caching Dataset objects that are obtained from a list of data files. + + Changing the order of the keys of this dictionary also doesn't change its hash. 
+ """ + + @classmethod + def from_local_or_remote( + cls, + patterns: Dict[str, Union[List[str], DataFilesList]], + base_path: Optional[str] = None, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, + ) -> "DataFilesDict": + out = cls() + for key, patterns_for_key in patterns.items(): + out[key] = ( + DataFilesList.from_local_or_remote( + patterns_for_key, + base_path=base_path, + allowed_extensions=allowed_extensions, + download_config=download_config, + ) + if not isinstance(patterns_for_key, DataFilesList) + else patterns_for_key + ) + return out + + @classmethod + def from_hf_repo( + cls, + patterns: Dict[str, Union[List[str], DataFilesList]], + dataset_info: huggingface_hub.hf_api.DatasetInfo, + base_path: Optional[str] = None, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, + ) -> "DataFilesDict": + out = cls() + for key, patterns_for_key in patterns.items(): + out[key] = ( + DataFilesList.from_hf_repo( + patterns_for_key, + dataset_info=dataset_info, + base_path=base_path, + allowed_extensions=allowed_extensions, + download_config=download_config, + ) + if not isinstance(patterns_for_key, DataFilesList) + else patterns_for_key + ) + return out + + @classmethod + def from_patterns( + cls, + patterns: Dict[str, Union[List[str], DataFilesList]], + base_path: Optional[str] = None, + allowed_extensions: Optional[List[str]] = None, + download_config: Optional[DownloadConfig] = None, + ) -> "DataFilesDict": + out = cls() + for key, patterns_for_key in patterns.items(): + out[key] = ( + DataFilesList.from_patterns( + patterns_for_key, + base_path=base_path, + allowed_extensions=allowed_extensions, + download_config=download_config, + ) + if not isinstance(patterns_for_key, DataFilesList) + else patterns_for_key + ) + return out + + def filter_extensions(self, extensions: List[str]) -> "DataFilesDict": + out = type(self)() + for key, data_files_list in self.items(): + out[key] = data_files_list.filter_extensions(extensions) + return out diff --git a/testbed/huggingface__datasets/src/datasets/dataset_dict.py b/testbed/huggingface__datasets/src/datasets/dataset_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..5cad2261723d96f8fabe2220b3242580050c15a2 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/dataset_dict.py @@ -0,0 +1,2225 @@ +import contextlib +import copy +import fnmatch +import json +import math +import posixpath +import re +import warnings +from io import BytesIO +from pathlib import Path +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union + +import fsspec +import numpy as np +from huggingface_hub import ( + CommitOperationAdd, + CommitOperationDelete, + DatasetCard, + DatasetCardData, + HfApi, +) + +from . 
import config +from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset +from .features import Features +from .features.features import FeatureType +from .info import DatasetInfo, DatasetInfosDict +from .naming import _split_re +from .splits import NamedSplit, Split, SplitDict, SplitInfo +from .table import Table +from .tasks import TaskTemplate +from .utils import logging +from .utils.deprecation_utils import deprecated +from .utils.doc_utils import is_documented_by +from .utils.metadata import MetadataConfigs +from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict +from .utils.typing import PathLike + + +logger = logging.get_logger(__name__) + + +class DatasetDict(dict): + """A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)""" + + def _check_values_type(self): + for dataset in self.values(): + if not isinstance(dataset, Dataset): + raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'") + + def _check_values_features(self): + items = list(self.items()) + for item_a, item_b in zip(items[:-1], items[1:]): + if item_a[1].features != item_b[1].features: + raise ValueError( + f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}" + ) + + def __getitem__(self, k) -> Dataset: + if isinstance(k, (str, NamedSplit)) or len(self) == 0: + return super().__getitem__(k) + else: + available_suggested_splits = [ + split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self + ] + suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0] + raise KeyError( + f"Invalid key: {k}. Please first select a split. For example: " + f"`my_dataset_dictionary['{suggested_split}'][{k}]`. " + f"Available splits: {sorted(self)}" + ) + + @property + def data(self) -> Dict[str, Table]: + """The Apache Arrow tables backing each split. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes") + >>> ds.data + ``` + """ + self._check_values_type() + return {k: dataset.data for k, dataset in self.items()} + + @property + def cache_files(self) -> Dict[str, Dict]: + """The cache files containing the Apache Arrow table backing each split. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes") + >>> ds.cache_files + {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}], + 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}], + 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]} + ``` + """ + self._check_values_type() + return {k: dataset.cache_files for k, dataset in self.items()} + + @property + def num_columns(self) -> Dict[str, int]: + """Number of columns in each split of the dataset. 
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.num_columns
+        {'test': 2, 'train': 2, 'validation': 2}
+        ```
+        """
+        self._check_values_type()
+        return {k: dataset.num_columns for k, dataset in self.items()}
+
+    @property
+    def num_rows(self) -> Dict[str, int]:
+        """Number of rows in each split of the dataset (same as [`datasets.Dataset.__len__`]).
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.num_rows
+        {'test': 1066, 'train': 8530, 'validation': 1066}
+        ```
+        """
+        self._check_values_type()
+        return {k: dataset.num_rows for k, dataset in self.items()}
+
+    @property
+    def column_names(self) -> Dict[str, List[str]]:
+        """Names of the columns in each split of the dataset.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.column_names
+        {'test': ['text', 'label'],
+         'train': ['text', 'label'],
+         'validation': ['text', 'label']}
+        ```
+        """
+        self._check_values_type()
+        return {k: dataset.column_names for k, dataset in self.items()}
+
+    @property
+    def shape(self) -> Dict[str, Tuple[int]]:
+        """Shape of each split of the dataset (number of rows, number of columns).
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.shape
+        {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
+        ```
+        """
+        self._check_values_type()
+        return {k: dataset.shape for k, dataset in self.items()}
+
+    def flatten(self, max_depth=16) -> "DatasetDict":
+        """Flatten the Apache Arrow Table of each split (nested features are flattened).
+        Each column with a struct type is flattened into one column per struct field.
+        Other columns are left unchanged.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("squad")
+        >>> ds["train"].features
+        {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+         'context': Value(dtype='string', id=None),
+         'id': Value(dtype='string', id=None),
+         'question': Value(dtype='string', id=None),
+         'title': Value(dtype='string', id=None)}
+        >>> ds.flatten()
+        DatasetDict({
+            train: Dataset({
+                features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+                num_rows: 87599
+            })
+            validation: Dataset({
+                features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+                num_rows: 10570
+            })
+        })
+        ```
+        """
+        self._check_values_type()
+        return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
+
+    def unique(self, column: str) -> Dict[str, List]:
+        """Return a list of the unique elements in a column for each split.
+
+        This is implemented in the low-level backend and is therefore very fast.
+
+        Args:
+            column (`str`):
+                Column name (list all the column names with [`~datasets.Dataset.column_names`]).
+
+        Returns:
+            Dict[`str`, `list`]: Dictionary of unique elements in the given column.
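+
+        For a [`ClassLabel`] column the returned values are the integer ids; they can be
+        mapped back to the label names with `int2str` (a small sketch reusing the feature
+        metadata of the `rotten_tomatoes` example below):
+
+        ```py
+        >>> labels = ds.unique("label")
+        >>> ds["train"].features["label"].int2str(labels["train"])
+        ['pos', 'neg']
+        ```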
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.unique("label")
+        {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
+        ```
+        """
+        self._check_values_type()
+        return {k: dataset.unique(column) for k, dataset in self.items()}
+
+    def cleanup_cache_files(self) -> Dict[str, int]:
+        """Clean up all cache files in the dataset cache directory, except the currently used cache file if there is one.
+        Be careful when running this command: make sure no other process is currently using other cache files.
+
+        Return:
+            `Dict` with the number of removed files for each split
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.cleanup_cache_files()
+        {'test': 0, 'train': 0, 'validation': 0}
+        ```
+        """
+        self._check_values_type()
+        return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
+
+    def __repr__(self):
+        repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+        repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+        return f"DatasetDict({{\n{repr}\n}})"
+
+    def cast(self, features: Features) -> "DatasetDict":
+        """
+        Cast the dataset to a new set of features.
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        You can also change the feature types with [`Dataset.map`] by passing `features`, but `cast`
+        doesn't copy the data to a new dataset and is thus faster.
+
+        Args:
+            features ([`Features`]):
+                New features to cast the dataset to.
+                The name and order of the fields in the features must match the current column names.
+                The type of the data must also be convertible from one type to the other.
+                For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds["train"].features
+        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+         'text': Value(dtype='string', id=None)}
+        >>> new_features = ds["train"].features.copy()
+        >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+        >>> new_features['text'] = Value('large_string')
+        >>> ds = ds.cast(new_features)
+        >>> ds["train"].features
+        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+         'text': Value(dtype='large_string', id=None)}
+        ```
+        """
+        self._check_values_type()
+        return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
+
+    def cast_column(self, column: str, feature) -> "DatasetDict":
+        """Cast column to feature for decoding.
+
+        Args:
+            column (`str`):
+                Column name.
+            feature ([`Feature`]):
+                Target feature.
+
+        Returns:
+            [`DatasetDict`]
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds["train"].features
+        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+         'text': Value(dtype='string', id=None)}
+        >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+        >>> ds["train"].features
+        {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+         'text': Value(dtype='string', id=None)}
+        ```
+        """
+        self._check_values_type()
+        return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
+
+    def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+        """
+        Remove one or several column(s) from each split in the dataset
+        and the features associated to the column(s).
+
+        The transformation is applied to all the splits of the dataset dictionary.
+
+        You can also remove a column using [`Dataset.map`] with `remove_columns`, but the present method
+        doesn't copy the data of the remaining columns and is thus faster.
+
+        Args:
+            column_names (`Union[str, List[str]]`):
+                Name of the column(s) to remove.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.remove_columns("label")
+        DatasetDict({
+            train: Dataset({
+                features: ['text'],
+                num_rows: 8530
+            })
+            validation: Dataset({
+                features: ['text'],
+                num_rows: 1066
+            })
+            test: Dataset({
+                features: ['text'],
+                num_rows: 1066
+            })
+        })
+        ```
+        """
+        self._check_values_type()
+        return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
+
+    def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
+        """
+        Rename a column in the dataset and move the features associated to the original column under the new column name.
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        You can also rename a column using [`~Dataset.map`] with `remove_columns`, but the present method:
+        - takes care of moving the original features under the new column name.
+        - doesn't copy the data of the other columns and is thus much faster.
+
+        Args:
+            original_column_name (`str`):
+                Name of the column to rename.
+            new_column_name (`str`):
+                New name for the column.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.rename_column("label", "label_new")
+        DatasetDict({
+            train: Dataset({
+                features: ['text', 'label_new'],
+                num_rows: 8530
+            })
+            validation: Dataset({
+                features: ['text', 'label_new'],
+                num_rows: 1066
+            })
+            test: Dataset({
+                features: ['text', 'label_new'],
+                num_rows: 1066
+            })
+        })
+        ```
+        """
+        self._check_values_type()
+        return DatasetDict(
+            {
+                k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+                for k, dataset in self.items()
+            }
+        )
+
+    def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
+        """
+        Rename several columns in the dataset, and move the features associated to the original columns under
+        the new column names.
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        Args:
+            column_mapping (`Dict[str, str]`):
+                A mapping of columns to rename to their new names.
+
+        Returns:
+            [`DatasetDict`]: A copy of the dataset with renamed columns.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+        DatasetDict({
+            train: Dataset({
+                features: ['text_new', 'label_new'],
+                num_rows: 8530
+            })
+            validation: Dataset({
+                features: ['text_new', 'label_new'],
+                num_rows: 1066
+            })
+            test: Dataset({
+                features: ['text_new', 'label_new'],
+                num_rows: 1066
+            })
+        })
+        ```
+        """
+        self._check_values_type()
+        return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
+
+    def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+        """Select one or several column(s) from each split in the dataset and
+        the features associated to the column(s).
+
+        The transformation is applied to all the splits of the dataset
+        dictionary.
+
+        Args:
+            column_names (`Union[str, List[str]]`):
+                Name of the column(s) to keep.
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes") + >>> ds.select_columns("text") + DatasetDict({ + train: Dataset({ + features: ['text'], + num_rows: 8530 + }) + validation: Dataset({ + features: ['text'], + num_rows: 1066 + }) + test: Dataset({ + features: ['text'], + num_rows: 1066 + }) + }) + ``` + """ + self._check_values_type() + return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()}) + + def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict": + """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables. + + Args: + column (`str`): + The name of the column to cast. + include_nulls (`bool`, defaults to `False`): + Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. + + + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("boolq") + >>> ds["train"].features + {'answer': Value(dtype='bool', id=None), + 'passage': Value(dtype='string', id=None), + 'question': Value(dtype='string', id=None)} + >>> ds = ds.class_encode_column("answer") + >>> ds["train"].features + {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), + 'passage': Value(dtype='string', id=None), + 'question': Value(dtype='string', id=None)} + ``` + """ + self._check_values_type() + return DatasetDict( + {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()} + ) + + @contextlib.contextmanager + def formatted_as( + self, + type: Optional[str] = None, + columns: Optional[List] = None, + output_all_columns: bool = False, + **format_kwargs, + ): + """To be used in a `with` statement. Set `__getitem__` return format (type and columns). + The transformation is applied to all the datasets of the dataset dictionary. + + Args: + type (`str`, *optional*): + Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. + `None` means `__getitem__` returns python objects (default). + columns (`List[str]`, *optional*): + Columns to format in the output. + `None` means `__getitem__` returns all columns (default). + output_all_columns (`bool`, defaults to False): + Keep un-formatted columns as well in the output (as python objects). + **format_kwargs (additional keyword arguments): + Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. + """ + self._check_values_type() + old_format_type = {k: dataset._format_type for k, dataset in self.items()} + old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()} + old_format_columns = {k: dataset._format_columns for k, dataset in self.items()} + old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()} + try: + self.set_format(type, columns, output_all_columns, **format_kwargs) + yield + finally: + for k, dataset in self.items(): + dataset.set_format( + old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k] + ) + + def set_format( + self, + type: Optional[str] = None, + columns: Optional[List] = None, + output_all_columns: bool = False, + **format_kwargs, + ): + """Set `__getitem__` return format (type and columns). + The format is set for every dataset in the dataset dictionary. 
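+
+        To change the format only temporarily, [`~DatasetDict.formatted_as`] can be used
+        as a context manager; it restores the previous format on exit. A minimal sketch
+        (the column name is only illustrative):
+
+        ```py
+        >>> with ds.formatted_as(type="numpy", columns=["label"]):
+        ...     mean_label = ds["train"]["label"].mean()
+        ```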
+ + Args: + type (`str`, *optional*): + Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. + `None` means `__getitem__` returns python objects (default). + columns (`List[str]`, *optional*): + Columns to format in the output. + `None` means `__getitem__` returns all columns (default). + output_all_columns (`bool`, defaults to False): + Keep un-formatted columns as well in the output (as python objects), + **format_kwargs (additional keyword arguments): + Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. + + It is possible to call `map` after calling `set_format`. Since `map` may add new columns, then the list of formatted columns + gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted: + + `new formatted columns = (all columns - previously unformatted columns)` + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True) + >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) + >>> ds["train"].format + {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': 'numpy'} + ``` + """ + self._check_values_type() + for dataset in self.values(): + dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) + + def reset_format(self): + """Reset `__getitem__` return format to python objects and all columns. + The transformation is applied to all the datasets of the dataset dictionary. + + Same as `self.set_format()` + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes") + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True) + >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) + >>> ds["train"].format + {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': 'numpy'} + >>> ds.reset_format() + >>> ds["train"].format + {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': None} + ``` + """ + self._check_values_type() + for dataset in self.values(): + dataset.set_format() + + def set_transform( + self, + transform: Optional[Callable], + columns: Optional[List] = None, + output_all_columns: bool = False, + ): + """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called. + The transform is set for every dataset in the dataset dictionary + As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format` + + Args: + transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format` + A formatting function is a callable that takes a batch (as a dict) as input and returns a batch. + This function is applied right before returning the objects in ``__getitem__``. 
+ columns (`List[str]`, optional): columns to format in the output + If specified, then the input batch of the transform only contains those columns. + output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects) + If set to True, then the other un-formatted columns are kept with the output of the transform. + + """ + self._check_values_type() + for dataset in self.values(): + dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform) + + def with_format( + self, + type: Optional[str] = None, + columns: Optional[List] = None, + output_all_columns: bool = False, + **format_kwargs, + ) -> "DatasetDict": + """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. + The format `type` (for example "numpy") is used to format batches when using `__getitem__`. + The format is set for every dataset in the dataset dictionary. + + It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. + + Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects. + + Args: + type (`str`, *optional*): + Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. + `None` means `__getitem__` returns python objects (default). + columns (`List[str]`, *optional*): + Columns to format in the output. + `None` means `__getitem__` returns all columns (default). + output_all_columns (`bool`, defaults to `False`): + Keep un-formatted columns as well in the output (as python objects). + **format_kwargs (additional keyword arguments): + Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. + + Example: + + ```py + >>> from datasets import load_dataset + >>> from transformers import AutoTokenizer + >>> ds = load_dataset("rotten_tomatoes") + >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) + >>> ds["train"].format + {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': None} + >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) + >>> ds["train"].format + {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], + 'format_kwargs': {}, + 'output_all_columns': False, + 'type': 'tensorflow'} + ``` + """ + dataset = copy.deepcopy(self) + dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) + return dataset + + def with_transform( + self, + transform: Optional[Callable], + columns: Optional[List] = None, + output_all_columns: bool = False, + ) -> "DatasetDict": + """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. + The transform is set for every dataset in the dataset dictionary + + As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. + + Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects. + + Args: + transform (`Callable`, *optional*): + User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. 
+                A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+                This function is applied right before returning the objects in `__getitem__`.
+            columns (`List[str]`, *optional*):
+                Columns to format in the output.
+                If specified, then the input batch of the transform only contains those columns.
+            output_all_columns (`bool`, defaults to `False`):
+                Keep un-formatted columns as well in the output (as python objects).
+                If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> from transformers import AutoTokenizer
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+        >>> def encode(example):
+        ...     return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
+        >>> ds = ds.with_transform(encode)
+        >>> ds["train"][0]
+        {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+         'input_ids': tensor([  101,  1103,  2067,  1110, 17348,  1106,  1129,  1103,  6880,  1432,
+                  112,   188,  1207,   107, 14255,  1389,   107,  1105,  1115,  1119,
+                  112,   188,  1280,  1106,  1294,   170, 24194,  1256,  3407,  1190,
+                  170, 11791,  5253,   188,  1732,  7200, 10947, 12606,  2895,   117,
+                  179,  7766,   118,   172, 15554,  1181,  3498,  6961,  3263,  1137,
+                  188,  1566,  7912, 14516,  6997,   119,   102]),
+         'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                 0, 0, 0, 0, 0, 0, 0, 0, 0])}
+        ```
+        """
+        dataset = copy.deepcopy(self)
+        dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+        return dataset
+
+    def map(
+        self,
+        function: Optional[Callable] = None,
+        with_indices: bool = False,
+        with_rank: bool = False,
+        input_columns: Optional[Union[str, List[str]]] = None,
+        batched: bool = False,
+        batch_size: Optional[int] = 1000,
+        drop_last_batch: bool = False,
+        remove_columns: Optional[Union[str, List[str]]] = None,
+        keep_in_memory: bool = False,
+        load_from_cache_file: Optional[bool] = None,
+        cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+        writer_batch_size: Optional[int] = 1000,
+        features: Optional[Features] = None,
+        disable_nullable: bool = False,
+        fn_kwargs: Optional[dict] = None,
+        num_proc: Optional[int] = None,
+        desc: Optional[str] = None,
+    ) -> "DatasetDict":
+        """Apply a function to all the elements in the table (individually or in batches)
+        and update the table (if the function does update examples).
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        Args:
+            function (`callable`): With one of the following signatures:
+                - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+                - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+                - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+                - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+                For advanced usage, the function can also return a `pyarrow.Table`.
+                Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+
+            with_indices (`bool`, defaults to `False`):
+                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+            with_rank (`bool`, defaults to `False`):
+                Provide process rank to `function`. Note that in this case the
+                signature of `function` should be `def function(example[, idx], rank): ...`.
+            input_columns (`Union[str, List[str]]`, *optional*, defaults to `None`):
+                The columns to be passed into `function` as
+                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+            batched (`bool`, defaults to `False`):
+                Provide batch of examples to `function`.
+            batch_size (`int`, *optional*, defaults to `1000`):
+                Number of examples per batch provided to `function` if `batched=True`.
+                If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+            drop_last_batch (`bool`, defaults to `False`):
+                Whether a last batch smaller than the `batch_size` should be
+                dropped instead of being processed by the function.
+            remove_columns (`Union[str, List[str]]`, *optional*, defaults to `None`):
+                Remove a selection of columns while doing the mapping.
+                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+                columns with names in `remove_columns`, these columns will be kept.
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the dataset in memory instead of writing it to a cache file.
+            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+                If a cache file storing the current computation from `function`
+                can be identified, use it instead of recomputing.
+            cache_file_names (`Dict[str, str]`, *optional*, defaults to `None`):
+                Provide the name of a path for the cache file. It is used to store the
+                results of the computation instead of the automatically generated cache file name.
+                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+            features (`datasets.Features`, *optional*, defaults to `None`):
+                Use a specific [`Features`] to store the cache file
+                instead of the automatically generated one.
+            disable_nullable (`bool`, defaults to `False`):
+                Disallow null values in the table.
+            fn_kwargs (`Dict`, *optional*, defaults to `None`):
+                Keyword arguments to be passed to `function`.
+            num_proc (`int`, *optional*, defaults to `None`):
+                Number of processes for multiprocessing. By default it doesn't
+                use multiprocessing.
+            desc (`str`, *optional*, defaults to `None`):
+                Meaningful description to be displayed alongside the progress bar while mapping examples.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> def add_prefix(example):
+        ...     example["text"] = "Review: " + example["text"]
+        ...     
return example
+        >>> ds = ds.map(add_prefix)
+        >>> ds["train"][0:3]["text"]
+        ['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+         'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
+         'Review: effective but too-tepid biopic']
+
+        # process a batch of examples
+        >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+        # set number of processors
+        >>> ds = ds.map(add_prefix, num_proc=4)
+        ```
+        """
+        self._check_values_type()
+        if cache_file_names is None:
+            cache_file_names = {k: None for k in self}
+        return DatasetDict(
+            {
+                k: dataset.map(
+                    function=function,
+                    with_indices=with_indices,
+                    with_rank=with_rank,
+                    input_columns=input_columns,
+                    batched=batched,
+                    batch_size=batch_size,
+                    drop_last_batch=drop_last_batch,
+                    remove_columns=remove_columns,
+                    keep_in_memory=keep_in_memory,
+                    load_from_cache_file=load_from_cache_file,
+                    cache_file_name=cache_file_names[k],
+                    writer_batch_size=writer_batch_size,
+                    features=features,
+                    disable_nullable=disable_nullable,
+                    fn_kwargs=fn_kwargs,
+                    num_proc=num_proc,
+                    desc=desc,
+                )
+                for k, dataset in self.items()
+            }
+        )
+
+    def filter(
+        self,
+        function,
+        with_indices=False,
+        input_columns: Optional[Union[str, List[str]]] = None,
+        batched: bool = False,
+        batch_size: Optional[int] = 1000,
+        keep_in_memory: bool = False,
+        load_from_cache_file: Optional[bool] = None,
+        cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+        writer_batch_size: Optional[int] = 1000,
+        fn_kwargs: Optional[dict] = None,
+        num_proc: Optional[int] = None,
+        desc: Optional[str] = None,
+    ) -> "DatasetDict":
+        """Apply a filter function to all the elements in the table in batches
+        and update the table so that the dataset only includes examples according to the filter function.
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        Args:
+            function (`callable`):
+                With one of the following signatures:
+                - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+                - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+                - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+                - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+            with_indices (`bool`, defaults to `False`):
+                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+            input_columns (`Union[str, List[str]]`, *optional*, defaults to `None`):
+                The columns to be passed into `function` as
+                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+            batched (`bool`, defaults to `False`):
+                Provide batch of examples to `function`.
+            batch_size (`int`, *optional*, defaults to `1000`):
+                Number of examples per batch provided to `function` if `batched=True`.
+                If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the dataset in memory instead of writing it to a cache file.
+            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+                If a cache file storing the current computation from `function`
+                can be identified, use it instead of recomputing.
+            cache_file_names (`Dict[str, str]`, *optional*, defaults to `None`):
+                Provide the name of a path for the cache file. It is used to store the
+                results of the computation instead of the automatically generated cache file name.
+                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `filter`.
+            fn_kwargs (`Dict`, *optional*, defaults to `None`):
+                Keyword arguments to be passed to `function`.
+            num_proc (`int`, *optional*, defaults to `None`):
+                Number of processes for multiprocessing. By default it doesn't
+                use multiprocessing.
+            desc (`str`, *optional*, defaults to `None`):
+                Meaningful description to be displayed alongside the progress bar while filtering examples.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds.filter(lambda x: x["label"] == 1)
+        DatasetDict({
+            train: Dataset({
+                features: ['text', 'label'],
+                num_rows: 4265
+            })
+            validation: Dataset({
+                features: ['text', 'label'],
+                num_rows: 533
+            })
+            test: Dataset({
+                features: ['text', 'label'],
+                num_rows: 533
+            })
+        })
+        ```
+        """
+        self._check_values_type()
+        if cache_file_names is None:
+            cache_file_names = {k: None for k in self}
+        return DatasetDict(
+            {
+                k: dataset.filter(
+                    function=function,
+                    with_indices=with_indices,
+                    input_columns=input_columns,
+                    batched=batched,
+                    batch_size=batch_size,
+                    keep_in_memory=keep_in_memory,
+                    load_from_cache_file=load_from_cache_file,
+                    cache_file_name=cache_file_names[k],
+                    writer_batch_size=writer_batch_size,
+                    fn_kwargs=fn_kwargs,
+                    num_proc=num_proc,
+                    desc=desc,
+                )
+                for k, dataset in self.items()
+            }
+        )
+
+    def flatten_indices(
+        self,
+        keep_in_memory: bool = False,
+        cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+        writer_batch_size: Optional[int] = 1000,
+        features: Optional[Features] = None,
+        disable_nullable: bool = False,
+        num_proc: Optional[int] = None,
+        new_fingerprint: Optional[str] = None,
+    ) -> "DatasetDict":
+        """Create and cache a new Dataset by flattening the indices mapping.
+
+        Args:
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the dataset in memory instead of writing it to a cache file.
+            cache_file_names (`Dict[str, str]`, *optional*, defaults to `None`):
+                Provide the name of a path for the cache file. It is used to store the
+                results of the computation instead of the automatically generated cache file name.
+                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+            features (`Optional[datasets.Features]`, defaults to `None`):
+                Use a specific [`Features`] to store the cache file
+                instead of the automatically generated one.
+            disable_nullable (`bool`, defaults to `False`):
+                Disallow null values in the table.
+            num_proc (`int`, *optional*, defaults to `None`):
+                Max number of processes when generating cache. Already cached shards are loaded sequentially.
+            new_fingerprint (`str`, *optional*, defaults to `None`):
+                The new fingerprint of the dataset after transform.
+                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+        """
+        self._check_values_type()
+        if cache_file_names is None:
+            cache_file_names = {k: None for k in self}
+        return DatasetDict(
+            {
+                k: dataset.flatten_indices(
+                    keep_in_memory=keep_in_memory,
+                    cache_file_name=cache_file_names[k],
+                    writer_batch_size=writer_batch_size,
+                    features=features,
+                    disable_nullable=disable_nullable,
+                    num_proc=num_proc,
+                    new_fingerprint=new_fingerprint,
+                )
+                for k, dataset in self.items()
+            }
+        )
+
+    def sort(
+        self,
+        column_names: Union[str, Sequence[str]],
+        reverse: Union[bool, Sequence[bool]] = False,
+        kind="deprecated",
+        null_placement: str = "at_end",
+        keep_in_memory: bool = False,
+        load_from_cache_file: Optional[bool] = None,
+        indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+        writer_batch_size: Optional[int] = 1000,
+    ) -> "DatasetDict":
+        """Create a new dataset sorted according to a single or multiple columns.
+
+        Args:
+            column_names (`Union[str, Sequence[str]]`):
+                Column name(s) to sort by.
+            reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+                If `True`, sort by descending order rather than ascending. If a single bool is provided,
+                the value is applied to the sorting of all column names. Otherwise a list of bools with the
+                same length and order as `column_names` must be provided.
+            kind (`str`, *optional*):
+                Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`.
+                The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
+                the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+                `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+            null_placement (`str`, defaults to `at_end`):
+                Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the sorted indices in memory instead of writing them to a cache file.
+            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+                If a cache file storing the sorted indices
+                can be identified, use it instead of recomputing.
+            indices_cache_file_names (`Dict[str, str]`, *optional*, defaults to `None`):
+                Provide the name of a path for the cache file. It is used to store the
+                indices mapping instead of the automatically generated cache file name.
+                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                A higher value gives smaller cache files, a lower value consumes less temporary memory.
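+
+        `sort` (like `shuffle`) writes an indices mapping on top of the original data
+        rather than rewriting the Arrow table itself. To materialize the sorted order
+        (e.g. for faster downstream access), the result can be chained with
+        [`~DatasetDict.flatten_indices`]; a minimal sketch:
+
+        ```py
+        >>> materialized_ds = ds.sort("label").flatten_indices()
+        ```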
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset('rotten_tomatoes')
+        >>> ds['train']['label'][:10]
+        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+        >>> sorted_ds = ds.sort('label')
+        >>> sorted_ds['train']['label'][:10]
+        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+        >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+        >>> another_sorted_ds['train']['label'][:10]
+        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+        ```
+        """
+        self._check_values_type()
+        if indices_cache_file_names is None:
+            indices_cache_file_names = {k: None for k in self}
+        return DatasetDict(
+            {
+                k: dataset.sort(
+                    column_names=column_names,
+                    reverse=reverse,
+                    kind=kind,
+                    null_placement=null_placement,
+                    keep_in_memory=keep_in_memory,
+                    load_from_cache_file=load_from_cache_file,
+                    indices_cache_file_name=indices_cache_file_names[k],
+                    writer_batch_size=writer_batch_size,
+                )
+                for k, dataset in self.items()
+            }
+        )
+
+    def shuffle(
+        self,
+        seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
+        seed: Optional[int] = None,
+        generators: Optional[Dict[str, np.random.Generator]] = None,
+        keep_in_memory: bool = False,
+        load_from_cache_file: Optional[bool] = None,
+        indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+        writer_batch_size: Optional[int] = 1000,
+    ) -> "DatasetDict":
+        """Create a new Dataset where the rows are shuffled.
+
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        Currently shuffling uses numpy random generators.
+        You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
+
+        Args:
+            seeds (`Dict[str, int]` or `int`, *optional*):
+                A seed to initialize the default BitGenerator if `generator=None`.
+                If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+                If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+                You can provide one `seed` per dataset in the dataset dictionary.
+            seed (`int`, *optional*):
+                A seed to initialize the default BitGenerator if `generator=None`. Alias for `seeds` (a `ValueError` is raised if both are provided).
+            generators (`Dict[str, np.random.Generator]`, *optional*):
+                NumPy random Generator to use to compute the permutation of the dataset rows.
+                If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+                You have to provide one `generator` per dataset in the dataset dictionary.
+            keep_in_memory (`bool`, defaults to `False`):
+                Keep the dataset in memory instead of writing it to a cache file.
+            load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+                If a cache file storing the shuffled indices
+                can be identified, use it instead of recomputing.
+            indices_cache_file_names (`Dict[str, str]`, *optional*):
+                Provide the name of a path for the cache file. It is used to store the
+                indices mappings instead of the automatically generated cache file name.
+                You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+            writer_batch_size (`int`, defaults to `1000`):
+                Number of rows per write operation for the cache file writer.
+                This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `shuffle`.
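+
+        To shuffle each split with its own reproducible order, a per-split mapping can be
+        passed to `seeds`; a minimal sketch (the seed values are arbitrary, and every split
+        in the dictionary needs an entry):
+
+        ```py
+        >>> shuffled_ds = ds.shuffle(seeds={"train": 42, "validation": 123, "test": 7})
+        ```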
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes")
+        >>> ds["train"]["label"][:10]
+        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+        # set a seed
+        >>> shuffled_ds = ds.shuffle(seed=42)
+        >>> shuffled_ds["train"]["label"][:10]
+        [0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
+        ```
+        """
+        self._check_values_type()
+        if seed is not None and seeds is not None:
+            raise ValueError("Please specify seed or seeds, but not both")
+        seeds = seed if seed is not None else seeds
+        if seeds is None:
+            seeds = {k: None for k in self}
+        elif not isinstance(seeds, dict):
+            seeds = {k: seeds for k in self}
+        if generators is None:
+            generators = {k: None for k in self}
+        if indices_cache_file_names is None:
+            indices_cache_file_names = {k: None for k in self}
+        return DatasetDict(
+            {
+                k: dataset.shuffle(
+                    seed=seeds[k],
+                    generator=generators[k],
+                    keep_in_memory=keep_in_memory,
+                    load_from_cache_file=load_from_cache_file,
+                    indices_cache_file_name=indices_cache_file_names[k],
+                    writer_batch_size=writer_batch_size,
+                )
+                for k, dataset in self.items()
+            }
+        )
+
+    def save_to_disk(
+        self,
+        dataset_dict_path: PathLike,
+        fs="deprecated",
+        max_shard_size: Optional[Union[str, int]] = None,
+        num_shards: Optional[Dict[str, int]] = None,
+        num_proc: Optional[int] = None,
+        storage_options: Optional[dict] = None,
+    ):
+        """
+        Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+        For [`Image`] and [`Audio`] data:
+
+        All the Image() and Audio() data are stored in the arrow files.
+        If you want to store paths or URLs, please use the Value("string") type.
+
+        Args:
+            dataset_dict_path (`str`):
+                Path (e.g. `dataset/train`) or remote URI
+                (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
+                saved to.
+            fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+                Instance of the remote filesystem where the dataset will be saved to.
+
+                `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+            max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+                The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
+                (like `"50MB"`).
+            num_shards (`Dict[str, int]`, *optional*):
+                Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+                You need to provide the number of shards for each dataset in the dataset dictionary.
+                Use a dictionary to define a different num_shards for each split.
+
+            num_proc (`int`, *optional*, defaults to `None`):
+                Number of processes used when saving the dataset splits locally.
+                Multiprocessing is disabled by default.
+
+            storage_options (`dict`, *optional*):
+                Key/value pairs to be passed on to the file-system backend, if any.
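+
+        For remote storage, the URI scheme selects the `fsspec` filesystem and credentials
+        travel through `storage_options`. A hedged sketch (the bucket name and keys are
+        placeholders, and an extra package such as `s3fs` is assumed to be installed):
+
+        ```py
+        >>> dataset_dict.save_to_disk(
+        ...     "s3://my-bucket/dataset/directory",
+        ...     storage_options={"key": "<aws_access_key_id>", "secret": "<aws_secret_access_key>"},
+        ... )
+        ```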
+
+        Example:
+
+        ```python
+        >>> dataset_dict.save_to_disk("path/to/dataset/directory")
+        >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+        >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
+        ```
+        """
+        if fs != "deprecated":
+            warnings.warn(
+                "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+                "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+                FutureWarning,
+            )
+            storage_options = fs.storage_options
+
+        fs: fsspec.AbstractFileSystem
+        fs, _, _ = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
+
+        if num_shards is None:
+            num_shards = {k: None for k in self}
+        elif not isinstance(num_shards, dict):
+            raise ValueError(
+                "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {'train': 128, 'test': 4}"
+            )
+
+        fs.makedirs(dataset_dict_path, exist_ok=True)
+
+        with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
+            json.dump({"splits": list(self)}, f)
+        for k, dataset in self.items():
+            dataset.save_to_disk(
+                posixpath.join(dataset_dict_path, k),
+                num_shards=num_shards.get(k),
+                max_shard_size=max_shard_size,
+                num_proc=num_proc,
+                storage_options=storage_options,
+            )
+
+    @staticmethod
+    def load_from_disk(
+        dataset_dict_path: PathLike,
+        fs="deprecated",
+        keep_in_memory: Optional[bool] = None,
+        storage_options: Optional[dict] = None,
+    ) -> "DatasetDict":
+        """
+        Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+        Args:
+            dataset_dict_path (`str`):
+                Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+                of the dataset dict directory where the dataset dict will be loaded from.
+            fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+                Instance of the remote filesystem where the dataset will be loaded from.
+
+                `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+            keep_in_memory (`bool`, defaults to `None`):
+                Whether to copy the dataset in-memory. If `None`, the
+                dataset will not be copied in-memory unless explicitly enabled by setting
+                `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+                [improve performance](../cache#improve-performance) section.
+            storage_options (`dict`, *optional*):
+                Key/value pairs to be passed on to the file-system backend, if any.
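+
+        As with [`~DatasetDict.save_to_disk`], remote URIs are resolved through `fsspec`;
+        a minimal sketch (placeholder bucket, assuming the matching filesystem package,
+        e.g. `s3fs`, is installed):
+
+        ```py
+        >>> ds = DatasetDict.load_from_disk("s3://my-bucket/dataset/directory", storage_options={"anon": True})
+        ```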
+ + + + Returns: + [`DatasetDict`] + + Example: + + ```py + >>> ds = load_from_disk('path/to/dataset/directory') + ``` + """ + if fs != "deprecated": + warnings.warn( + "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", + FutureWarning, + ) + storage_options = fs.storage_options + + fs: fsspec.AbstractFileSystem + fs, _, [dataset_dict_path] = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options) + + dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME) + dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME) + dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME) + if not fs.isfile(dataset_dict_json_path): + if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path): + raise FileNotFoundError( + f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead." + ) + raise FileNotFoundError( + f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`." + ) + + with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f: + splits = json.load(f)["splits"] + + dataset_dict = DatasetDict() + for k in splits: + dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k) + dataset_dict[k] = Dataset.load_from_disk( + dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options + ) + return dataset_dict + + @staticmethod + def from_csv( + path_or_paths: Dict[str, PathLike], + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + **kwargs, + ) -> "DatasetDict": + """Create [`DatasetDict`] from CSV file(s). + + Args: + path_or_paths (`dict` of path-like): + Path(s) of the CSV file(s). + features ([`Features`], *optional*): + Dataset features. + cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`): + Directory to cache data. + keep_in_memory (`bool`, defaults to `False`): + Whether to copy the data in-memory. + **kwargs (additional keyword arguments): + Keyword arguments to be passed to [`pandas.read_csv`]. + + Returns: + [`DatasetDict`] + + Example: + + ```py + >>> from datasets import DatasetDict + >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'}) + ``` + """ + # Dynamic import to avoid circular dependency + from .io.csv import CsvDatasetReader + + return CsvDatasetReader( + path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs + ).read() + + @staticmethod + def from_json( + path_or_paths: Dict[str, PathLike], + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + **kwargs, + ) -> "DatasetDict": + """Create [`DatasetDict`] from JSON Lines file(s). + + Args: + path_or_paths (`path-like` or list of `path-like`): + Path(s) of the JSON Lines file(s). + features ([`Features`], *optional*): + Dataset features. + cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`): + Directory to cache data. + keep_in_memory (`bool`, defaults to `False`): + Whether to copy the data in-memory. + **kwargs (additional keyword arguments): + Keyword arguments to be passed to [`JsonConfig`]. 
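+
+        Several files can also be mapped to the same split; a small sketch (the file names
+        are illustrative, and lists of paths per split are accepted by the reader even
+        though the annotation shows a single path):
+
+        ```py
+        >>> ds = DatasetDict.from_json({"train": ["part1.jsonl", "part2.jsonl"], "test": "test.jsonl"})
+        ```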
+
+        Returns:
+            [`DatasetDict`]
+
+        Example:
+
+        ```py
+        >>> from datasets import DatasetDict
+        >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
+        ```
+        """
+        # Dynamic import to avoid circular dependency
+        from .io.json import JsonDatasetReader
+
+        return JsonDatasetReader(
+            path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+        ).read()
+
+    @staticmethod
+    def from_parquet(
+        path_or_paths: Dict[str, PathLike],
+        features: Optional[Features] = None,
+        cache_dir: str = None,
+        keep_in_memory: bool = False,
+        columns: Optional[List[str]] = None,
+        **kwargs,
+    ) -> "DatasetDict":
+        """Create [`DatasetDict`] from Parquet file(s).
+
+        Args:
+            path_or_paths (`dict` of path-like):
+                Path(s) of the Parquet file(s).
+            features ([`Features`], *optional*):
+                Dataset features.
+            cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+                Directory to cache data.
+            keep_in_memory (`bool`, defaults to `False`):
+                Whether to copy the data in-memory.
+            columns (`List[str]`, *optional*):
+                If not `None`, only these columns will be read from the file.
+                A column name may be a prefix of a nested field, e.g. 'a' will select
+                'a.b', 'a.c', and 'a.d.e'.
+            **kwargs (additional keyword arguments):
+                Keyword arguments to be passed to [`ParquetConfig`].
+
+        Returns:
+            [`DatasetDict`]
+
+        Example:
+
+        ```py
+        >>> from datasets import DatasetDict
+        >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
+        ```
+        """
+        # Dynamic import to avoid circular dependency
+        from .io.parquet import ParquetDatasetReader
+
+        return ParquetDatasetReader(
+            path_or_paths,
+            features=features,
+            cache_dir=cache_dir,
+            keep_in_memory=keep_in_memory,
+            columns=columns,
+            **kwargs,
+        ).read()
+
+    @staticmethod
+    def from_text(
+        path_or_paths: Dict[str, PathLike],
+        features: Optional[Features] = None,
+        cache_dir: str = None,
+        keep_in_memory: bool = False,
+        **kwargs,
+    ) -> "DatasetDict":
+        """Create [`DatasetDict`] from text file(s).
+
+        Args:
+            path_or_paths (`dict` of path-like):
+                Path(s) of the text file(s).
+            features ([`Features`], *optional*):
+                Dataset features.
+            cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+                Directory to cache data.
+            keep_in_memory (`bool`, defaults to `False`):
+                Whether to copy the data in-memory.
+            **kwargs (additional keyword arguments):
+                Keyword arguments to be passed to [`TextConfig`].
+
+        Returns:
+            [`DatasetDict`]
+
+        Example:
+
+        ```py
+        >>> from datasets import DatasetDict
+        >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
+        ```
+        """
+        # Dynamic import to avoid circular dependency
+        from .io.text import TextDatasetReader
+
+        return TextDatasetReader(
+            path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+        ).read()
+
+    @deprecated()
+    @is_documented_by(Dataset.prepare_for_task)
+    def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict":
+        self._check_values_type()
+        return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()})
+
+    @is_documented_by(Dataset.align_labels_with_mapping)
+    def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
+        self._check_values_type()
+        return DatasetDict(
+            {
+                k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
+                for k, dataset in self.items()
+            }
+        )
+
+    def push_to_hub(
+        self,
+        repo_id,
+        config_name: str = "default",
+        commit_message: Optional[str] = None,
+        private: Optional[bool] = False,
+        token: Optional[str] = None,
+        revision: Optional[str] = None,
+        branch="deprecated",
+        create_pr: Optional[bool] = False,
+        max_shard_size: Optional[Union[int, str]] = None,
+        num_shards: Optional[Dict[str, int]] = None,
+        embed_external_files: bool = True,
+    ):
+        """Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
+        The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+        Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
+
+        The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
+        data, the Parquet files will store the bytes of your images or audio files.
+        You can disable this by setting `embed_external_files` to `False`.
+
+        Args:
+            repo_id (`str`):
+                The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+                `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+                of the logged-in user.
+            config_name (`str`):
+                Configuration name of a dataset. Defaults to "default".
+            commit_message (`str`, *optional*):
+                Message to commit while pushing. Will default to `"Upload dataset"`.
+            private (`bool`, *optional*):
+                Whether the dataset repository should be set to private or not. Only affects repository creation:
+                a repository that already exists will not be affected by that parameter.
+            token (`str`, *optional*):
+                An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+                to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+                if no token is passed and the user is not logged-in.
+            revision (`str`, *optional*):
+                Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+            branch (`str`, *optional*):
+                The git branch on which to push the dataset. This defaults to the default branch as specified
+                in your repository, which defaults to `"main"`.
+
+                `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+            create_pr (`bool`, *optional*, defaults to `False`):
+                Whether or not to create a PR with the uploaded files or directly commit.
+
+            max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+                The maximum size of the dataset shards to be uploaded to the hub. 
If expressed as a string, needs to be digits followed by a unit
+                (like `"500MB"` or `"1GB"`).
+            num_shards (`Dict[str, int]`, *optional*):
+                Number of shards to write. By default the number of shards depends on `max_shard_size`.
+                Use a dictionary to define a different num_shards for each split.
+
+            embed_external_files (`bool`, defaults to `True`):
+                Whether to embed file bytes in the shards.
+                In particular, this will do the following before the push for fields of type:
+
+                - [`Audio`] and [`Image`]: removes local path information and embeds file content in the Parquet files.
+
+        Example:
+
+        ```python
+        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
+        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
+        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
+        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
+        ```
+
+        If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+        ```python
+        >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
+        >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
+        >>> # later
+        >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
+        >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
+        ```
+        """
+
+        if num_shards is None:
+            num_shards = {k: None for k in self}
+        elif not isinstance(num_shards, dict):
+            raise ValueError(
+                "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {'train': 128, 'test': 4}"
+            )
+
+        if branch != "deprecated":
+            warnings.warn(
+                "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+                f"You can remove this warning by passing 'revision={branch}' instead.",
+                FutureWarning,
+            )
+            revision = branch
+
+        self._check_values_type()
+        self._check_values_features()
+        total_uploaded_size = 0
+        total_dataset_nbytes = 0
+        info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
+        info_to_dump.config_name = config_name
+        info_to_dump.splits = SplitDict()
+
+        for split in self.keys():
+            if not re.match(_split_re, split):
+                raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+        api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+        repo_url = api.create_repo(
+            repo_id,
+            token=token,
+            repo_type="dataset",
+            private=private,
+            exist_ok=True,
+        )
+        repo_id = repo_url.repo_id
+
+        if revision is not None:
+            api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+        data_dir = config_name if config_name != "default" else "data"  # for backward compatibility
+
+        additions = []
+        for split in self.keys():
+            logger.info(f"Pushing split {split} to the Hub.")
+            # The split=key needs to be removed before merging
+            split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
+                repo_id,
+                data_dir=data_dir,
+                split=split,
+                token=token,
+                revision=revision,
+                create_pr=create_pr,
+                max_shard_size=max_shard_size,
+                num_shards=num_shards.get(split),
+                embed_external_files=embed_external_files,
+            )
+            additions += split_additions
+            total_uploaded_size += uploaded_size
+            total_dataset_nbytes += dataset_nbytes
+            info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
+        info_to_dump.download_checksums = None
+        info_to_dump.download_size = total_uploaded_size
+        info_to_dump.dataset_size = total_dataset_nbytes
+        info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
+
+        metadata_config_to_dump = {
+            "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in 
+        metadata_config_to_dump = {
+            "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
+        }
+
+        # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+        # and delete old split shards (if they exist)
+        repo_with_dataset_card, repo_with_dataset_infos = False, False
+        repo_splits = []  # use a list to keep the order of the splits
+        deletions = []
+        repo_files_to_add = [addition.path_in_repo for addition in additions]
+        for repo_file in api.list_files_info(repo_id, revision=revision, repo_type="dataset", token=token):
+            if repo_file.rfilename == "README.md":
+                repo_with_dataset_card = True
+            elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+                repo_with_dataset_infos = True
+            elif (
+                repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
+                and repo_file.rfilename not in repo_files_to_add
+            ):
+                deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+            elif fnmatch.fnmatch(
+                repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+            ):
+                repo_split = string_to_dict(
+                    repo_file.rfilename,
+                    glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+                )["split"]
+                if repo_split not in repo_splits:
+                    repo_splits.append(repo_split)
+
+        # get the info from the README to update them
+        if repo_with_dataset_card:
+            dataset_card_path = api.hf_hub_download(repo_id, "README.md", repo_type="dataset", revision=revision)
+            dataset_card = DatasetCard.load(Path(dataset_card_path))
+            dataset_card_data = dataset_card.data
+            metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+        # get the deprecated dataset_infos.json to update them
+        elif repo_with_dataset_infos:
+            dataset_card = None
+            dataset_card_data = DatasetCardData()
+            metadata_configs = MetadataConfigs()
+        else:
+            dataset_card = None
+            dataset_card_data = DatasetCardData()
+            metadata_configs = MetadataConfigs()
+        # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+        if not metadata_configs and repo_splits:
+            default_metadata_configs_to_dump = {
+                "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+            }
+            MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+        # push to the deprecated dataset_infos.json
+        if repo_with_dataset_infos:
+            dataset_infos_path = api.hf_hub_download(
+                repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+            )
+            with open(dataset_infos_path, encoding="utf-8") as f:
+                dataset_infos: dict = json.load(f)
+            dataset_infos[config_name] = asdict(info_to_dump)
+            buffer = BytesIO()
+            buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+            additions.append(
+                CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+            )
+        # push to README
+        DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+        MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+        dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+        additions.append(CommitOperationAdd(path_in_repo="README.md", path_or_fileobj=str(dataset_card).encode()))
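+
+        # a single commit is used when the number of files is small enough; otherwise the push
+        # is split into multiple commits of at most UPLOADS_MAX_NUMBER_PER_COMMIT additions each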
+        commit_message = commit_message if commit_message is not None else "Upload dataset"
+        if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+            api.create_commit(
+                repo_id,
+                operations=additions + deletions,
+                commit_message=commit_message,
+                token=token,
+                repo_type="dataset",
+                revision=revision,
+                create_pr=create_pr,
+            )
+        else:
+            logger.info(
+                f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+            )
+            num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+            for i in range(0, num_commits):
+                operations = additions[
+                    i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+                ] + (deletions if i == 0 else [])
+                api.create_commit(
+                    repo_id,
+                    operations=operations,
+                    commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+                    token=token,
+                    repo_type="dataset",
+                    revision=revision,
+                    create_pr=create_pr,
+                )
+                logger.info(
+                    f"Commit #{i+1} completed"
+                    + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+                    + "."
+                )
+
+
+class IterableDatasetDict(dict):
+    def with_format(
+        self,
+        type: Optional[str] = None,
+    ) -> "IterableDatasetDict":
+        """
+        Return a dataset with the specified format.
+        This method only supports the "torch" format for now.
+        The format is set to all the datasets of the dataset dictionary.
+
+        Args:
+            type (`str`, *optional*, defaults to `None`):
+                If set to "torch", the returned dataset
+                will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+        >>> from transformers import AutoTokenizer
+        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+        >>> def encode(examples):
+        ...     return tokenizer(examples["text"], truncation=True, padding="max_length")
+        >>> ds = ds.map(encode, batched=True, remove_columns=["text"])
+        >>> ds = ds.with_format("torch")
+        ```
+        """
+        return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
+
+    def map(
+        self,
+        function: Optional[Callable] = None,
+        with_indices: bool = False,
+        input_columns: Optional[Union[str, List[str]]] = None,
+        batched: bool = False,
+        batch_size: int = 1000,
+        drop_last_batch: bool = False,
+        remove_columns: Optional[Union[str, List[str]]] = None,
+        fn_kwargs: Optional[dict] = None,
+    ) -> "IterableDatasetDict":
+        """
+        Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+        If your function returns a column that already exists, then it overwrites it.
+        The function is applied on-the-fly on the examples when iterating over the dataset.
+        The transformation is applied to all the datasets of the dataset dictionary.
+
+        You can specify whether the function should be batched or not with the `batched` parameter:
+
+        - If batched is `False`, then the function takes 1 example in and should return 1 example.
+          An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+        - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+          A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+        - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+          Note that the last batch may have fewer than `n` examples.
+          A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
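+
+        For instance, the batched counterpart of the `add_prefix` example below could look like this (a sketch):
+
+        ```py
+        >>> def add_prefix_batched(batch):  # dict of lists in, dict of lists out
+        ...     return {"text": ["Review: " + text for text in batch["text"]]}
+        >>> ds = ds.map(add_prefix_batched, batched=True)
+        ```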
+ + Args: + function (`Callable`, *optional*, defaults to `None`): + Function applied on-the-fly on the examples when you iterate on the dataset. + It must have one of the following signatures: + + - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` + - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` + - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` + - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` + + For advanced usage, the function can also return a `pyarrow.Table`. + Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. + If no function is provided, default to identity function: `lambda x: x`. + with_indices (`bool`, defaults to `False`): + Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. + input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`): + The columns to be passed into `function` + as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. + batched (`bool`, defaults to `False`): + Provide batch of examples to `function`. + batch_size (`int`, *optional*, defaults to `1000`): + Number of examples per batch provided to `function` if `batched=True`. + drop_last_batch (`bool`, defaults to `False`): + Whether a last batch smaller than the `batch_size` should be + dropped instead of being processed by the function. + remove_columns (`[List[str]]`, *optional*, defaults to `None`): + Remove a selection of columns while doing the mapping. + Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding + columns with names in `remove_columns`, these columns will be kept. + fn_kwargs (`Dict`, *optional*, defaults to `None`): + Keyword arguments to be passed to `function` + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", streaming=True) + >>> def add_prefix(example): + ... example["text"] = "Review: " + example["text"] + ... return example + >>> ds = ds.map(add_prefix) + >>> next(iter(ds["train"])) + {'label': 1, + 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} + ``` + """ + return IterableDatasetDict( + { + k: dataset.map( + function=function, + with_indices=with_indices, + input_columns=input_columns, + batched=batched, + batch_size=batch_size, + drop_last_batch=drop_last_batch, + remove_columns=remove_columns, + fn_kwargs=fn_kwargs, + ) + for k, dataset in self.items() + } + ) + + def filter( + self, + function: Optional[Callable] = None, + with_indices=False, + input_columns: Optional[Union[str, List[str]]] = None, + batched: bool = False, + batch_size: Optional[int] = 1000, + fn_kwargs: Optional[dict] = None, + ) -> "IterableDatasetDict": + """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. + The filtering is done on-the-fly when iterating over the dataset. + The filtering is applied to all the datasets of the dataset dictionary. 
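+
+        For instance, a batched predicate returns one boolean per example (a sketch, assuming a `label` column as in the example below):
+
+        ```py
+        >>> def keep_negative(batch):
+        ...     return [label == 0 for label in batch["label"]]
+        >>> ds = ds.filter(keep_negative, batched=True)
+        ```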
+
+        Args:
+            function (`Callable`):
+                Callable with one of the following signatures:
+
+                - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+                - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+                - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+                - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+                If no function is provided, defaults to an always `True` function: `lambda x: True`.
+            with_indices (`bool`, defaults to `False`):
+                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+            input_columns (`str` or `List[str]`, *optional*):
+                The columns to be passed into `function` as
+                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+            batched (`bool`, defaults to `False`):
+                Provide batch of examples to `function`.
+            batch_size (`int`, *optional*, defaults to `1000`):
+                Number of examples per batch provided to `function` if `batched=True`.
+            fn_kwargs (`Dict`, *optional*, defaults to `None`):
+                Keyword arguments to be passed to `function`.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+        >>> ds = ds.filter(lambda x: x["label"] == 0)
+        >>> list(ds["train"].take(3))
+        [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+         {'label': 0,
+          'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+         {'label': 0,
+          'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+        ```
+        """
+        return IterableDatasetDict(
+            {
+                k: dataset.filter(
+                    function=function,
+                    with_indices=with_indices,
+                    input_columns=input_columns,
+                    batched=batched,
+                    batch_size=batch_size,
+                    fn_kwargs=fn_kwargs,
+                )
+                for k, dataset in self.items()
+            }
+        )
+
+    def shuffle(
+        self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+    ) -> "IterableDatasetDict":
+        """
+        Randomly shuffles the elements of this dataset.
+        The shuffling is applied to all the datasets of the dataset dictionary.
+
+        This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
+        replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+        equal to the full size of the dataset is required.
+
+        For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+        initially select a random element from only the first 1000 elements in the buffer. Once an element is
+        selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+        maintaining the 1000 element buffer.
+
+        If the dataset is made of several shards, it also shuffles the order of the shards.
+        However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`],
+        then the order of the shards is kept unchanged.
+
+        Args:
+            seed (`int`, *optional*, defaults to `None`):
+                Random seed that will be used to shuffle the dataset.
+                It is used to sample from the shuffle buffer and also to shuffle the data shards.
+            generator (`numpy.random.Generator`, *optional*):
+                Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). + buffer_size (`int`, defaults to `1000`): + Size of the buffer. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", streaming=True) + >>> list(ds["train"].take(3)) + [{'label': 1, + 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, + {'label': 1, + 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, + {'label': 1, 'text': 'effective but too-tepid biopic'}] + >>> ds = ds.shuffle(seed=42) + >>> list(ds["train"].take(3)) + [{'label': 1, + 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, + {'label': 1, + 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, + {'label': 1, + 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] + ``` + """ + return IterableDatasetDict( + { + k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size) + for k, dataset in self.items() + } + ) + + def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict": + """ + Rename a column in the dataset, and move the features associated to the original column under the new column + name. + The renaming is applied to all the datasets of the dataset dictionary. + + Args: + original_column_name (`str`): + Name of the column to rename. + new_column_name (`str`): + New name for the column. + + Returns: + [`IterableDatasetDict`]: A copy of the dataset with a renamed column. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", streaming=True) + >>> ds = ds.rename_column("text", "movie_review") + >>> next(iter(ds["train"])) + {'label': 1, + 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} + ``` + """ + return IterableDatasetDict( + { + k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) + for k, dataset in self.items() + } + ) + + def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict": + """ + Rename several columns in the dataset, and move the features associated to the original columns under + the new column names. + The renaming is applied to all the datasets of the dataset dictionary. + + Args: + column_mapping (`Dict[str, str]`): + A mapping of columns to rename to their new names. 
+
+        Returns:
+            [`IterableDatasetDict`]: A copy of the dataset with renamed columns.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+        >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+        >>> next(iter(ds["train"]))
+        {'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+         'rating': 1}
+        ```
+        """
+        return IterableDatasetDict(
+            {k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
+        )
+
+    def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+        """
+        Remove one or several column(s) in the dataset and the features associated to them.
+        The removal is done on-the-fly on the examples when iterating over the dataset.
+        The removal is applied to all the datasets of the dataset dictionary.
+
+        Args:
+            column_names (`Union[str, List[str]]`):
+                Name of the column(s) to remove.
+
+        Returns:
+            [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+        >>> ds = ds.remove_columns("label")
+        >>> next(iter(ds["train"]))
+        {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+        ```
+        """
+        return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
+
+    def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+        """Select one or several column(s) in the dataset and the features
+        associated to them. The selection is done on-the-fly on the examples
+        when iterating over the dataset. The selection is applied to all the
+        datasets of the dataset dictionary.
+
+        Args:
+            column_names (`Union[str, List[str]]`):
+                Name of the column(s) to keep.
+
+        Returns:
+            [`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+        >>> ds = ds.select_columns("text")
+        >>> next(iter(ds["train"]))
+        {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+        ```
+        """
+        return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
+
+    def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
+        """Cast column to feature for decoding.
+        The type casting is applied to all the datasets of the dataset dictionary.
+
+        Args:
+            column (`str`):
+                Column name.
+            feature ([`Feature`]):
+                Target feature.
+ + Returns: + [`IterableDatasetDict`] + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", streaming=True) + >>> ds["train"].features + {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), + 'text': Value(dtype='string', id=None)} + >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) + >>> ds["train"].features + {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), + 'text': Value(dtype='string', id=None)} + ``` + """ + return IterableDatasetDict( + {k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()} + ) + + def cast( + self, + features: Features, + ) -> "IterableDatasetDict": + """ + Cast the dataset to a new set of features. + The type casting is applied to all the datasets of the dataset dictionary. + + Args: + features (`Features`): + New features to cast the dataset to. + The name of the fields in the features must match the current column names. + The type of the data must also be convertible from one type to the other. + For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset. + + Returns: + [`IterableDatasetDict`]: A copy of the dataset with casted features. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", streaming=True) + >>> ds["train"].features + {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), + 'text': Value(dtype='string', id=None)} + >>> new_features = ds["train"].features.copy() + >>> new_features['label'] = ClassLabel(names=['bad', 'good']) + >>> new_features['text'] = Value('large_string') + >>> ds = ds.cast(new_features) + >>> ds["train"].features + {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), + 'text': Value(dtype='large_string', id=None)} + ``` + """ + return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) diff --git a/testbed/huggingface__datasets/src/datasets/distributed.py b/testbed/huggingface__datasets/src/datasets/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..e036fabaf2cf6231ae6a3ca2c443100ccbb0b4d5 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/distributed.py @@ -0,0 +1,39 @@ +from typing import TypeVar + +from .arrow_dataset import Dataset, _split_by_node_map_style_dataset +from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset + + +DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) + + +def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType: + """ + Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`. + + For map-style datasets: + + Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. + To maximize data loading throughput, chunks are made of contiguous data on disk if possible. + + For iterable datasets: + + If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`), + then the shards are evenly assigned across the nodes, which is the most optimized. + Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples. + + Args: + dataset ([`Dataset`] or [`IterableDataset`]): + The dataset to split by node. + rank (`int`): + Rank of the current node. + world_size (`int`): + Total number of nodes. 
+
+    Returns:
+        [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
+    """
+    if isinstance(dataset, Dataset):
+        return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
+    else:
+        return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
diff --git a/testbed/huggingface__datasets/src/datasets/exceptions.py b/testbed/huggingface__datasets/src/datasets/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6a7aa1acf9bb39ee8f65a5dbb9d232ef455bec0
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/exceptions.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+
+class DatasetsError(Exception):
+    """Base class for exceptions in this library."""
+
+
+class DefunctDatasetError(DatasetsError):
+    """The dataset has been defunct."""
+
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+    """FileNotFoundError raised by this library."""
+
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+    """No (supported) data files found."""
+
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+    """Dataset not found.
+
+    Raised when trying to access:
+    - a missing dataset, or
+    - a private/gated dataset and the user is not authenticated.
+    """
diff --git a/testbed/huggingface__datasets/src/datasets/filesystems/compression.py b/testbed/huggingface__datasets/src/datasets/filesystems/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..d64872040b0abe0cd0bcfdfe004c2279213edafd
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/filesystems/compression.py
@@ -0,0 +1,178 @@
+import os
+from typing import Optional
+
+import fsspec
+from fsspec.archive import AbstractArchiveFileSystem
+from fsspec.utils import DEFAULT_BLOCK_SIZE
+
+
+class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
+    """Read contents of compressed file as a filesystem with one file inside."""
+
+    root_marker = ""
+    protocol: str = (
+        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
+    )
+    compression: str = None  # compression type in fsspec. ex: "gzip"
+    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
+
+    def __init__(
+        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
+    ):
+        """
+        The compressed file system can be instantiated from any compressed file.
+        It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
+
+        The single file inside the filesystem is named after the compressed file,
+        without the compression extension at the end of the filename.
+
+        Args:
+            fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
+            mode (:obj:``str``): Currently, only 'rb' accepted
+            target_protocol (:obj:``str``, optional): To override the FS protocol inferred from a URL.
+            target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
+        """
+        super().__init__(self, **kwargs)
+        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
+        self.file = fsspec.open(
+            fo,
+            mode="rb",
+            protocol=target_protocol,
+            compression=self.compression,
+            client_kwargs={
+                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
+                "trust_env": True,  # Enable reading proxy env variables.
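+                # values supplied by the caller through `target_options["client_kwargs"]` are merged
+                # last (below), so they take precedence over the two defaults above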
+ **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed. + }, + **(target_options or {}), + ) + self.compressed_name = os.path.basename(self.file.path.split("::")[0]) + self.uncompressed_name = ( + self.compressed_name[: self.compressed_name.rindex(".")] + if "." in self.compressed_name + else self.compressed_name + ) + self.dir_cache = None + + @classmethod + def _strip_protocol(cls, path): + # compressed file paths are always relative to the archive root + return super()._strip_protocol(path).lstrip("/") + + def _get_dirs(self): + if self.dir_cache is None: + f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name} + self.dir_cache = {f["name"]: f} + + def cat(self, path: str): + return self.file.open().read() + + def _open( + self, + path: str, + mode: str = "rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + path = self._strip_protocol(path) + if mode != "rb": + raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'") + return self.file.open() + + +class Bz2FileSystem(BaseCompressedFileFileSystem): + """Read contents of BZ2 file as a filesystem with one file inside.""" + + protocol = "bz2" + compression = "bz2" + extension = ".bz2" + + +class GzipFileSystem(BaseCompressedFileFileSystem): + """Read contents of GZIP file as a filesystem with one file inside.""" + + protocol = "gzip" + compression = "gzip" + extension = ".gz" + + +class Lz4FileSystem(BaseCompressedFileFileSystem): + """Read contents of LZ4 file as a filesystem with one file inside.""" + + protocol = "lz4" + compression = "lz4" + extension = ".lz4" + + +class XzFileSystem(BaseCompressedFileFileSystem): + """Read contents of .xz (LZMA) file as a filesystem with one file inside.""" + + protocol = "xz" + compression = "xz" + extension = ".xz" + + +class ZstdFileSystem(BaseCompressedFileFileSystem): + """ + Read contents of zstd file as a filesystem with one file inside. 
+
+    Note that reading in binary mode with fsspec isn't supported yet:
+    https://github.com/indygreg/python-zstandard/issues/136
+    """
+
+    protocol = "zstd"
+    compression = "zstd"
+    extension = ".zst"
+
+    def __init__(
+        self,
+        fo: str,
+        mode: str = "rb",
+        target_protocol: Optional[str] = None,
+        target_options: Optional[dict] = None,
+        block_size: int = DEFAULT_BLOCK_SIZE,
+        **kwargs,
+    ):
+        super().__init__(
+            fo=fo,
+            mode=mode,
+            target_protocol=target_protocol,
+            target_options=target_options,
+            block_size=block_size,
+            **kwargs,
+        )
+        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
+        #
+        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
+        #     out.close = close
+        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
+        #
+        # see https://github.com/intake/filesystem_spec/issues/725
+        _enter = self.file.__enter__
+
+        class WrappedFile:
+            def __init__(self, file_):
+                self._file = file_
+
+            def __enter__(self):
+                self._file.__enter__()
+                return self
+
+            def __exit__(self, *args, **kwargs):
+                self._file.__exit__(*args, **kwargs)
+
+            def __iter__(self):
+                return iter(self._file)
+
+            def __next__(self):
+                return next(self._file)
+
+            def __getattr__(self, attr):
+                return getattr(self._file, attr)
+
+        def fixed_enter(*args, **kwargs):
+            return WrappedFile(_enter(*args, **kwargs))
+
+        self.file.__enter__ = fixed_enter
diff --git a/testbed/huggingface__datasets/src/datasets/fingerprint.py b/testbed/huggingface__datasets/src/datasets/fingerprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d73758a049fdd9d41b55513319a788a67aa38c1
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/fingerprint.py
@@ -0,0 +1,523 @@
+import inspect
+import json
+import os
+import random
+import shutil
+import tempfile
+import weakref
+from functools import wraps
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+import xxhash
+
+from .info import DatasetInfo
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
+from .table import ConcatenationTable, InMemoryTable, MemoryMappedTable, Table
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+from .utils.py_utils import asdict, dumps
+
+
+if TYPE_CHECKING:
+    from .arrow_dataset import Dataset
+
+
+logger = get_logger(__name__)
+
+
+# Fingerprinting allows to have one deterministic fingerprint per dataset state.
+# A dataset fingerprint is updated after each transform.
+# Re-running the same transforms on a dataset in a different session results in the same fingerprint.
+# This is possible thanks to a custom hashing function that works with most python objects.
+
+# Fingerprinting is the main mechanism that enables caching.
+# The caching mechanism allows to reload an existing cache file if it's already been computed.
+
+
+#################
+# Caching
+#################
+
+_CACHING_ENABLED = True
+_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempDirWithCustomCleanup"] = None
+_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
+
+
+class _TempDirWithCustomCleanup:
+    """
+    A temporary directory with a custom cleanup function.
+    We need a custom temporary directory cleanup in order to delete the dataset objects that have
+    cache files in the temporary directory before deleting the directory itself.
+ """ + + def __init__(self, cleanup_func=None, *cleanup_func_args, **cleanup_func_kwargs): + self.name = tempfile.mkdtemp() + self._finalizer = weakref.finalize(self, self._cleanup) + self._cleanup_func = cleanup_func + self._cleanup_func_args = cleanup_func_args + self._cleanup_func_kwargs = cleanup_func_kwargs + + def _cleanup(self): + self._cleanup_func(*self._cleanup_func_args, **self._cleanup_func_kwargs) + if os.path.exists(self.name): + shutil.rmtree(self.name) + + def cleanup(self): + if self._finalizer.detach(): + self._cleanup() + + +def maybe_register_dataset_for_temp_dir_deletion(dataset): + """ + This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order + to properly delete them before deleting the temporary directory. + The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled. + """ + if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: + return + + global _DATASETS_WITH_TABLE_IN_TEMP_DIR + if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None: + _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet() + if any( + Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents + for cache_file in dataset.cache_files + ): + _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset) + + +def get_datasets_with_cache_file_in_temp_dir(): + return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else [] + + +def enable_caching(): + """ + When applying transforms on a dataset, the data are stored in cache files. + The caching mechanism allows to reload an existing cache file if it's already been computed. + + Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated + after each transform. + + If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. + More precisely, if the caching is disabled: + - cache files are always recreated + - cache files are written to a temporary directory that is deleted when session closes + - cache files are named using a random hash instead of the dataset fingerprint + - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes + - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use + the `download_mode` parameter in [`~datasets.load_dataset`]. + """ + global _CACHING_ENABLED + _CACHING_ENABLED = True + + +def disable_caching(): + """ + When applying transforms on a dataset, the data are stored in cache files. + The caching mechanism allows to reload an existing cache file if it's already been computed. + + Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated + after each transform. + + If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. + More precisely, if the caching is disabled: + - cache files are always recreated + - cache files are written to a temporary directory that is deleted when session closes + - cache files are named using a random hash instead of the dataset fingerprint + - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes + - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use + the `download_mode` parameter in [`~datasets.load_dataset`]. 
+ """ + global _CACHING_ENABLED + _CACHING_ENABLED = False + + +@deprecated( + "Use datasets.enable_caching() or datasets.disable_caching() instead. This function will be removed in a future version of datasets." +) +def set_caching_enabled(boolean: bool): + """ + When applying transforms on a dataset, the data are stored in cache files. + The caching mechanism allows to reload an existing cache file if it's already been computed. + + Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated + after each transform. + + If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. + More precisely, if the caching is disabled: + - cache files are always recreated + - cache files are written to a temporary directory that is deleted when session closes + - cache files are named using a random hash instead of the dataset fingerprint + - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes + - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use + the ``download_mode`` parameter in :func:`datasets.load_dataset`. + """ + global _CACHING_ENABLED + _CACHING_ENABLED = bool(boolean) + + +def is_caching_enabled() -> bool: + """ + When applying transforms on a dataset, the data are stored in cache files. + The caching mechanism allows to reload an existing cache file if it's already been computed. + + Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated + after each transform. + + If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets. + More precisely, if the caching is disabled: + - cache files are always recreated + - cache files are written to a temporary directory that is deleted when session closes + - cache files are named using a random hash instead of the dataset fingerprint + - use [`~datasets.Dataset.save_to_disk`]] to save a transformed dataset or it will be deleted when session closes + - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use + the `download_mode` parameter in [`~datasets.load_dataset`]. 
+ """ + global _CACHING_ENABLED + return bool(_CACHING_ENABLED) + + +def get_temporary_cache_files_directory() -> str: + """Return a directory that is deleted when session closes.""" + global _TEMP_DIR_FOR_TEMP_CACHE_FILES + if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: + # Avoids a PermissionError on Windows caused by the datasets referencing + # the files from the cache directory on clean-up + def cleanup_func(): + for dset in get_datasets_with_cache_file_in_temp_dir(): + dset.__del__() + + _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempDirWithCustomCleanup(cleanup_func=cleanup_func) + return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name + + +################# +# Hashing +################# + + +def hashregister(*types): + def proxy(func): + for t in types: + Hasher.dispatch[t] = func + return func + + return proxy + + +class Hasher: + """Hasher that accepts python objects as inputs.""" + + dispatch: Dict = {} + + def __init__(self): + self.m = xxhash.xxh64() + + @classmethod + def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str: + value = [value] if isinstance(value, bytes) else value + m = xxhash.xxh64() + for x in value: + m.update(x) + return m.hexdigest() + + @classmethod + def hash_default(cls, value: Any) -> str: + return cls.hash_bytes(dumps(value)) + + @classmethod + def hash(cls, value: Any) -> str: + if type(value) in cls.dispatch: + return cls.dispatch[type(value)](cls, value) + else: + return cls.hash_default(value) + + def update(self, value: Any) -> None: + header_for_update = f"=={type(value)}==" + value_for_update = self.hash(value) + self.m.update(header_for_update.encode("utf8")) + self.m.update(value_for_update.encode("utf-8")) + + def hexdigest(self) -> str: + return self.m.hexdigest() + + +# Register a new hasher can be useful for two possible reasons: +# 1 - optimize the hashing of large amount of data (e.g. pa.Table) +# 2 - take advantage of a custom serialization method (e.g. 
+
+@hashregister(pa.Table, Table, InMemoryTable, MemoryMappedTable, ConcatenationTable)
+def _hash_pa_table(hasher, value):
+    def _hash_pa_array(value):
+        if isinstance(value, pa.ChunkedArray):
+            return hasher.hash_bytes(c.to_string().encode("utf-8") for c in value.chunks)
+        else:
+            return hasher.hash_bytes(value.to_string().encode("utf-8"))
+
+    value = "-".join(col + "-" + _hash_pa_array(value[col]) for col in sorted(value.column_names))
+    return hasher.hash_bytes(value.encode("utf-8"))
+
+
+@hashregister(DatasetInfo)
+def _hash_dataset_info(hasher, value):
+    return hasher.hash_bytes(json.dumps(asdict(value), sort_keys=True).encode("utf-8"))
+
+
+#################
+# Fingerprinting
+#################
+
+# we show a warning only once when fingerprinting fails to avoid spam
+fingerprint_warnings: Dict[str, bool] = {}
+
+
+def generate_fingerprint(dataset) -> str:
+    state = dataset.__dict__
+    hasher = Hasher()
+    for key in sorted(state):
+        if key == "_fingerprint":
+            continue
+        hasher.update(key)
+        hasher.update(state[key])
+    # hash data files last modification timestamps as well
+    for cache_file in dataset.cache_files:
+        hasher.update(os.path.getmtime(cache_file["filename"]))
+    return hasher.hexdigest()
+
+
+def generate_random_fingerprint(nbits=64) -> str:
+    return f"{random.getrandbits(nbits):0{nbits//4}x}"
+
+
+def update_fingerprint(fingerprint, transform, transform_args):
+    global fingerprint_warnings
+    hasher = Hasher()
+    hasher.update(fingerprint)
+    try:
+        hasher.update(transform)
+    except:  # noqa - various errors might be raised here from pickle or dill
+        if _CACHING_ENABLED:
+            if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
+                logger.warning(
+                    f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
+                    "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
+                    "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
+                    "This warning is only shown once. Subsequent hashing failures won't be shown."
+                )
+                fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
+            else:
+                logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
+        else:
+            logger.info(
+                f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
+            )
+        return generate_random_fingerprint()
+    for key in sorted(transform_args):
+        hasher.update(key)
+        try:
+            hasher.update(transform_args[key])
+        except:  # noqa - various errors might be raised here from pickle or dill
+            if _CACHING_ENABLED:
+                if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
+                    logger.warning(
+                        f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
+                        "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
+                        "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
+                        "This warning is only shown once. Subsequent hashing failures won't be shown."
+                    )
+                    fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
+                else:
+                    logger.info(
+                        f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
+                    )
+            else:
+                logger.info(
+                    f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
+                )
+            return generate_random_fingerprint()
+    return hasher.hexdigest()
+
+
+def validate_fingerprint(fingerprint: str, max_length=64):
+    """
+    Make sure the fingerprint is a non-empty string that is not longer than max_length=64 by default,
+    so that the fingerprint can be used to name cache files without issues.
+    """
+    if not isinstance(fingerprint, str) or not fingerprint:
+        raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.")
+    for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+        if invalid_char in fingerprint:
+            raise ValueError(
+                f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. "
+                f"They could create issues when creating cache files."
+            )
+    if len(fingerprint) > max_length:
+        raise ValueError(
+            f"Invalid fingerprint. Maximum length is {max_length} but '{fingerprint}' has length {len(fingerprint)}. "
+            "It could create issues when creating cache files."
+        )
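+
+
+# For example (illustrative, not part of the library): validate_fingerprint("a" * 100) raises
+# ValueError because the string exceeds max_length, and so does any fingerprint containing a
+# character from INVALID_WINDOWS_CHARACTERS_IN_PATH, while a short hex digest passes.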
+ """ + kwargs_for_fingerprint = kwargs.copy() + if args: + params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD] + args = args[1:] # assume the first argument is the dataset + params = params[1:] + kwargs_for_fingerprint.update(zip(params, args)) + else: + del kwargs_for_fingerprint[ + next(iter(inspect.signature(func).parameters)) + ] # assume the first key is the dataset + + # keep the right kwargs to be hashed to generate the fingerprint + + if use_kwargs: + kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs} + if ignore_kwargs: + kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs} + if randomized_function: # randomized functions have `seed` and `generator` parameters + if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None: + _, seed, pos, *_ = np.random.get_state() + seed = seed[pos] if pos < 624 else seed[0] + kwargs_for_fingerprint["generator"] = np.random.default_rng(seed) + + # remove kwargs that are the default values + + default_values = { + p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty + } + for default_varname, default_value in default_values.items(): + if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value: + kwargs_for_fingerprint.pop(default_varname) + return kwargs_for_fingerprint + + +def fingerprint_transform( + inplace: bool, + use_kwargs: Optional[List[str]] = None, + ignore_kwargs: Optional[List[str]] = None, + fingerprint_names: Optional[List[str]] = None, + randomized_function: bool = False, + version: Optional[str] = None, +): + """ + Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint`` + Args: + inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace. + Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of + setting the fingerprint of the returned Dataset. + use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account + to update the fingerprint to the wrapped method that should take care of + setting the fingerprint of the returned Dataset. By default all the arguments are used. + ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to take into account + to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs. + fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]): + If the dataset transforms is not inplace and returns a DatasetDict, then it can require + several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names, + one fingerprint named after each element of fingerprint_names is going to be passed. + randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has + optional parameters "seed" and "generator", then you can set randomized_function to True. + This way, even if users set "seed" and "generator" to None, then the fingerprint is + going to be randomly generated depending on numpy's current state. In this case, the + generator is set to np.random.default_rng(np.random.get_state()[1][0]). + version (:obj:`str`, optional): version of the transform. The version is taken into account when + computing the fingerprint. 
+            If a dataset transform changes (or at least if the output data
+            that are cached changes), then one should increase the version. If the version stays the
+            same, then old cached data that are not compatible with the new transform could be reused.
+            It should be in the format "MAJOR.MINOR.PATCH".
+    """
+
+    if use_kwargs is not None and not isinstance(use_kwargs, list):
+        raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}")
+
+    if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):
+        raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(ignore_kwargs)}")
+
+    if inplace and fingerprint_names:
+        raise ValueError("fingerprint_names are only used when inplace is False")
+
+    fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
+
+    def _fingerprint(func):
+        if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):
+            raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature")
+
+        if randomized_function:  # randomized functions have seed and generator parameters
+            if "seed" not in func.__code__.co_varnames:
+                raise ValueError(f"'seed' must be in {func}'s signature")
+            if "generator" not in func.__code__.co_varnames:
+                raise ValueError(f"'generator' must be in {func}'s signature")
+        # this call has to be outside the wrapper, since __qualname__ changes in multiprocessing
+        transform = format_transform_for_fingerprint(func, version=version)
+
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            kwargs_for_fingerprint = format_kwargs_for_fingerprint(
+                func,
+                args,
+                kwargs,
+                use_kwargs=use_kwargs,
+                ignore_kwargs=ignore_kwargs,
+                randomized_function=randomized_function,
+            )
+
+            if args:
+                dataset: Dataset = args[0]
+                args = args[1:]
+            else:
+                dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters)))
+
+            # compute new_fingerprint and add it to the args of non-inplace transforms
+            if inplace:
+                new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint)
+            else:
+                for fingerprint_name in fingerprint_names:  # transforms like `train_test_split` have several hashes
+                    if kwargs.get(fingerprint_name) is None:
+                        kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
+                        kwargs[fingerprint_name] = update_fingerprint(
+                            dataset._fingerprint, transform, kwargs_for_fingerprint
+                        )
+                    else:
+                        validate_fingerprint(kwargs[fingerprint_name])
+
+            # Call actual function
+            out = func(dataset, *args, **kwargs)
+
+            # Update fingerprint of in-place transforms + update in-place history of transforms
+            if inplace:  # update after calling func so that the fingerprint doesn't change if the function fails
+                dataset._fingerprint = new_fingerprint
+
+            return out
+
+        wrapper._decorator_name_ = "fingerprint"
+        return wrapper
+
+    return _fingerprint
diff --git a/testbed/huggingface__datasets/src/datasets/info.py b/testbed/huggingface__datasets/src/datasets/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..e17477d636d08c4c53de3d9003b368612ddf3698
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/info.py
@@ -0,0 +1,587 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" DatasetInfo and MetricInfo record information we know about a dataset and a metric. + +This includes things that we know about the dataset statically, i.e.: + - description + - canonical location + - does it have validation and tests splits + - size + - etc. + +This also includes the things that can and should be computed once we've +processed the dataset as well: + - number of examples (in each split) + - etc. +""" + +import copy +import dataclasses +import json +import os +import posixpath +import warnings +from dataclasses import dataclass +from pathlib import Path +from typing import ClassVar, Dict, List, Optional, Union + +import fsspec +from huggingface_hub import DatasetCard, DatasetCardData + +from . import config +from .features import Features, Value +from .splits import SplitDict +from .tasks import TaskTemplate, task_template_from_dict +from .utils import Version +from .utils.logging import get_logger +from .utils.py_utils import asdict, unique_values + + +logger = get_logger(__name__) + + +@dataclass +class SupervisedKeysData: + input: str = "" + output: str = "" + + +@dataclass +class DownloadChecksumsEntryData: + key: str = "" + value: str = "" + + +class MissingCachedSizesConfigError(Exception): + """The expected cached sizes of the download file are missing.""" + + +class NonMatchingCachedSizesError(Exception): + """The prepared split doesn't have expected sizes.""" + + +@dataclass +class PostProcessedInfo: + features: Optional[Features] = None + resources_checksums: Optional[dict] = None + + def __post_init__(self): + # Convert back to the correct classes when we reload from dict + if self.features is not None and not isinstance(self.features, Features): + self.features = Features.from_dict(self.features) + + @classmethod + def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo": + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names}) + + +@dataclass +class DatasetInfo: + """Information about a dataset. + + `DatasetInfo` documents datasets, including its name, version, and features. + See the constructor arguments and properties for a full list. + + Not all fields are known on construction and may be updated later. + + Attributes: + description (`str`): + A description of the dataset. + citation (`str`): + A BibTeX citation of the dataset. + homepage (`str`): + A URL to the official homepage for the dataset. + license (`str`): + The dataset's license. It can be the name of the license or a paragraph containing the terms of the license. + features ([`Features`], *optional*): + The features used to specify the dataset's column types. + post_processed (`PostProcessedInfo`, *optional*): + Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index. + supervised_keys (`SupervisedKeysData`, *optional*): + Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS). 
+ builder_name (`str`, *optional*): + The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name. + config_name (`str`, *optional*): + The name of the configuration derived from [`BuilderConfig`]. + version (`str` or [`Version`], *optional*): + The version of the dataset. + splits (`dict`, *optional*): + The mapping between split name and metadata. + download_checksums (`dict`, *optional*): + The mapping between the URL to download the dataset's checksums and corresponding metadata. + download_size (`int`, *optional*): + The size of the files to download to generate the dataset, in bytes. + post_processing_size (`int`, *optional*): + Size of the dataset in bytes after post-processing, if any. + dataset_size (`int`, *optional*): + The combined size in bytes of the Arrow tables for all splits. + size_in_bytes (`int`, *optional*): + The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files). + task_templates (`List[TaskTemplate]`, *optional*): + The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`. + **config_kwargs (additional keyword arguments): + Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`]. + """ + + # Set in the dataset scripts + description: str = dataclasses.field(default_factory=str) + citation: str = dataclasses.field(default_factory=str) + homepage: str = dataclasses.field(default_factory=str) + license: str = dataclasses.field(default_factory=str) + features: Optional[Features] = None + post_processed: Optional[PostProcessedInfo] = None + supervised_keys: Optional[SupervisedKeysData] = None + task_templates: Optional[List[TaskTemplate]] = None + + # Set later by the builder + builder_name: Optional[str] = None + dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name + config_name: Optional[str] = None + version: Optional[Union[str, Version]] = None + # Set later by `download_and_prepare` + splits: Optional[dict] = None + download_checksums: Optional[dict] = None + download_size: Optional[int] = None + post_processing_size: Optional[int] = None + dataset_size: Optional[int] = None + size_in_bytes: Optional[int] = None + + _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [ + "config_name", + "download_size", + "dataset_size", + "features", + "splits", + ] + + def __post_init__(self): + # Convert back to the correct classes when we reload from dict + if self.features is not None and not isinstance(self.features, Features): + self.features = Features.from_dict(self.features) + if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo): + self.post_processed = PostProcessedInfo.from_dict(self.post_processed) + if self.version is not None and not isinstance(self.version, Version): + if isinstance(self.version, str): + self.version = Version(self.version) + else: + self.version = Version.from_dict(self.version) + if self.splits is not None and not isinstance(self.splits, SplitDict): + self.splits = SplitDict.from_split_dict(self.splits) + if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData): + if isinstance(self.supervised_keys, (tuple, list)): + self.supervised_keys = SupervisedKeysData(*self.supervised_keys) + 
else: + self.supervised_keys = SupervisedKeysData(**self.supervised_keys) + + # Parse and make a list of templates + if self.task_templates is not None: + if isinstance(self.task_templates, (list, tuple)): + templates = [ + template if isinstance(template, TaskTemplate) else task_template_from_dict(template) + for template in self.task_templates + ] + self.task_templates = [template for template in templates if template is not None] + elif isinstance(self.task_templates, TaskTemplate): + self.task_templates = [self.task_templates] + else: + template = task_template_from_dict(self.task_templates) + self.task_templates = [template] if template is not None else [] + + # Align task templates with features + if self.task_templates is not None: + self.task_templates = list(self.task_templates) + if self.features is not None: + self.task_templates = [ + template.align_with_features(self.features) for template in (self.task_templates) + ] + + def write_to_directory( + self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None + ): + """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`. + + Args: + dataset_info_dir (`str`): + Destination directory. + pretty_print (`bool`, defaults to `False`): + If `True`, the JSON will be pretty-printed with the indent level of 4. + fs (`fsspec.spec.AbstractFileSystem`, *optional*): + Instance of the remote filesystem used to download the files from. + + + + `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. + Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`. + + + + storage_options (`dict`, *optional*): + Key/value pairs to be passed on to the file-system backend, if any. + + + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation") + >>> ds.info.write_to_directory("/path/to/directory/") + ``` + """ + if fs != "deprecated": + warnings.warn( + "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", + FutureWarning, + ) + storage_options = fs.storage_options + + fs: fsspec.AbstractFileSystem + fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options) + with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f: + self._dump_info(f, pretty_print=pretty_print) + if self.license: + with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f: + self._dump_license(f) + + def _dump_info(self, file, pretty_print=False): + """Dump info in `file` file-like object open in bytes mode (to support remote files)""" + file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8")) + + def _dump_license(self, file): + """Dump license in `file` file-like object open in bytes mode (to support remote files)""" + file.write(self.license.encode("utf-8")) + + @classmethod + def from_merge(cls, dataset_infos: List["DatasetInfo"]): + dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None] + description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip() + citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip() + homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip() + license = "\n\n".join(unique_values(info.license for info in 
dataset_infos)).strip() + features = None + supervised_keys = None + task_templates = None + + # Find common task templates across all dataset infos + all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None] + if len(all_task_templates) > 1: + task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:])) + elif len(all_task_templates): + task_templates = list(set(all_task_templates[0])) + # If no common task templates found, replace empty list with None + task_templates = task_templates if task_templates else None + + return cls( + description=description, + citation=citation, + homepage=homepage, + license=license, + features=features, + supervised_keys=supervised_keys, + task_templates=task_templates, + ) + + @classmethod + def from_directory( + cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None + ) -> "DatasetInfo": + """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`. + + This function updates all the dynamically generated fields (num_examples, + hash, time of creation,...) of the [`DatasetInfo`]. + + This will overwrite all previous metadata. + + Args: + dataset_info_dir (`str`): + The directory containing the metadata file. This + should be the root directory of a specific dataset version. + fs (`fsspec.spec.AbstractFileSystem`, *optional*): + Instance of the remote filesystem used to download the files from. + + + + `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. + Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`. + + + + storage_options (`dict`, *optional*): + Key/value pairs to be passed on to the file-system backend, if any. + + + + Example: + + ```py + >>> from datasets import DatasetInfo + >>> ds_info = DatasetInfo.from_directory("/path/to/directory/") + ``` + """ + if fs != "deprecated": + warnings.warn( + "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", + FutureWarning, + ) + storage_options = fs.storage_options + + fs: fsspec.AbstractFileSystem + fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options) + logger.info(f"Loading Dataset info from {dataset_info_dir}") + if not dataset_info_dir: + raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.") + with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f: + dataset_info_dict = json.load(f) + return cls.from_dict(dataset_info_dict) + + @classmethod + def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo": + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names}) + + def update(self, other_dataset_info: "DatasetInfo", ignore_none=True): + self_dict = self.__dict__ + self_dict.update( + **{ + k: copy.deepcopy(v) + for k, v in other_dataset_info.__dict__.items() + if (v is not None or not ignore_none) + } + ) + + def copy(self) -> "DatasetInfo": + return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) + + def _to_yaml_dict(self) -> dict: + yaml_dict = {} + dataset_info_dict = asdict(self) + for key in dataset_info_dict: + if key in self._INCLUDED_INFO_IN_YAML: + value = getattr(self, key) + if hasattr(value, "_to_yaml_list"): # Features, SplitDict + yaml_dict[key] = value._to_yaml_list() + elif 
hasattr(value, "_to_yaml_string"): # Version + yaml_dict[key] = value._to_yaml_string() + else: + yaml_dict[key] = value + return yaml_dict + + @classmethod + def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo": + yaml_data = copy.deepcopy(yaml_data) + if yaml_data.get("features") is not None: + yaml_data["features"] = Features._from_yaml_list(yaml_data["features"]) + if yaml_data.get("splits") is not None: + yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"]) + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) + + +class DatasetInfosDict(Dict[str, DatasetInfo]): + def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None: + total_dataset_infos = {} + dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME) + dataset_readme_path = os.path.join(dataset_infos_dir, "README.md") + if not overwrite: + total_dataset_infos = self.from_directory(dataset_infos_dir) + total_dataset_infos.update(self) + if os.path.exists(dataset_infos_path): + # for backward compatibility, let's update the JSON file if it exists + with open(dataset_infos_path, "w", encoding="utf-8") as f: + dataset_infos_dict = { + config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items() + } + json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None) + # Dump the infos in the YAML part of the README.md file + if os.path.exists(dataset_readme_path): + dataset_card = DatasetCard.load(dataset_readme_path) + dataset_card_data = dataset_card.data + else: + dataset_card = None + dataset_card_data = DatasetCardData() + if total_dataset_infos: + total_dataset_infos.to_dataset_card_data(dataset_card_data) + dataset_card = ( + DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card + ) + dataset_card.save(Path(dataset_readme_path)) + + @classmethod + def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict": + logger.info(f"Loading Dataset Infos from {dataset_infos_dir}") + # Load the info from the YAML part of README.md + if os.path.exists(os.path.join(dataset_infos_dir, "README.md")): + dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / "README.md").data + if "dataset_info" in dataset_card_data: + return cls.from_dataset_card_data(dataset_card_data) + if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)): + # this is just to have backward compatibility with dataset_infos.json files + with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f: + return cls( + { + config_name: DatasetInfo.from_dict(dataset_info_dict) + for config_name, dataset_info_dict in json.load(f).items() + } + ) + else: + return cls() + + @classmethod + def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict": + if isinstance(dataset_card_data.get("dataset_info"), (list, dict)): + if isinstance(dataset_card_data["dataset_info"], list): + return cls( + { + dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict( + dataset_info_yaml_dict + ) + for dataset_info_yaml_dict in dataset_card_data["dataset_info"] + } + ) + else: + dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"]) + dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default") + return cls({dataset_info.config_name: dataset_info}) + else: + return cls() + + 
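+    # Illustrative round-trip between a dataset card and a `DatasetInfosDict`
+    # (a sketch for exposition; the README.md path is a placeholder):
+    #
+    #     card_data = DatasetCard.load("README.md").data
+    #     infos = DatasetInfosDict.from_dataset_card_data(card_data)
+    #     infos.to_dataset_card_data(card_data)  # re-embeds the infos into the card's YAML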
+    def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
+        if self:
+            # first get existing metadata info
+            if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
+                dataset_metadata_infos = {
+                    dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
+                }
+            elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
+                dataset_metadata_infos = {
+                    config_metadata["config_name"]: config_metadata
+                    for config_metadata in dataset_card_data["dataset_info"]
+                }
+            else:
+                dataset_metadata_infos = {}
+            # update/rewrite existing metadata info with the one to dump
+            total_dataset_infos = {
+                **dataset_metadata_infos,
+                **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
+            }
+            # the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
+            for config_name, dset_info_yaml_dict in total_dataset_infos.items():
+                dset_info_yaml_dict["config_name"] = config_name
+            if len(total_dataset_infos) == 1:
+                # use a struct instead of a list of configurations, since there's only one
+                dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
+                config_name = dataset_card_data["dataset_info"].pop("config_name", None)
+                if config_name != "default":
+                    # if config_name is not "default" preserve it and put at the first position
+                    dataset_card_data["dataset_info"] = {
+                        "config_name": config_name,
+                        **dataset_card_data["dataset_info"],
+                    }
+            else:
+                dataset_card_data["dataset_info"] = []
+                for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
+                    # add the config_name field in first position
+                    dataset_info_yaml_dict.pop("config_name", None)
+                    dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
+                    dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
+
+
+@dataclass
+class MetricInfo:
+    """Information about a metric.
+
+    `MetricInfo` documents a metric, including its name, version, and features.
+    See the constructor arguments and properties for a full list.
+
+    Note: Not all fields are known on construction and may be updated later.
+    """
+
+    # Set in the dataset scripts
+    description: str
+    citation: str
+    features: Features
+    inputs_description: str = dataclasses.field(default_factory=str)
+    homepage: str = dataclasses.field(default_factory=str)
+    license: str = dataclasses.field(default_factory=str)
+    codebase_urls: List[str] = dataclasses.field(default_factory=list)
+    reference_urls: List[str] = dataclasses.field(default_factory=list)
+    streamable: bool = False
+    format: Optional[str] = None
+
+    # Set later by the builder
+    metric_name: Optional[str] = None
+    config_name: Optional[str] = None
+    experiment_id: Optional[str] = None
+
+    def __post_init__(self):
+        if self.format is not None:
+            for key, value in self.features.items():
+                if not isinstance(value, Value):
+                    raise ValueError(
+                        f"When using '{self.format}' format, all features should be a `datasets.Value` feature. "
+                        f"Here {key} is an instance of {value.__class__.__name__}"
+                    )
+
+    def write_to_directory(self, metric_info_dir, pretty_print=False):
+        """Write `MetricInfo` as JSON to `metric_info_dir`.
+        Also save the license separately in LICENSE.
+        If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.
+ + Example: + + ```py + >>> from datasets import load_metric + >>> metric = load_metric("accuracy") + >>> metric.info.write_to_directory("/path/to/directory/") + ``` + """ + with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f: + json.dump(asdict(self), f, indent=4 if pretty_print else None) + + if self.license: + with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f: + f.write(self.license) + + @classmethod + def from_directory(cls, metric_info_dir) -> "MetricInfo": + """Create MetricInfo from the JSON file in `metric_info_dir`. + + Args: + metric_info_dir: `str` The directory containing the metadata file. This + should be the root directory of a specific dataset version. + + Example: + + ```py + >>> from datasets import MetricInfo + >>> metric_info = MetricInfo.from_directory("/path/to/directory/") + ``` + """ + logger.info(f"Loading Metric info from {metric_info_dir}") + if not metric_info_dir: + raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.") + + with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f: + metric_info_dict = json.load(f) + return cls.from_dict(metric_info_dict) + + @classmethod + def from_dict(cls, metric_info_dict: dict) -> "MetricInfo": + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names}) diff --git a/testbed/huggingface__datasets/src/datasets/inspect.py b/testbed/huggingface__datasets/src/datasets/inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..cad7b611eff931b080b153013b6a3a6805b19235 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/inspect.py @@ -0,0 +1,519 @@ +# Copyright 2020 The HuggingFace Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" List and inspect datasets.""" + +import inspect +import os +import shutil +import warnings +from pathlib import PurePath +from typing import Dict, List, Mapping, Optional, Sequence, Union + +import huggingface_hub + +from .download.download_config import DownloadConfig +from .download.download_manager import DownloadMode +from .download.streaming_download_manager import StreamingDownloadManager +from .info import DatasetInfo +from .load import ( + dataset_module_factory, + get_dataset_builder_class, + import_main_class, + load_dataset_builder, + metric_module_factory, +) +from .utils.deprecation_utils import deprecated +from .utils.file_utils import relative_to_absolute_path +from .utils.logging import get_logger +from .utils.version import Version + + +logger = get_logger(__name__) + + +class SplitsNotFoundError(ValueError): + pass + + +@deprecated("Use 'huggingface_hub.list_datasets' instead.") +def list_datasets(with_community_datasets=True, with_details=False): + """List all the datasets scripts available on the Hugging Face Hub. 
+
+    Args:
+        with_community_datasets (`bool`, *optional*, defaults to `True`):
+            Include the community provided datasets.
+        with_details (`bool`, *optional*, defaults to `False`):
+            Return the full details on the datasets instead of only the short name.
+
+    Example:
+
+    ```py
+    >>> from datasets import list_datasets
+    >>> list_datasets()
+    ['acronym_identification',
+     'ade_corpus_v2',
+     'adversarial_qa',
+     'aeslc',
+     'afrikaans_ner_corpus',
+     'ag_news',
+     ...
+    ]
+    ```
+    """
+    datasets = huggingface_hub.list_datasets(full=with_details)
+    if not with_community_datasets:
+        datasets = [dataset for dataset in datasets if "/" not in dataset.id]
+    if not with_details:
+        datasets = [dataset.id for dataset in datasets]
+    return list(datasets)
+
+
+@deprecated(
+    "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def list_metrics(with_community_metrics=True, with_details=False):
+    """List all the metric scripts available on the Hugging Face Hub.
+
+    Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+    Args:
+        with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
+        with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
+
+    Example:
+
+    ```py
+    >>> from datasets import list_metrics
+    >>> list_metrics()
+    ['accuracy',
+     'bertscore',
+     'bleu',
+     'bleurt',
+     'cer',
+     'chrf',
+     ...
+    ]
+    ```
+    """
+    metrics = huggingface_hub.list_metrics()
+    if not with_community_metrics:
+        metrics = [metric for metric in metrics if "/" not in metric.id]
+    if not with_details:
+        metrics = [metric.id for metric in metrics]
+    return metrics
+
+
+@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
+def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+    """
+    Allow inspection/modification of a dataset script by copying it to the local drive at `local_path`.
+
+    Args:
+        path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
+
+            - a local path to processing script or the directory containing the script (if the script has the same name
+              as the directory),
+              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+            - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
+              e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
+        local_path (`str`):
+            Path to the local folder to copy the dataset script to.
+        download_config ([`DownloadConfig`], *optional*):
+            Specific download configuration parameters.
+        **download_kwargs (additional keyword arguments):
+            Optional arguments for [`DownloadConfig`] which will override
+            the attributes of `download_config` if supplied.
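+
+    Example (the destination path is illustrative):
+
+    ```py
+    >>> from datasets import inspect_dataset
+    >>> inspect_dataset("rotten_tomatoes", "/path/to/local/rotten_tomatoes")
+    ```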
+ """ + dataset_module = dataset_module_factory(path, download_config=download_config, **download_kwargs) + builder_cls = get_dataset_builder_class(dataset_module) + module_source_path = inspect.getsourcefile(builder_cls) + module_source_dirpath = os.path.dirname(module_source_path) + for dirpath, dirnames, filenames in os.walk(module_source_dirpath): + dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath)) + os.makedirs(dst_dirpath, exist_ok=True) + # skipping hidden directories; prune the search + # [:] for the in-place list modification required by os.walk + dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))] + for filename in filenames: + shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename)) + shutil.copystat(dirpath, dst_dirpath) + local_path = relative_to_absolute_path(local_path) + print( + f"The processing script for dataset {path} can be inspected at {local_path}. " + f"The main class is in {module_source_dirpath}. " + f'You can modify this processing script and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.' + ) + + +@deprecated( + "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate" +) +def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs): + r""" + Allow inspection/modification of a metric script by copying it on local drive at local_path. + + + + Use `evaluate.inspect_evaluation_module` instead, from the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + + Args: + path (``str``): path to the dataset processing script with the dataset builder. Can be either: + + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'`` + - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``) + e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'`` + local_path (``str``): path to the local folder to copy the datset script to. + download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters. + **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied. + """ + metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs) + metric_cls = import_main_class(metric_module.module_path, dataset=False) + module_source_path = inspect.getsourcefile(metric_cls) + module_source_dirpath = os.path.dirname(module_source_path) + for dirpath, dirnames, filenames in os.walk(module_source_dirpath): + dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath)) + os.makedirs(dst_dirpath, exist_ok=True) + # skipping hidden directories; prune the search + dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))] + for filename in filenames: + shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename)) + shutil.copystat(dirpath, dst_dirpath) + local_path = relative_to_absolute_path(local_path) + print( + f"The processing scripts for metric {path} can be inspected at {local_path}. " + f"The main class is in {module_source_dirpath}. 
" + f'You can modify this processing scripts and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.' + ) + + +def get_dataset_infos( + path: str, + data_files: Optional[Union[Dict, List, str]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + revision: Optional[Union[str, Version]] = None, + token: Optional[Union[bool, str]] = None, + use_auth_token="deprecated", + **config_kwargs, +): + """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict. + + Args: + path (`str`): path to the dataset processing script with the dataset builder. Can be either: + + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` + - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`]) + e.g. `'squad'`, `'glue'` or``'openai/webtext'` + revision (`Union[str, datasets.Version]`, *optional*): + If specified, the dataset module will be loaded from the datasets repository at this version. + By default: + - it is set to the local version of the lib. + - it will also try to load it from the main branch if it's not available at the local version of the lib. + Specifying a version that is different from your local version of the lib might cause compatibility issues. + download_config ([`DownloadConfig`], *optional*): + Specific download configuration parameters. + download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): + Download/generate mode. + data_files (`Union[Dict, List, str]`, *optional*): + Defining the data_files of the dataset configuration. + token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If `True`, or not specified, will get token from `"~/.huggingface"`. + use_auth_token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If `True`, or not specified, will get token from `"~/.huggingface"`. + + + + `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. + + + + **config_kwargs (additional keyword arguments): + Optional attributes for builder class which will override the attributes if supplied. 
+ + Example: + + ```py + >>> from datasets import get_dataset_infos + >>> get_dataset_infos('rotten_tomatoes') + {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...} + ``` + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'token=' instead.", + FutureWarning, + ) + token = use_auth_token + + config_names = get_dataset_config_names( + path=path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + data_files=data_files, + token=token, + ) + return { + config_name: get_dataset_config_info( + path=path, + config_name=config_name, + data_files=data_files, + download_config=download_config, + download_mode=download_mode, + revision=revision, + token=token, + **config_kwargs, + ) + for config_name in config_names + } + + +def get_dataset_config_names( + path: str, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + dynamic_modules_path: Optional[str] = None, + data_files: Optional[Union[Dict, List, str]] = None, + **download_kwargs, +): + """Get the list of available config names for a particular dataset. + + Args: + path (`str`): path to the dataset processing script with the dataset builder. Can be either: + + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` + - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`]) + e.g. `'squad'`, `'glue'` or `'openai/webtext'` + revision (`Union[str, datasets.Version]`, *optional*): + If specified, the dataset module will be loaded from the datasets repository at this version. + By default: + - it is set to the local version of the lib. + - it will also try to load it from the main branch if it's not available at the local version of the lib. + Specifying a version that is different from your local version of the lib might cause compatibility issues. + download_config ([`DownloadConfig`], *optional*): + Specific download configuration parameters. + download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): + Download/generate mode. + dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`): + Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`. + By default the datasets and metrics are stored inside the `datasets_modules` module. + data_files (`Union[Dict, List, str]`, *optional*): + Defining the data_files of the dataset configuration. + **download_kwargs (additional keyword arguments): + Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied, + for example `token`. 
+ + Example: + + ```py + >>> from datasets import get_dataset_config_names + >>> get_dataset_config_names("glue") + ['cola', + 'sst2', + 'mrpc', + 'qqp', + 'stsb', + 'mnli', + 'mnli_mismatched', + 'mnli_matched', + 'qnli', + 'rte', + 'wnli', + 'ax'] + ``` + """ + dataset_module = dataset_module_factory( + path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + data_files=data_files, + **download_kwargs, + ) + builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) + return list(builder_cls.builder_configs.keys()) or [ + dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default") + ] + + +def get_dataset_config_info( + path: str, + config_name: Optional[str] = None, + data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + revision: Optional[Union[str, Version]] = None, + token: Optional[Union[bool, str]] = None, + use_auth_token="deprecated", + **config_kwargs, +) -> DatasetInfo: + """Get the meta information (DatasetInfo) about a dataset for a particular config + + Args: + path (``str``): path to the dataset processing script with the dataset builder. Can be either: + + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'`` + - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``) + e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'`` + config_name (:obj:`str`, optional): Defining the name of the dataset configuration. + data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s). + download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters. + download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. + revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load. + As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. + You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. + token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If True, or not specified, will get token from `"~/.huggingface"`. + use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If True, or not specified, will get token from `"~/.huggingface"`. + + + + `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. + + + + **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied. 
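+
+    Example (illustrative; `rotten_tomatoes` is the public dataset used by this module's other examples):
+
+    ```py
+    >>> from datasets import get_dataset_config_info
+    >>> info = get_dataset_config_info("rotten_tomatoes", config_name="default")
+    >>> list(info.splits.keys())
+    ['train', 'validation', 'test']
+    ```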
+ + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'token=' instead.", + FutureWarning, + ) + token = use_auth_token + + builder = load_dataset_builder( + path, + name=config_name, + data_files=data_files, + download_config=download_config, + download_mode=download_mode, + revision=revision, + token=token, + **config_kwargs, + ) + info = builder.info + if info.splits is None: + download_config = download_config.copy() if download_config else DownloadConfig() + if token is not None: + download_config.token = token + builder._check_manual_download( + StreamingDownloadManager(base_path=builder.base_path, download_config=download_config) + ) + try: + info.splits = { + split_generator.name: {"name": split_generator.name, "dataset_name": path} + for split_generator in builder._split_generators( + StreamingDownloadManager(base_path=builder.base_path, download_config=download_config) + ) + } + except Exception as err: + raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err + return info + + +def get_dataset_split_names( + path: str, + config_name: Optional[str] = None, + data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + revision: Optional[Union[str, Version]] = None, + token: Optional[Union[bool, str]] = None, + use_auth_token="deprecated", + **config_kwargs, +): + """Get the list of available splits for a particular config and dataset. + + Args: + path (`str`): path to the dataset processing script with the dataset builder. Can be either: + + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'` + - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`]) + e.g. `'squad'`, `'glue'` or `'openai/webtext'` + config_name (`str`, *optional*): + Defining the name of the dataset configuration. + data_files (`str` or `Sequence` or `Mapping`, *optional*): + Path(s) to source data file(s). + download_config ([`DownloadConfig`], *optional*): + Specific download configuration parameters. + download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`): + Download/generate mode. + revision ([`Version`] or `str`, *optional*): + Version of the dataset script to load. + As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch. + You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository. + token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If `True`, or not specified, will get token from `"~/.huggingface"`. + use_auth_token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If `True`, or not specified, will get token from `"~/.huggingface"`. + + + + `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. 
+ + + + **config_kwargs (additional keyword arguments): + Optional attributes for builder class which will override the attributes if supplied. + + Example: + + ```py + >>> from datasets import get_dataset_split_names + >>> get_dataset_split_names('rotten_tomatoes') + ['train', 'validation', 'test'] + ``` + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'token=' instead.", + FutureWarning, + ) + token = use_auth_token + + info = get_dataset_config_info( + path, + config_name=config_name, + data_files=data_files, + download_config=download_config, + download_mode=download_mode, + revision=revision, + token=token, + **config_kwargs, + ) + return list(info.splits.keys()) diff --git a/testbed/huggingface__datasets/src/datasets/io/abc.py b/testbed/huggingface__datasets/src/datasets/io/abc.py new file mode 100644 index 0000000000000000000000000000000000000000..a1913cc20e3fd748ef912e2fb3d7c1e18f16ac8c --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/io/abc.py @@ -0,0 +1,53 @@ +from abc import ABC, abstractmethod +from typing import Optional, Union + +from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit +from ..utils.typing import NestedDataStructureLike, PathLike + + +class AbstractDatasetReader(ABC): + def __init__( + self, + path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None, + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + self.path_or_paths = path_or_paths + self.split = split if split or isinstance(path_or_paths, dict) else "train" + self.features = features + self.cache_dir = cache_dir + self.keep_in_memory = keep_in_memory + self.streaming = streaming + self.num_proc = num_proc + self.kwargs = kwargs + + @abstractmethod + def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: + pass + + +class AbstractDatasetInputStream(ABC): + def __init__( + self, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + num_proc: Optional[int] = None, + **kwargs, + ): + self.features = features + self.cache_dir = cache_dir + self.keep_in_memory = keep_in_memory + self.streaming = streaming + self.num_proc = num_proc + self.kwargs = kwargs + + @abstractmethod + def read(self) -> Union[Dataset, IterableDataset]: + pass diff --git a/testbed/huggingface__datasets/src/datasets/io/generator.py b/testbed/huggingface__datasets/src/datasets/io/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb461769c5bec9b86c984a17bb4890bdc9fab7e --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/io/generator.py @@ -0,0 +1,58 @@ +from typing import Callable, Optional + +from .. 
import Features +from ..packaged_modules.generator.generator import Generator +from .abc import AbstractDatasetInputStream + + +class GeneratorDatasetInputStream(AbstractDatasetInputStream): + def __init__( + self, + generator: Callable, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + gen_kwargs: Optional[dict] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + self.builder = Generator( + cache_dir=cache_dir, + features=features, + generator=generator, + gen_kwargs=gen_kwargs, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split="train") + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + try_from_hf_gcs=False, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset diff --git a/testbed/huggingface__datasets/src/datasets/io/json.py b/testbed/huggingface__datasets/src/datasets/io/json.py new file mode 100644 index 0000000000000000000000000000000000000000..ae4710e072681bbba0ecc759aaf2632270349d28 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/io/json.py @@ -0,0 +1,163 @@ +import multiprocessing +import os +from typing import BinaryIO, Optional, Union + +import fsspec + +from .. 
import Dataset, Features, NamedSplit, config +from ..formatting import query_table +from ..packaged_modules.json.json import Json +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + + +class JsonDatasetReader(AbstractDatasetReader): + def __init__( + self, + path_or_paths: NestedDataStructureLike[PathLike], + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + streaming: bool = False, + field: Optional[str] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + super().__init__( + path_or_paths, + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + num_proc=num_proc, + **kwargs, + ) + self.field = field + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Json( + cache_dir=cache_dir, + data_files=path_or_paths, + features=features, + field=field, + **kwargs, + ) + + def read(self): + # Build iterable dataset + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + # Build regular (map-style) dataset + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + num_proc=self.num_proc, + ) + dataset = self.builder.as_dataset( + split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class JsonDatasetWriter: + def __init__( + self, + dataset: Dataset, + path_or_buf: Union[PathLike, BinaryIO], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + **to_json_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.encoding = "utf-8" + self.to_json_kwargs = to_json_kwargs + + def write(self) -> int: + _ = self.to_json_kwargs.pop("path_or_buf", None) + orient = self.to_json_kwargs.pop("orient", "records") + lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False) + if "index" not in self.to_json_kwargs and orient in ["split", "table"]: + self.to_json_kwargs["index"] = False + compression = self.to_json_kwargs.pop("compression", None) + + if compression not in [None, "infer", "gzip", "bz2", "xz"]: + raise NotImplementedError(f"`datasets` currently does not support {compression} compression") + + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer: + written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) + else: + if compression: + raise NotImplementedError( + f"The compression parameter is not supported when writing to a buffer, but compression={compression}" + " was passed. Please provide a local path instead." 
+ ) + written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) + return written + + def _batch_json(self, args): + offset, orient, lines, to_json_kwargs = args + + batch = query_table( + table=self.dataset.data, + key=slice(offset, offset + self.batch_size), + indices=self.dataset._indices, + ) + json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) + if not json_str.endswith("\n"): + json_str += "\n" + return json_str.encode(self.encoding) + + def _write( + self, + file_obj: BinaryIO, + orient, + lines, + **to_json_kwargs, + ) -> int: + """Writes the pyarrow table as JSON lines to a binary file handle. + + Caller is responsible for opening and closing the handle. + """ + written = 0 + + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm( + range(0, len(self.dataset), self.batch_size), + unit="ba", + desc="Creating json from Arrow format", + ): + json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) + written += file_obj.write(json_str) + else: + num_rows, batch_size = len(self.dataset), self.batch_size + with multiprocessing.Pool(self.num_proc) as pool: + for json_str in hf_tqdm( + pool.imap( + self._batch_json, + [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)], + ), + total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, + unit="ba", + desc="Creating json from Arrow format", + ): + written += file_obj.write(json_str) + + return written diff --git a/testbed/huggingface__datasets/src/datasets/io/spark.py b/testbed/huggingface__datasets/src/datasets/io/spark.py new file mode 100644 index 0000000000000000000000000000000000000000..7562ba1fb5f77ed8f82374e3021fcb3a93b1da8d --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/io/spark.py @@ -0,0 +1,57 @@ +from typing import Optional + +import pyspark + +from .. import Features, NamedSplit +from ..download import DownloadMode +from ..packaged_modules.spark.spark import Spark +from .abc import AbstractDatasetReader + + +class SparkDatasetReader(AbstractDatasetReader): + """A dataset reader that reads from a Spark DataFrame. + + When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be + provided. Streaming is not currently supported. 
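+
+    Example (a sketch, assuming an existing `SparkSession` named `spark`):
+
+    ```py
+    >>> df = spark.createDataFrame([("a",), ("b",)], ["text"])
+    >>> ds = SparkDatasetReader(df, streaming=False).read()
+    ```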
+ """ + + def __init__( + self, + df: pyspark.sql.DataFrame, + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + streaming: bool = True, + cache_dir: str = None, + keep_in_memory: bool = False, + working_dir: str = None, + load_from_cache_file: bool = True, + file_format: str = "arrow", + **kwargs, + ): + super().__init__( + split=split, + features=features, + cache_dir=cache_dir, + keep_in_memory=keep_in_memory, + streaming=streaming, + **kwargs, + ) + self._load_from_cache_file = load_from_cache_file + self._file_format = file_format + self.builder = Spark( + df=df, + features=features, + cache_dir=cache_dir, + working_dir=working_dir, + **kwargs, + ) + + def read(self): + if self.streaming: + return self.builder.as_streaming_dataset(split=self.split) + download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD + self.builder.download_and_prepare( + download_mode=download_mode, + file_format=self._file_format, + ) + return self.builder.as_dataset(split=self.split) diff --git a/testbed/huggingface__datasets/src/datasets/io/sql.py b/testbed/huggingface__datasets/src/datasets/io/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb425447c29c170499f68ab6fa221844e36d760 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/io/sql.py @@ -0,0 +1,125 @@ +import multiprocessing +from typing import TYPE_CHECKING, Optional, Union + +from .. import Dataset, Features, config +from ..formatting import query_table +from ..packaged_modules.sql.sql import Sql +from ..utils import tqdm as hf_tqdm +from .abc import AbstractDatasetInputStream + + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + + +class SqlDatasetReader(AbstractDatasetInputStream): + def __init__( + self, + sql: Union[str, "sqlalchemy.sql.Selectable"], + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], + features: Optional[Features] = None, + cache_dir: str = None, + keep_in_memory: bool = False, + **kwargs, + ): + super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs) + self.builder = Sql( + cache_dir=cache_dir, + features=features, + sql=sql, + con=con, + **kwargs, + ) + + def read(self): + download_config = None + download_mode = None + verification_mode = None + base_path = None + + self.builder.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + # try_from_hf_gcs=try_from_hf_gcs, + base_path=base_path, + ) + + # Build dataset for splits + dataset = self.builder.as_dataset( + split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory + ) + return dataset + + +class SqlDatasetWriter: + def __init__( + self, + dataset: Dataset, + name: str, + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], + batch_size: Optional[int] = None, + num_proc: Optional[int] = None, + **to_sql_kwargs, + ): + if num_proc is not None and num_proc <= 0: + raise ValueError(f"num_proc {num_proc} must be an integer > 0.") + + self.dataset = dataset + self.name = name + self.con = con + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.to_sql_kwargs = to_sql_kwargs + + def write(self) -> int: + _ = self.to_sql_kwargs.pop("sql", None) + _ = self.to_sql_kwargs.pop("con", None) + index = self.to_sql_kwargs.pop("index", False) + + written = self._write(index=index, 
**self.to_sql_kwargs)
+        return written
+
+    def _batch_sql(self, args):
+        offset, index, to_sql_kwargs = args
+        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
+        batch = query_table(
+            table=self.dataset.data,
+            key=slice(offset, offset + self.batch_size),
+            indices=self.dataset._indices,
+        )
+        df = batch.to_pandas()
+        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
+        return num_rows or len(df)
+
+    def _write(self, index, **to_sql_kwargs) -> int:
+        """Writes the pyarrow table as SQL to a database.
+
+        Caller is responsible for opening and closing the SQL connection.
+        """
+        written = 0
+
+        if self.num_proc is None or self.num_proc == 1:
+            for offset in hf_tqdm(
+                range(0, len(self.dataset), self.batch_size),
+                unit="ba",
+                desc="Creating SQL from Arrow format",
+            ):
+                written += self._batch_sql((offset, index, to_sql_kwargs))
+        else:
+            num_rows, batch_size = len(self.dataset), self.batch_size
+            with multiprocessing.Pool(self.num_proc) as pool:
+                for num_rows in hf_tqdm(
+                    pool.imap(
+                        self._batch_sql,
+                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
+                    ),
+                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
+                    unit="ba",
+                    desc="Creating SQL from Arrow format",
+                ):
+                    written += num_rows
+
+        return written
diff --git a/testbed/huggingface__datasets/src/datasets/iterable_dataset.py b/testbed/huggingface__datasets/src/datasets/iterable_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9c6057929cc00353eb6d3896724c612e0a06caf
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/iterable_dataset.py
@@ -0,0 +1,2381 @@
+import copy
+import itertools
+import sys
+import warnings
+from collections import Counter
+from copy import deepcopy
+from dataclasses import dataclass
+from functools import partial
+from itertools import cycle, islice
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+
+from . import config
+from .arrow_dataset import Dataset, DatasetInfoMixin
+from .features import Features
+from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects
+from .filesystems import _reset_fsspec_lock
+from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter
+from .info import DatasetInfo
+from .splits import NamedSplit
+from .table import cast_table_to_features, read_schema_from_file, table_cast
+from .utils.logging import get_logger
+from .utils.py_utils import Literal
+from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs
+
+
+logger = get_logger(__name__)
+
+Key = Union[int, str]
+
+
+def identity_func(x):
+    return x
+
+
+def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]):
+    if any(col not in example for col in column_mapping):
+        raise ValueError(
+            f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
+        )
+    if any(col in example for col in column_mapping.values()):
+        raise ValueError(
+            f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping.values()) & set(example)} are already in the dataset."
+        )
+    return {
+        new_column_name: example[original_column_name]
+        for original_column_name, new_column_name in column_mapping.items()
+    }
+
+
+def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]):
+    if name in example:
+        raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
+    return {name: column[idx]}
+
+
+def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
+    pa_table = pa.Table.from_pydict(batch)
+    if try_features is not None:
+        try:
+            pa_table = table_cast(pa_table, pa.schema(try_features.type))
+        except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
+            pass
+    return Features.from_arrow_schema(pa_table.schema)
+
+
+def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
+    # we order the columns by order of appearance
+    # to do so, we use a dict as an ordered set
+    cols = {col: None for example in examples for col in example}
+    # when an example is missing a column, we set the value to None with .get()
+    arrays = [[example.get(col) for example in examples] for col in cols]
+    return dict(zip(cols, arrays))
+
+
+def _batch_to_examples(batch: Dict[str, list]) -> Iterator[Dict[str, Any]]:
+    """Convert a batch (dict of examples) to examples list"""
+    n_examples = len(batch[next(iter(batch))])
+    for i in range(n_examples):
+        yield {col: array[i] for col, array in batch.items()}
+
+
+class _HasNextIterator(Iterator):
+    """Iterator with a `hasnext()` method. Taken from https://stackoverflow.com/questions/1966591/has-next-in-python-iterators."""
+
+    def __init__(self, it):
+        self.it = iter(it)
+        self._hasnext = None
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self._hasnext:
+            result = self._thenext
+        else:
+            result = next(self.it)
+        self._hasnext = None
+        return result
+
+    def hasnext(self):
+        if self._hasnext is None:
+            try:
+                self._thenext = next(self.it)
+            except StopIteration:
+                self._hasnext = False
+            else:
+                self._hasnext = True
+        return self._hasnext
+
+
+def _convert_to_arrow(
+    iterable: Iterable[Tuple[Key, dict]],
+    batch_size: Optional[int],
+    drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+    """Convert and group examples in Arrow tables of size `batch_size`.
+
+    Args:
+        iterable (`Iterable[Tuple[Key, dict]]`):
+            An examples iterable containing tuples (example_key, example) of type (int/str, dict)
+        batch_size (`Optional[int]`):
+            Size of each sub-table to yield. If None or <= 0, yields the full table.
+        drop_last_batch (`bool`, defaults to `False`):
+            Drop the last batch if it is smaller than `batch_size`.
+    """
+    if batch_size is None or batch_size <= 0:
+        yield (
+            "all",
+            pa.Table.from_pylist(cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)),
+        )
+        return
+    iterator = iter(iterable)
+    for key, example in iterator:
+        iterator_batch = islice(iterator, batch_size - 1)
+        key_examples_list = [(key, example)] + list(iterator_batch)
+        if len(key_examples_list) < batch_size and drop_last_batch:
+            return
+        keys, examples = zip(*key_examples_list)
+        new_key = "_".join(str(key) for key in keys)
+        yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
+
+
+def _batch_arrow_tables(
+    iterable: Iterable[Tuple[Key, pa.Table]],
+    batch_size: Optional[int],
+    drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+    """Iterate over sub-tables of size `batch_size`.
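+
+    For example, with `batch_size=4`, two incoming 3-row tables yield one 4-row sub-table followed by a final
+    2-row sub-table; the trailing short batch is dropped only if `drop_last_batch=True`.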
+ + Args: + iterable (`Iterable[Tuple[Key, pa.Table]]`): + A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table) + batch_size (`Optional[int]`): + Size of each sub-table to yield. If None or <= 0, yields the full table. + drop_last_batch (`bool`, defaults to `False`): + Drop the last batch if it is smaller than `batch_size`. + """ + if batch_size is None or batch_size <= 0: + yield "all", pa.concat_tables([pa_table for _, pa_table in iterable]) + return + keys_buffer = [] + chunks_buffer = [] + chunks_buffer_size = 0 + for key, pa_table in iterable: + for chunk in pa_table.to_reader(max_chunksize=batch_size): + if len(chunk) == 0: + continue + elif chunks_buffer_size + len(chunk) < batch_size: + keys_buffer.append(key) + chunks_buffer.append(chunk) + chunks_buffer_size += len(chunk) + continue + elif chunks_buffer_size + len(chunk) == batch_size: + keys_buffer.append(key) + chunks_buffer.append(chunk) + new_key = "_".join(str(_key) for _key in keys_buffer) + yield new_key, pa.Table.from_batches(chunks_buffer) + keys_buffer = [] + chunks_buffer = [] + chunks_buffer_size = 0 + else: + cropped_chunk_length = batch_size - chunks_buffer_size + keys_buffer.append(f"{key}[:{cropped_chunk_length}]") + chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) + new_key = "_".join(str(_key) for _key in keys_buffer) + yield new_key, pa.Table.from_batches(chunks_buffer) + keys_buffer = [f"{key}[{cropped_chunk_length}:]"] + chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] + chunks_buffer_size = len(chunk) - cropped_chunk_length + if not drop_last_batch and chunks_buffer: + new_key = "_".join(str(_key) for _key in keys_buffer) + yield new_key, pa.Table.from_batches(chunks_buffer) + + +class _BaseExamplesIterable: + """Base class for the examples iterable used by an IterableDataset""" + + def __init__(self) -> None: + self.iter_arrow: Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]] = None + + def __iter__(self) -> Iterator[Tuple[Key, dict]]: + """An examples iterable should yield tuples (example_key, example) of type (int/str, dict)""" + raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet") + + def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable": + """ + Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable. + If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self. 
+ """ + raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet") + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "_BaseExamplesIterable": + """Either keep only the requested shard, or propagate the request to the underlying iterable.""" + raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet") + + def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]: + return list(range(worker_id, self.n_shards, num_workers)) + + @property + def n_shards(self) -> int: + raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet") + + +class ExamplesIterable(_BaseExamplesIterable): + def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict): + super().__init__() + self.generate_examples_fn = generate_examples_fn + self.kwargs = kwargs + + def __iter__(self): + yield from self.generate_examples_fn(**self.kwargs) + + def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable": + return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable": + """Keep only the requested shard.""" + gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards) + shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers) + requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices]) + return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs) + + @property + def n_shards(self) -> int: + return _number_of_shards_in_gen_kwargs(self.kwargs) + + +class ShuffledDataSourcesExamplesIterable(ExamplesIterable): + def __init__( + self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator + ): + super().__init__(generate_examples_fn, kwargs) + self.generator = deepcopy(generator) + + def __iter__(self): + """Shuffle the kwargs order to shuffle shards""" + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + yield from self.generate_examples_fn(**kwargs_with_shuffled_shards) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable": + """Keep only the requested shard.""" + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources( + worker_id, num_workers + ) + + +class ArrowExamplesIterable(_BaseExamplesIterable): + def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict): + super().__init__() + self.generate_tables_fn = generate_tables_fn + self.kwargs = kwargs + self.iter_arrow = self._iter_arrow + + def __iter__(self): + formatter = PythonFormatter() + for key, pa_table in self.generate_tables_fn(**self.kwargs): + for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER): + formatted_batch = formatter.format_batch(pa_subtable) + for example in _batch_to_examples(formatted_batch): + yield key, example + + def _iter_arrow(self): + yield from self.generate_tables_fn(**self.kwargs) + + def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable": + return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator) + + def shard_data_sources(self, worker_id: int, num_workers: 
int) -> "ArrowExamplesIterable": + """Keep only the requested shard.""" + gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards) + shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers) + requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices]) + return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs) + + @property + def n_shards(self) -> int: + return _number_of_shards_in_gen_kwargs(self.kwargs) + + +class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable): + def __init__( + self, + generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], + kwargs: dict, + generator: np.random.Generator, + ): + super().__init__(generate_tables_fn, kwargs) + self.generator = deepcopy(generator) + + def __iter__(self): + """Shuffle the kwargs order to shuffle shards""" + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + formatter = PythonFormatter() + for key, pa_table in self.generate_tables_fn(**kwargs_with_shuffled_shards): + for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER): + formatted_batch = formatter.format_batch(pa_subtable) + for example in _batch_to_examples(formatted_batch): + yield key, example + + def _iter_arrow(self): + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + yield from self.generate_tables_fn(**kwargs_with_shuffled_shards) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable": + """Keep only the requested shard.""" + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources( + worker_id, num_workers + ) + + +class SelectColumnsIterable(_BaseExamplesIterable): + def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]): + super().__init__() + self.ex_iterable = ex_iterable + self.column_names = column_names + if self.ex_iterable.iter_arrow: + self.iter_arrow = self._iter_arrow + + def __iter__(self): + for idx, row in self.ex_iterable: + yield idx, {c: row[c] for c in self.column_names} + + def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: + for idx, pa_table in self.ex_iterable.iter_arrow(): + yield idx, pa_table.select(self.column_names) + + def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable": + return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "SelectColumnsIterable": + return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + + +class StepExamplesIterable(_BaseExamplesIterable): + def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int): + super().__init__() + self.ex_iterable = ex_iterable + self.step = step + self.offset = offset + # TODO(QL): implement iter_arrow + + def __iter__(self): + ex_iterator = iter(self.ex_iterable) + while True: + batch = list(islice(ex_iterator, self.step)) + if len(batch) > self.offset: + yield batch[self.offset] + else: + break + + def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable": + return StepExamplesIterable( + 
+            self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
+        )
+
+    def shard_data_sources(self, worker_id: int, num_workers: int) -> "StepExamplesIterable":
+        return StepExamplesIterable(
+            self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset
+        )
+
+    @property
+    def n_shards(self) -> int:
+        return self.ex_iterable.n_shards
+
+
+class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
+    def __init__(
+        self,
+        ex_iterables: List[_BaseExamplesIterable],
+        stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+    ):
+        super().__init__()
+        self.ex_iterables = ex_iterables
+        self.stopping_strategy = stopping_strategy
+
+        # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+        self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
+        # TODO(QL): implement iter_arrow
+
+    def _get_indices_iterator(self):
+        # this is an infinite iterator to keep track of which iterator we want to pick examples from
+        return cycle(range(len(self.ex_iterables)))
+
+    def __iter__(self):
+        iterators = [_HasNextIterator(ex_iterable) for ex_iterable in self.ex_iterables]
+
+        indices_iterator = self._get_indices_iterator()
+
+        is_exhausted = np.full(len(self.ex_iterables), False)
+        for i in indices_iterator:
+            try:  # let's pick one example from the iterator at index i
+                yield next(iterators[i])
+
+                # it will resume from the yield at the next call so that we can directly test if the iterable is exhausted and if we need to break out of the loop
+                if not iterators[i].hasnext():
+                    is_exhausted[i] = True
+
+                    if self.bool_strategy_func(is_exhausted):
+                        # if the stopping criteria is met, break the main for loop
+                        break
+                    # otherwise reinitialise the iterator and yield the first example
+                    iterators[i] = _HasNextIterator(self.ex_iterables[i])
+
+            except StopIteration:
+                # here it means that the i-th iterable dataset is empty, i.e. we never get the chance to yield an element of the i-th dataset.
+                # we still check if the stopping criteria is met and if we break out of the loop in case of an oversampling strategy
+                is_exhausted[i] = True
+
+                if self.bool_strategy_func(is_exhausted):
+                    # if the stopping criteria is met, break the main for loop
+                    break
+
+    def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
+        """Shuffle each underlying examples iterable."""
+        ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+        return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
+
+    @property
+    def n_shards(self) -> int:
+        return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+    def shard_data_sources(self, worker_id: int, num_workers: int) -> "CyclingMultiSourcesExamplesIterable":
+        """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+        return CyclingMultiSourcesExamplesIterable(
+            [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+            stopping_strategy=self.stopping_strategy,
+        )
+
+
+class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+    """
+    VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
+ It doesn't require the examples iterables to always yield the same columns. + Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`. + + For information, `IterableDataset` merges the features of all the datasets to concatenate into one. + We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate. + + Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None. + This is done with `_apply_feature_types_on_example`. + """ + + def __init__(self, ex_iterables: List[_BaseExamplesIterable]): + super().__init__() + self.ex_iterables = ex_iterables + if all(ex_iterable.iter_arrow is not None for ex_iterable in ex_iterables): + self.iter_arrow = self._iter_arrow + + def __iter__(self): + for ex_iterable in self.ex_iterables: + yield from ex_iterable + + def _iter_arrow(self): + for ex_iterable in self.ex_iterables: + yield from ex_iterable.iter_arrow() + + def shuffle_data_sources( + self, generator: np.random.Generator + ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable": + """Shuffle the list of examples iterable, as well as each underlying examples iterable.""" + rng = deepcopy(generator) + ex_iterables = list(self.ex_iterables) + rng.shuffle(ex_iterables) + ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables] + return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) + + @property + def n_shards(self) -> int: + return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables) + + def shard_data_sources( + self, worker_id: int, num_workers: int + ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable": + """Either keep only the requested shard, or propagate the request to the underlying iterable.""" + return VerticallyConcatenatedMultiSourcesExamplesIterable( + [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables] + ) + + +def _check_column_names(column_names: List[str]): + """Check the column names to make sure they don't contain duplicates.""" + counter = Counter(column_names) + if not all(count == 1 for count in counter.values()): + duplicated_columns = [col for col in counter if counter[col] > 1] + raise ValueError( + f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated." + ) + + +class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable): + """ + HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables. + It also checks that there are no duplicate columns (otherwise we don't know which one to keep). + This check is done once when yielding the first example. + + However it doesn't fill missing columns with None. + Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`. + + For information, `IterableDataset` merges the features of all the datasets to concatenate into one. + We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate. + + Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None. + This is done with `_apply_feature_types_on_example`. 
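+
+    For example (an illustrative sketch): an iterable yielding `(0, {"a": 1})` and another yielding
+    `(0, {"b": 2})` are merged into a single example with key `"0_0"`: `{"a": 1, "b": 2}`.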
+ """ + + def __init__(self, ex_iterables: List[_BaseExamplesIterable]): + super().__init__() + self.ex_iterables = ex_iterables + # TODO(QL): implement iter_arrow + + def __iter__(self): + ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables] + for i in itertools.count(): + keys = [] + examples = [] + for ex_iterator in list(ex_iterators): + try: + key, example = next(ex_iterator) + keys.append(key) + examples.append(example) + except StopIteration: + ex_iterators.remove(ex_iterator) + if ex_iterators: + if i == 0: + _check_column_names([column_name for example in examples for column_name in example]) + new_example = {} + for example in examples: + new_example.update(example) + new_key = "_".join(str(key) for key in keys) + yield new_key, new_example + else: + break + + def shuffle_data_sources( + self, generator: np.random.Generator + ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable": + """Doesn't shuffle the wrapped examples iterable since it would break the alignment between them.""" + return self + + @property + def n_shards(self) -> int: + return 1 + + def shard_data_sources( + self, worker_id: int, num_workers: int + ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable": + """Either keep only the requested shard, or propagate the request to the underlying iterable.""" + return HorizontallyConcatenatedMultiSourcesExamplesIterable( + [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables] + ) + + +class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable): + def __init__( + self, + ex_iterables: List[_BaseExamplesIterable], + generator: np.random.Generator, + probabilities: Optional[List[float]] = None, + stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted", + ): + super().__init__(ex_iterables, stopping_strategy) + self.generator = deepcopy(generator) + self.probabilities = probabilities + # TODO(QL): implement iter_arrow + + @staticmethod + def _iter_random_indices( + rng: np.random.Generator, + num_sources: int, + random_batch_size=1000, + p: Optional[List[float]] = None, + ) -> Iterator[int]: + """Get an infinite iterator that randomly samples the index of the source to pick examples from.""" + if p is None: + while True: + yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size)) + else: + while True: + yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p)) + + def _get_indices_iterator(self): + rng = deepcopy(self.generator) + # this is an infinite iterator that randomly samples the index of the source to pick examples from + return self._iter_random_indices(rng, len(self.ex_iterables), p=self.probabilities) + + def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable": + """Shuffle the data sources of each wrapped examples iterable.""" + ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables] + return RandomlyCyclingMultiSourcesExamplesIterable( + ex_iterables, + generator=generator, + probabilities=self.probabilities, + stopping_strategy=self.stopping_strategy, + ) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "RandomlyCyclingMultiSourcesExamplesIterable": + """Either keep only the requested shard, or propagate the request to the underlying iterable.""" + return RandomlyCyclingMultiSourcesExamplesIterable( + [iterable.shard_data_sources(worker_id, num_workers) for iterable in 
self.ex_iterables], + self.generator, + self.probabilities, + self.stopping_strategy, + ) + + +class MappedExamplesIterable(_BaseExamplesIterable): + def __init__( + self, + ex_iterable: _BaseExamplesIterable, + function: Callable, + with_indices: bool = False, + input_columns: Optional[List[str]] = None, + batched: bool = False, + batch_size: Optional[int] = 1000, + drop_last_batch: bool = False, + remove_columns: Optional[List[str]] = None, + fn_kwargs: Optional[dict] = None, + formatting: Optional["FormattingConfig"] = None, + format_type="deprecated", + ): + if format_type != "deprecated": + warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. " + help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead." + warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2) + formatting = FormattingConfig(format_type=format_type) + super().__init__() + self.ex_iterable = ex_iterable + self.function = function + self.batched = batched + self.batch_size = batch_size + self.drop_last_batch = drop_last_batch + self.remove_columns = remove_columns + self.with_indices = with_indices + self.input_columns = input_columns + self.fn_kwargs = fn_kwargs or {} + self.formatting = formatting + if self.formatting and self.formatting.format_type == "arrow": + self.iter_arrow = self._iter_arrow + + def __iter__(self): + if self.formatting and self.formatting.format_type == "arrow": + yield from ArrowExamplesIterable(self._iter_arrow, {}) + else: + yield from self._iter() + + def _iter(self): + iterator = iter(self.ex_iterable) + current_idx = 0 + + if self.formatting: + formatter = get_formatter(self.formatting.format_type) + format_dict = ( + formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + ) + else: + format_dict = None + + if self.batched: + for key, example in iterator: + # If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset + iterator_batch = ( + iterator + if self.batch_size is None or self.batch_size <= 0 + else islice(iterator, self.batch_size - 1) + ) + key_examples_list = [(key, example)] + list(iterator_batch) + keys, examples = zip(*key_examples_list) + if ( + self.drop_last_batch + and self.batch_size is not None + and self.batch_size > 0 + and len(examples) < self.batch_size + ): # ignore last batch + return + batch = _examples_to_batch(examples) + batch = format_dict(batch) if format_dict else batch + # then apply the transform + inputs = batch + function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] + if self.with_indices: + function_args.append([current_idx + i for i in range(len(key_examples_list))]) + transformed_batch = dict(batch) # this will be updated with the function output + transformed_batch.update(self.function(*function_args, **self.fn_kwargs)) + # then remove the unwanted columns + if self.remove_columns: + for c in self.remove_columns: + del transformed_batch[c] + if transformed_batch: + first_col = next(iter(transformed_batch)) + bad_cols = [ + col + for col in transformed_batch + if len(transformed_batch[col]) != len(transformed_batch[first_col]) + ] + if bad_cols: + raise ValueError( + f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}." 
+                        )
+                # the new key is the concatenation of the examples keys from the batch
+                new_key = "_".join(str(key) for key in keys)
+                # yield one example at a time from the transformed batch
+                for example in _batch_to_examples(transformed_batch):
+                    yield new_key, example
+                # advance the index by the number of input examples in the batch, not by 1,
+                # so that the indices passed via `with_indices` stay consistent
+                current_idx += len(key_examples_list)
+        else:
+            for key, example in iterator:
+                # If not batched, we can apply the transform and yield the example directly
+                # first copy the example, since we might drop some keys
+                example = dict(example)
+                example = format_dict(example) if format_dict else example
+                # then apply the transform
+                inputs = example
+                function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+                if self.with_indices:
+                    function_args.append(current_idx)
+                transformed_example = dict(example)  # this will be updated with the function output
+                transformed_example.update(self.function(*function_args, **self.fn_kwargs))
+                # then we remove the unwanted columns
+                if self.remove_columns:
+                    for c in self.remove_columns:
+                        del transformed_example[c]
+                yield key, transformed_example
+                current_idx += 1
+
+    def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+        if self.ex_iterable.iter_arrow:
+            iterator = _batch_arrow_tables(
+                self.ex_iterable.iter_arrow(),
+                batch_size=self.batch_size if self.batched else 1,
+                drop_last_batch=self.drop_last_batch,
+            )
+        else:
+            iterator = _convert_to_arrow(
+                self.ex_iterable,
+                batch_size=self.batch_size if self.batched else 1,
+                drop_last_batch=self.drop_last_batch,
+            )
+        current_idx = 0
+        for key, pa_table in iterator:
+            # first build the batch
+            function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+            if self.with_indices:
+                if self.batched:
+                    function_args.append([current_idx + i for i in range(len(pa_table))])
+                else:
+                    function_args.append(current_idx)
+            # then apply the transform
+            output_table = self.function(*function_args, **self.fn_kwargs)
+            if not isinstance(output_table, pa.Table):
+                raise TypeError(
+                    f"Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a pyarrow table to update the dataset."
+                )
+            # we don't need to merge results for consistency with Dataset.map which merges only if both input and output are dicts
+            # then remove the unwanted columns
+            if self.remove_columns:
+                for column in self.remove_columns:
+                    if column in output_table.column_names:
+                        output_table = output_table.remove_column(output_table.column_names.index(column))
+            # return output
+            yield key, output_table
+            current_idx += len(pa_table)
+
+    def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
+        """Shuffle the wrapped examples iterable."""
+        return MappedExamplesIterable(
+            self.ex_iterable.shuffle_data_sources(generator),
+            function=self.function,
+            with_indices=self.with_indices,
+            input_columns=self.input_columns,
+            batched=self.batched,
+            batch_size=self.batch_size,
+            drop_last_batch=self.drop_last_batch,
+            remove_columns=self.remove_columns,
+            fn_kwargs=self.fn_kwargs,
+            formatting=self.formatting,
+        )
+
+    def shard_data_sources(self, worker_id: int, num_workers: int) -> "MappedExamplesIterable":
+        """Keep only the requested shard."""
+        return MappedExamplesIterable(
+            self.ex_iterable.shard_data_sources(worker_id, num_workers),
+            function=self.function,
+            with_indices=self.with_indices,
+            input_columns=self.input_columns,
+            batched=self.batched,
+            batch_size=self.batch_size,
+            drop_last_batch=self.drop_last_batch,
+            remove_columns=self.remove_columns,
+            fn_kwargs=self.fn_kwargs,
+            formatting=self.formatting,
+        )
+
+    @property
+    def n_shards(self) -> int:
+        return self.ex_iterable.n_shards
+
+
+class FilteredExamplesIterable(_BaseExamplesIterable):
+    def __init__(
+        self,
+        ex_iterable: _BaseExamplesIterable,
+        function: Callable,
+        with_indices: bool = False,
+        input_columns: Optional[List[str]] = None,
+        batched: bool = False,
+        batch_size: Optional[int] = 1000,
+        fn_kwargs: Optional[dict] = None,
+        formatting: Optional["FormattingConfig"] = None,
+        format_type="deprecated",
+    ):
+        if format_type != "deprecated":
+            warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+            help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+            warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+            formatting = FormattingConfig(format_type=format_type)
+        super().__init__()
+        self.ex_iterable = ex_iterable
+        self.function = function
+        self.batched = batched
+        self.batch_size = batch_size
+        self.with_indices = with_indices
+        self.input_columns = input_columns
+        self.fn_kwargs = fn_kwargs or {}
+        self.formatting = formatting
+        if self.formatting and self.formatting.format_type == "arrow":
+            self.iter_arrow = self._iter_arrow
+
+    def __iter__(self):
+        if self.formatting and self.formatting.format_type == "arrow":
+            yield from ArrowExamplesIterable(self._iter_arrow, {})
+        else:
+            yield from self._iter()
+
+    def _iter(self):
+        if self.formatting:
+            formatter = get_formatter(self.formatting.format_type)
+            format_dict = (
+                formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+            )
+        else:
+            format_dict = None
+
+        iterator = iter(self.ex_iterable)
+        current_idx = 0
+        if self.batched:
+            for key, example in iterator:
+                # If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset
+                iterator_batch = (
+                    iterator
+                    if self.batch_size is None or self.batch_size <= 0
+                    else islice(iterator, self.batch_size - 1)
+                )
+                key_examples_list = [(key, example)] + list(iterator_batch)
+                keys, examples = zip(*key_examples_list)
+                batch = _examples_to_batch(examples)
+                batch = format_dict(batch) if format_dict else batch
+                # then compute the mask for the batch
+                inputs = batch
+                function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+                if self.with_indices:
+                    function_args.append([current_idx + i for i in range(len(key_examples_list))])
+                mask = self.function(*function_args, **self.fn_kwargs)
+                # yield one example at a time from the batch
+                for key_example, to_keep in zip(key_examples_list, mask):
+                    if to_keep:
+                        yield key_example
+                # advance the index by the number of examples in the batch, not by 1
+                current_idx += len(key_examples_list)
+        else:
+            for key, example in iterator:
+                # If not batched, we can apply the filtering function directly
+                example = dict(example)
+                inputs = format_dict(example) if format_dict else example
+                function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+                if self.with_indices:
+                    function_args.append(current_idx)
+                to_keep = self.function(*function_args, **self.fn_kwargs)
+                if to_keep:
+                    yield key, example
+                current_idx += 1
+
+    def _iter_arrow(self):
+        if self.ex_iterable.iter_arrow:
+            iterator = _batch_arrow_tables(
+                self.ex_iterable.iter_arrow(), batch_size=self.batch_size if self.batched else 1
+            )
+        else:
+            iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
+        current_idx = 0
+        for key, pa_table in iterator:
+            # first build the batch
+            function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+            if self.with_indices:
+                if self.batched:
+                    function_args.append([current_idx + i for i in range(len(pa_table))])
+                else:
+                    function_args.append(current_idx)
+            # then apply the transform
+            mask = self.function(*function_args, **self.fn_kwargs)
+            # yield the filtered table
+            if self.batched:
+                yield key, pa_table.filter(mask)
+            elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
+                yield key, pa_table
+            current_idx += len(pa_table)
+
+    def shuffle_data_sources(self, generator: np.random.Generator) -> "FilteredExamplesIterable":
+        """Shuffle the wrapped examples iterable."""
+        return FilteredExamplesIterable(
+            self.ex_iterable.shuffle_data_sources(generator),
+            function=self.function,
+            with_indices=self.with_indices,
+            input_columns=self.input_columns,
+            batched=self.batched,
+            batch_size=self.batch_size,
+            fn_kwargs=self.fn_kwargs,
+            formatting=self.formatting,
+        )
+
+    def shard_data_sources(self, worker_id: int, num_workers: int) -> "FilteredExamplesIterable":
+        """Keep only the requested shard."""
+        return FilteredExamplesIterable(
+            self.ex_iterable.shard_data_sources(worker_id, num_workers),
+            function=self.function,
+            with_indices=self.with_indices,
+            input_columns=self.input_columns,
+            batched=self.batched,
+            batch_size=self.batch_size,
+            fn_kwargs=self.fn_kwargs,
+            formatting=self.formatting,
+        )
+
+    @property
+    def n_shards(self) -> int:
+        return self.ex_iterable.n_shards
+
+
+class BufferShuffledExamplesIterable(_BaseExamplesIterable):
+    def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
+        super().__init__()
+        self.ex_iterable = ex_iterable
+        self.buffer_size = buffer_size
+        self.generator = generator
+        # TODO(QL): implement iter_arrow
+
+    @staticmethod
+    def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
+        while True:
+            yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
+
+    def __iter__(self):
+        buffer_size = self.buffer_size
+        rng = deepcopy(self.generator)
+        indices_iterator = self._iter_random_indices(rng, buffer_size)
+        # this is the shuffle buffer that we keep in memory
+        mem_buffer = []
+        for x in self.ex_iterable:
+            if len(mem_buffer) == buffer_size:  # if the buffer is full, pick an example from it
+                i = next(indices_iterator)
+                yield mem_buffer[i]
+                mem_buffer[i] = x  # replace the picked example by a new one
+            else:  # otherwise, keep filling the buffer
+                mem_buffer.append(x)
+        # when we run out of examples, we shuffle the remaining examples in the buffer and yield them
+        rng.shuffle(mem_buffer)
+        yield from mem_buffer
+
+    def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
+        """Shuffle the wrapped examples iterable as well as the shuffling buffer."""
+        return BufferShuffledExamplesIterable(
+            self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
+        )
+
+    def shard_data_sources(self, worker_id: int, num_workers: int) -> "BufferShuffledExamplesIterable":
+        """Keep only the requested shard."""
+        return BufferShuffledExamplesIterable(
+            self.ex_iterable.shard_data_sources(worker_id, num_workers),
+            buffer_size=self.buffer_size,
+            generator=self.generator,
+        )
+
+    @property
+    def n_shards(self) -> int:
+        return self.ex_iterable.n_shards
+
+
+class SkipExamplesIterable(_BaseExamplesIterable):
+    def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+        super().__init__()
+        self.ex_iterable = ex_iterable
+        self.n = n
+        # TODO(QL): implement iter_arrow
+
+    def __iter__(self):
+        yield from islice(self.ex_iterable, self.n, None)
+
+    def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
+        """Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
+        return self
+
+    @property
+    def n_shards(self) -> int:
+        return self.ex_iterable.n_shards
+
+
+class TakeExamplesIterable(_BaseExamplesIterable):
+    def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+        super().__init__()
+        self.ex_iterable = ex_iterable
+        self.n = n
+        # TODO(QL): implement iter_arrow
+
+    def __iter__(self):
+        yield from islice(self.ex_iterable, self.n)
+
+    def shuffle_data_sources(self, generator:
np.random.Generator) -> "TakeExamplesIterable": + """Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead.""" + return self + + @staticmethod + def split_number(num, n): + quotient = num // n + remainder = num % n + result = [quotient] * n + for i in range(remainder): + result[i] += 1 + return result + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "TakeExamplesIterable": + """Keep only the requested shard.""" + return TakeExamplesIterable( + self.ex_iterable.shard_data_sources(worker_id, num_workers), + n=self.split_number(self.n, num_workers)[worker_id], + ) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + + +def _apply_feature_types_on_example( + example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]] +) -> dict: + example = dict(example) + # add missing columns + for column_name in features: + if column_name not in example: + example[column_name] = None + # we encode the example for ClassLabel feature types for example + encoded_example = features.encode_example(example) + # Decode example for Audio feature, e.g. + decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id) + return decoded_example + + +def _apply_feature_types_on_batch( + batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]] +) -> dict: + batch = dict(batch) + # add missing columns + n_examples = len(batch[next(iter(batch))]) + for column_name in features: + if column_name not in batch: + batch[column_name] = [None] * n_examples + # we encode the batch for ClassLabel feature types for example + encoded_batch = features.encode_batch(batch) + # Decode batch for Audio feature, e.g. + decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id) + return decoded_batch + + +class TypedExamplesIterable(_BaseExamplesIterable): + def __init__( + self, + ex_iterable: _BaseExamplesIterable, + features: Features, + token_per_repo_id: Dict[str, Union[str, bool, None]], + ): + super().__init__() + self.ex_iterable = ex_iterable + self.features = features + self.token_per_repo_id = token_per_repo_id + if self.ex_iterable.iter_arrow is not None: + self.iter_arrow = self._iter_arrow + + def __iter__(self): + # Then for each example, `TypedExamplesIterable` automatically fills missing columns with None. + # This is done with `_apply_feature_types_on_example`. 
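+        # Note: encoding turns raw python values into storage-level values (e.g. a ClassLabel string into
+        # its integer id), and decoding turns storage-level values into user-facing objects (e.g. an Audio
+        # path or bytes into an array); both happen inside `_apply_feature_types_on_example`.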
+ for key, example in self.ex_iterable: + yield ( + key, + _apply_feature_types_on_example(example, self.features, token_per_repo_id=self.token_per_repo_id), + ) + + def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: + schema = self.features.arrow_schema + for key, pa_table in self.ex_iterable.iter_arrow(): + columns = set(pa_table.column_names) + # add missing columns + for column_name in self.features: + if column_name not in columns: + col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None]) + pa_table = pa_table.append_column(column_name, col) + if pa_table.schema != schema: + pa_table = cast_table_to_features(pa_table, self.features) + yield key, pa_table + + def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable": + """Shuffle the wrapped examples iterable.""" + return TypedExamplesIterable( + self.ex_iterable.shuffle_data_sources(generator), + features=self.features, + token_per_repo_id=self.token_per_repo_id, + ) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "TypedExamplesIterable": + """Keep only the requested shard.""" + return TypedExamplesIterable( + self.ex_iterable.shard_data_sources(worker_id, num_workers), + features=self.features, + token_per_repo_id=self.token_per_repo_id, + ) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + + +@dataclass +class FormattingConfig: + format_type: Optional[str] + + def __post_init__(self): + if self.format_type == "pandas": + raise NotImplementedError( + "The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead." + ) + + +@dataclass +class ShufflingConfig: + generator: np.random.Generator + _original_seed: Optional[int] = None + + +@dataclass +class DistributedConfig: + rank: int + world_size: int + + +def _maybe_add_torch_iterable_dataset_parent_class(cls): + """Add torch.utils.data.IterableDataset as a parent class if 'torch' is available""" + if config.TORCH_AVAILABLE: + import torch.utils.data + + if torch.utils.data.IterableDataset not in cls.__bases__: + cls.__bases__ += (torch.utils.data.IterableDataset,) + + +class IterableDataset(DatasetInfoMixin): + """A Dataset backed by an iterable.""" + + def __init__( + self, + ex_iterable: _BaseExamplesIterable, + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + formatting: Optional[FormattingConfig] = None, + shuffling: Optional[ShufflingConfig] = None, + distributed: Optional[DistributedConfig] = None, + token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None, + format_type="deprecated", + ): + if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None: + raise RuntimeError( + "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. " + "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. " + ) + if format_type != "deprecated": + warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. " + help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead." 
+            warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+            formatting = FormattingConfig(format_type=format_type)
+
+        info = info.copy() if info is not None else DatasetInfo()
+        DatasetInfoMixin.__init__(self, info=info, split=split)
+
+        self._ex_iterable = ex_iterable
+        self._formatting = formatting
+        self._shuffling = shuffling
+        self._distributed = distributed
+        self._epoch = 0
+        self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
+        _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+    def __getstate__(self):
+        return self.__dict__
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
+        _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+    def _head(self, n=5):
+        return _examples_to_batch(list(self.take(n)))
+
+    def _effective_generator(self):
+        if self._shuffling and self._epoch == 0:
+            return self._shuffling.generator
+        elif self._shuffling:
+            # Create effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
+            effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
+            effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
+            return np.random.default_rng(effective_seed)
+        else:
+            raise ValueError("This dataset is not shuffled")
+
+    @property
+    def n_shards(self) -> int:
+        if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0:
+            return self._ex_iterable.n_shards // self._distributed.world_size
+        return self._ex_iterable.n_shards
+
+    def _iter_pytorch(self):
+        ex_iterable = self._prepare_ex_iterable_for_iteration()
+        # fix for fsspec when using multiprocess
+        _reset_fsspec_lock()
+        # check if there aren't too many workers
+        import torch.utils.data
+
+        worker_info = torch.utils.data.get_worker_info()
+        if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers:
+            logger.warning(
+                f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). "
+                f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers."
+            )
+            logger.info(
+                f"To parallelize data loading, we give each process some shards (or data sources) to process. "
+                f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. "
+                f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}."
+            )
+        # split workload
+        _log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
+        shards_indices = self._ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers)
+        if shards_indices:
+            logger.debug(
+                f"{_log_prefix}dataloader worker#{worker_info.id}: Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+            )
+            ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers)
+
+            if self._formatting:
+                formatter = get_formatter(self._formatting.format_type, features=self.features)
+                format_dict = (
+                    formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+                )
+            else:
+                format_dict = None
+
+            if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+                if ex_iterable.iter_arrow:
+                    iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+                else:
+                    iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+                for key, pa_table in iterator:
+                    yield formatter.format_row(pa_table)
+                return
+            else:
+                for key, example in ex_iterable:
+                    if self.features:
+                        # `IterableDataset` automatically fills missing columns with None.
+                        # This is done with `_apply_feature_types_on_example`.
+                        example = _apply_feature_types_on_example(
+                            example, self.features, token_per_repo_id=self._token_per_repo_id
+                        )
+                    yield format_dict(example) if format_dict else example
+            logger.debug(
+                f"{_log_prefix}dataloader worker#{worker_info.id}: Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+            )
+        else:
+            logger.debug(
+                f"{_log_prefix}dataloader worker#{worker_info.id}: Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})."
+            )
+
+    def _is_main_process(self):
+        if self._distributed and self._distributed.rank > 0:
+            return False
+        if "torch" in sys.modules:
+            import torch.utils.data
+
+            worker_info = torch.utils.data.get_worker_info()
+            if worker_info is not None and worker_info.id > 0:
+                return False
+        return True
+
+    def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable:
+        if self._shuffling:
+            ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
+        else:
+            ex_iterable = self._ex_iterable
+
+        if self._distributed:
+            rank = self._distributed.rank
+            world_size = self._distributed.world_size
+            if ex_iterable.n_shards % world_size == 0:
+                if self._is_main_process():
+                    n_shards_per_node = ex_iterable.n_shards // world_size
+                    plural = "s" if n_shards_per_node > 1 else ""
+                    logger.info(
+                        f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
+                    )
+                ex_iterable = ex_iterable.shard_data_sources(rank, world_size)
+            else:
+                if self._is_main_process():
+                    logger.info(
+                        f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
+                    )
+                    logger.info(
+                        f"It is more efficient to distribute the dataset shards (or data sources) across nodes. "
+                        f"You can do that by using a dataset with a number of shards that is a multiple of world_size={world_size}. "
" + f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}" + ) + ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank) + + return ex_iterable + + def __iter__(self): + if "torch" in sys.modules: + import torch.utils.data + + worker_info = torch.utils.data.get_worker_info() + if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None: + # We're a torch.utils.data.IterableDataset in a PyTorch worker process + yield from self._iter_pytorch() + return + + ex_iterable = self._prepare_ex_iterable_for_iteration() + if self._formatting: + formatter = get_formatter(self._formatting.format_type, features=self.features) + format_dict = ( + formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + ) + else: + format_dict = None + + if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"): + if ex_iterable.iter_arrow: + iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1) + else: + iterator = _convert_to_arrow(ex_iterable, batch_size=1) + for key, pa_table in iterator: + yield formatter.format_row(pa_table) + return + + for key, example in ex_iterable: + if self.features: + # `IterableDataset` automatically fills missing columns with None. + # This is done with `_apply_feature_types_on_example`. + example = _apply_feature_types_on_example( + example, self.features, token_per_repo_id=self._token_per_repo_id + ) + yield format_dict(example) if format_dict else example + + def iter(self, batch_size: int, drop_last_batch: bool = False): + """Iterate through the batches of size `batch_size`. + + Args: + batch_size (:obj:`int`): size of each batch to yield. + drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be + dropped + """ + + if self._formatting: + formatter = get_formatter(self._formatting.format_type, features=self.features) + format_dict = ( + formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + ) + else: + format_dict = None + + ex_iterable = self._prepare_ex_iterable_for_iteration() + if self._formatting and (ex_iterable.iter_arrow or self._formatting == "arrow"): + if ex_iterable.iter_arrow: + iterator = _batch_arrow_tables( + ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch + ) + else: + iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) + for key, pa_table in iterator: + yield formatter.format_batch(pa_table) + return + + iterator = iter(ex_iterable) + for key, example in iterator: + # If batched, first build the batch + examples = [example] + [example for key, example in islice(iterator, batch_size - 1)] + if drop_last_batch and len(examples) < batch_size: # ignore last batch + return + batch = _examples_to_batch(examples) + if self.features: + # `IterableDataset` automatically fills missing columns with None. + # This is done with `_apply_feature_types_on_batch`. + batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id) + yield format_dict(batch) if format_dict else batch + + @staticmethod + def from_generator( + generator: Callable, + features: Optional[Features] = None, + gen_kwargs: Optional[dict] = None, + ) -> "IterableDataset": + """Create an Iterable Dataset from a generator. + + Args: + generator (`Callable`): + A generator function that `yields` examples. 
+ features (`Features`, *optional*): + Dataset features. + gen_kwargs(`dict`, *optional*): + Keyword arguments to be passed to the `generator` callable. + You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`. + This can be used to improve shuffling and when iterating over the dataset with multiple workers. + + Returns: + `IterableDataset` + + Example: + + ```py + >>> def gen(): + ... yield {"text": "Good", "label": 0} + ... yield {"text": "Bad", "label": 1} + ... + >>> ds = IterableDataset.from_generator(gen) + ``` + + ```py + >>> def gen(shards): + ... for shard in shards: + ... with open(shard) as f: + ... for line in f: + ... yield {"line": line} + ... + >>> shards = [f"data{i}.txt" for i in range(32)] + >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards}) + >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer + >>> from torch.utils.data import DataLoader + >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards + ``` + """ + from .io.generator import GeneratorDatasetInputStream + + return GeneratorDatasetInputStream( + generator=generator, + features=features, + gen_kwargs=gen_kwargs, + streaming=True, + ).read() + + @staticmethod + def from_spark( + df: "pyspark.sql.DataFrame", + split: Optional[NamedSplit] = None, + features: Optional[Features] = None, + **kwargs, + ) -> "IterableDataset": + """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches. + + Args: + df (`pyspark.sql.DataFrame`): + The DataFrame containing the desired data. + split (`NamedSplit`, *optional*): + Split name to be assigned to the dataset. + features (`Features`, *optional*): + Dataset features. + + Returns: + [`IterableDataset`] + + Example: + + ```py + >>> df = spark.createDataFrame( + >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], + >>> columns=["id", "name"], + >>> ) + >>> ds = IterableDataset.from_spark(df) + ``` + """ + from .io.spark import SparkDatasetReader + + if sys.platform == "win32": + raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows") + + return SparkDatasetReader( + df, + split=split, + features=features, + streaming=True, + **kwargs, + ).read() + + @staticmethod + def from_file(filename: str) -> "IterableDataset": + """Instantiate a IterableDataset from Arrow table at filename. + + Args: + filename (`str`): + File name of the dataset. + + Returns: + [`IterableDataset`] + """ + pa_table_schema = read_schema_from_file(filename) + inferred_features = Features.from_arrow_schema(pa_table_schema) + ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename}) + return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features)) + + def with_format( + self, + type: Optional[str] = None, + ) -> "IterableDataset": + """ + Return a dataset with the specified format. + Supported formats: "arrow", or None for regular python objects. + The other formats are currently not implemented. 
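+
+        Example (an illustrative sketch, not from the library docs: with `type="arrow"`, iterating
+        yields 1-row `pyarrow.Table` objects instead of python dicts, per the `__iter__` implementation above):
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> ds = ds.with_format("arrow")
+        >>> first_row = next(iter(ds))  # a 1-row pyarrow.Table instead of a python dict
+        ```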
+ + Args: + + type (`str`, optional, default None): if set to "torch", the returned dataset + will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader + """ + type = get_format_type_from_alias(type) + # TODO(QL): add format_kwargs + # TODO(QL): add format_columns and return_all_columns + # TODO(QL): add pandas format + return IterableDataset( + ex_iterable=self._ex_iterable, + info=self._info.copy(), + split=self._split, + formatting=FormattingConfig(format_type=type), + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + def map( + self, + function: Optional[Callable] = None, + with_indices: bool = False, + input_columns: Optional[Union[str, List[str]]] = None, + batched: bool = False, + batch_size: Optional[int] = 1000, + drop_last_batch: bool = False, + remove_columns: Optional[Union[str, List[str]]] = None, + features: Optional[Features] = None, + fn_kwargs: Optional[dict] = None, + ) -> "IterableDataset": + """ + Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. + If your function returns a column that already exists, then it overwrites it. + The function is applied on-the-fly on the examples when iterating over the dataset. + + You can specify whether the function should be batched or not with the `batched` parameter: + + - If batched is `False`, then the function takes 1 example in and should return 1 example. + An example is a dictionary, e.g. `{"text": "Hello there !"}`. + - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. + A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. + - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. + Note that the last batch may have less than `n` examples. + A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. + + Args: + function (`Callable`, *optional*, defaults to `None`): + Function applied on-the-fly on the examples when you iterate on the dataset. + It must have one of the following signatures: + + - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` + - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` + - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` + - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True` + + For advanced usage, the function can also return a `pyarrow.Table`. + Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. + If no function is provided, default to identity function: `lambda x: x`. + with_indices (`bool`, defaults to `False`): + Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. + input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): + The columns to be passed into `function` + as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. + batched (`bool`, defaults to `False`): + Provide batch of examples to `function`. 
+            batch_size (`int`, *optional*, defaults to `1000`):
+                Number of examples per batch provided to `function` if `batched=True`.
+                If `batch_size <= 0` or `batch_size is None`, the full dataset is provided as a single batch to `function`.
+            drop_last_batch (`bool`, defaults to `False`):
+                Whether a last batch smaller than the batch_size should be
+                dropped instead of being processed by the function.
+            remove_columns (`[List[str]]`, *optional*, defaults to `None`):
+                Remove a selection of columns while doing the mapping.
+                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+                columns with names in `remove_columns`, these columns will be kept.
+            features (`[Features]`, *optional*, defaults to `None`):
+                Feature types of the resulting dataset.
+            fn_kwargs (`Dict`, *optional*, default `None`):
+                Keyword arguments to be passed to `function`.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> def add_prefix(example):
+        ...     example["text"] = "Review: " + example["text"]
+        ...     return example
+        >>> ds = ds.map(add_prefix)
+        >>> list(ds.take(3))
+        [{'label': 1,
+         'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+         {'label': 1,
+         'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+         {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
+        ```
+        """
+        if isinstance(input_columns, str):
+            input_columns = [input_columns]
+        if isinstance(remove_columns, str):
+            remove_columns = [remove_columns]
+        if function is None:
+            function = identity_func
+        if fn_kwargs is None:
+            fn_kwargs = {}
+        ex_iterable = MappedExamplesIterable(
+            TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+            if self._info.features is not None
+            else self._ex_iterable,
+            function=function,
+            with_indices=with_indices,
+            input_columns=input_columns,
+            batched=batched,
+            batch_size=batch_size,
+            drop_last_batch=drop_last_batch,
+            remove_columns=remove_columns,
+            fn_kwargs=fn_kwargs,
+            formatting=self._formatting,
+        )
+        info = self.info.copy()
+        info.features = features
+        return IterableDataset(
+            ex_iterable=ex_iterable,
+            info=info,
+            split=self._split,
+            formatting=self._formatting,
+            shuffling=copy.deepcopy(self._shuffling),
+            distributed=copy.deepcopy(self._distributed),
+            token_per_repo_id=self._token_per_repo_id,
+        )
+
+    def filter(
+        self,
+        function: Optional[Callable] = None,
+        with_indices=False,
+        input_columns: Optional[Union[str, List[str]]] = None,
+        batched: bool = False,
+        batch_size: Optional[int] = 1000,
+        fn_kwargs: Optional[dict] = None,
+    ) -> "IterableDataset":
+        """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+        The filtering is done on-the-fly when iterating over the dataset.
+
+        Args:
+            function (`Callable`):
+                Callable with one of the following signatures:
+
+                - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+                - `function(example: Dict[str, Any], idx: int) -> bool` if `with_indices=True, batched=False`
+                - `function(batch: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+                - `function(batch: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+                If no function is provided, it defaults to an always-`True` function: `lambda x: True`.
+            with_indices (`bool`, defaults to `False`):
+                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+            input_columns (`str` or `List[str]`, *optional*):
+                The columns to be passed into `function` as
+                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+            batched (`bool`, defaults to `False`):
+                Provide batch of examples to `function`.
+            batch_size (`int`, *optional*, defaults to `1000`):
+                Number of examples per batch provided to `function` if `batched=True`.
+            fn_kwargs (`Dict`, *optional*, defaults to `None`):
+                Keyword arguments to be passed to `function`.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> ds = ds.filter(lambda x: x["label"] == 0)
+        >>> list(ds.take(3))
+        [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+        {'label': 0,
+         'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+        {'label': 0,
+         'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+        ```
+        """
+        if isinstance(input_columns, str):
+            input_columns = [input_columns]
+
+        # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
+        info = copy.deepcopy(self._info)
+        info.features = None
+
+        # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
+        ex_iterable = FilteredExamplesIterable(
+            TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+            if self._info.features is not None
+            else self._ex_iterable,
+            function=function,
+            with_indices=with_indices,
+            input_columns=input_columns,
+            batched=batched,
+            batch_size=batch_size,
+            fn_kwargs=fn_kwargs,
+            formatting=self._formatting,
+        )
+        return IterableDataset(
+            ex_iterable=ex_iterable,
+            info=info,
+            split=self._split,
+            formatting=self._formatting,
+            shuffling=copy.deepcopy(self._shuffling),
+            distributed=copy.deepcopy(self._distributed),
+            token_per_repo_id=self._token_per_repo_id,
+        )
+
+    def shuffle(
+        self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+    ) -> "IterableDataset":
+        """
+        Randomly shuffles the elements of this dataset.
+
+        This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
+        replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+        equal to the full size of the dataset is required.
+
+        For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+        initially select a random element from only the first 1000 elements in the buffer. 
Once an element is
+        selected, its space in the buffer is replaced by the next (i.e. the 1,001st) element,
+        maintaining the 1,000-element buffer.
+
+        If the dataset is made of several shards, it also shuffles the order of the shards.
+        However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`],
+        then the order of the shards is kept unchanged.
+
+        Args:
+            seed (`int`, *optional*, defaults to `None`):
+                Random seed that will be used to shuffle the dataset.
+                It is used to sample from the shuffle buffer and also to shuffle the data shards.
+            generator (`numpy.random.Generator`, *optional*):
+                NumPy random Generator to use to compute the permutation of the dataset rows.
+                If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+            buffer_size (`int`, defaults to `1000`):
+                Size of the buffer.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> list(ds.take(3))
+        [{'label': 1,
+         'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+        {'label': 1,
+         'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+        {'label': 1, 'text': 'effective but too-tepid biopic'}]
+        >>> shuffled_ds = ds.shuffle(seed=42)
+        >>> list(shuffled_ds.take(3))
+        [{'label': 1,
+         'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+        {'label': 1,
+         'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+        {'label': 1,
+         'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+        ```
+        """
+        if generator is None:
+            generator = np.random.default_rng(seed)
+        else:
+            generator = deepcopy(generator)
+        shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
+        return IterableDataset(
+            ex_iterable=BufferShuffledExamplesIterable(
+                self._ex_iterable, buffer_size=buffer_size, generator=generator
+            ).shuffle_data_sources(generator),
+            info=self._info.copy(),
+            split=self._split,
+            formatting=self._formatting,
+            shuffling=shuffling,
+            distributed=copy.deepcopy(self._distributed),
+            token_per_repo_id=self._token_per_repo_id,
+        )
+
+    def set_epoch(self, epoch: int):
+        """Set the current epoch. When the dataset is shuffled, the epoch is combined with the shuffling seed so that the data is reshuffled differently at each epoch."""
+        self._epoch = epoch
+
+    def skip(self, n) -> "IterableDataset":
+        """
+        Create a new [`IterableDataset`] that skips the first `n` elements.
+
+        Args:
+            n (`int`):
+                Number of elements to skip.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> list(ds.take(3))
+        [{'label': 1,
+         'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+        {'label': 1,
+         'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . 
tolkien\'s middle-earth .'}, + {'label': 1, 'text': 'effective but too-tepid biopic'}] + >>> ds = ds.skip(1) + >>> list(ds.take(3)) + [{'label': 1, + 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, + {'label': 1, 'text': 'effective but too-tepid biopic'}, + {'label': 1, + 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] + ``` + """ + ex_iterable = SkipExamplesIterable(self._ex_iterable, n) + return IterableDataset( + ex_iterable=ex_iterable, + info=self._info.copy(), + split=self._split, + formatting=self._formatting, + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + def take(self, n) -> "IterableDataset": + """ + Create a new [`IterableDataset`] with only the first `n` elements. + + Args: + n (`int`): + Number of elements to take. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) + >>> small_ds = ds.take(2) + >>> list(small_ds) + [{'label': 1, + 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, + {'label': 1, + 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] + ``` + """ + ex_iterable = TakeExamplesIterable(self._ex_iterable, n) + return IterableDataset( + ex_iterable=ex_iterable, + info=self._info.copy(), + split=self._split, + formatting=self._formatting, + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + @property + def column_names(self) -> Optional[List[str]]: + """Names of the columns in the dataset. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) + >>> ds.column_names + ['text', 'label'] + ``` + """ + return list(self._info.features.keys()) if self._info.features is not None else None + + def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset": + """Add column to Dataset. + + Args: + name (str): Column name. + column (list or np.array): Column data to be added. + + Returns: + `IterableDataset` + """ + return self.map(partial(add_column_fn, name=name, column=column), with_indices=True) + + def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": + """ + Rename a column in the dataset, and move the features associated to the original column under the new column + name. + + Args: + original_column_name (`str`): + Name of the column to rename. + new_column_name (`str`): + New name for the column. + + Returns: + `IterableDataset`: A copy of the dataset with a renamed column. 
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) + >>> next(iter(ds)) + {'label': 1, + 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} + >>> ds = ds.rename_column("text", "movie_review") + >>> next(iter(ds)) + {'label': 1, + 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} + ``` + """ + return self.rename_columns({original_column_name: new_column_name}) + + def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset": + """ + Rename several columns in the dataset, and move the features associated to the original columns under + the new column names. + + Args: + column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names + + Returns: + `IterableDataset`: A copy of the dataset with renamed columns + """ + + original_features = self._info.features.copy() if self._info.features else None + ds_iterable = self.map( + partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping) + ) + if original_features is not None: + ds_iterable._info.features = Features( + { + column_mapping[col] if col in column_mapping.keys() else col: feature + for col, feature in original_features.items() + } + ) + # check that it's still valid, especially with regard to task templates + try: + ds_iterable._info.copy() + except ValueError: + ds_iterable._info.task_templates = None + return ds_iterable + + def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": + """ + Remove one or several column(s) in the dataset and the features associated to them. + The removal is done on-the-fly on the examples when iterating over the dataset. + + + Args: + column_names (`Union[str, List[str]]`): + Name of the column(s) to remove. + + Returns: + `IterableDataset`: A copy of the dataset object without the columns to remove. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) + >>> next(iter(ds)) + {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} + >>> ds = ds.remove_columns("label") + >>> next(iter(ds)) + {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} + ``` + """ + original_features = self._info.features.copy() if self._info.features else None + ds_iterable = self.map(remove_columns=column_names) + if original_features is not None: + ds_iterable._info.features = original_features.copy() + for col, _ in original_features.items(): + if col in column_names: + del ds_iterable._info.features[col] + # check that it's still valid, especially with regard to task templates + try: + ds_iterable._info.copy() + except ValueError: + ds_iterable._info.task_templates = None + + return ds_iterable + + def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": + """Select one or several column(s) in the dataset and the features + associated to them. 
The selection is done on-the-fly on the examples + when iterating over the dataset. + + + Args: + column_names (`Union[str, List[str]]`): + Name of the column(s) to select. + + Returns: + `IterableDataset`: A copy of the dataset object with selected columns. + + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) + >>> next(iter(ds)) + {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} + >>> ds = ds.select_columns("text") + >>> next(iter(ds)) + {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} + ``` + """ + if isinstance(column_names, str): + column_names = [column_names] + + if self._info: + info = copy.deepcopy(self._info) + if self._info.features is not None: + for column_name in column_names: + if column_name not in self._info.features: + raise ValueError( + f"Column name {column_name} not in the " + "dataset. Columns in the dataset: " + f"{list(self._info.features.keys())}." + ) + info.features = Features({c: info.features[c] for c in column_names}) + # check that it's still valid, especially with regard to task templates + try: + info.copy() + except ValueError: + info.task_templates = None + + ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) + return IterableDataset( + ex_iterable=ex_iterable, + info=info, + split=self._split, + formatting=self._formatting, + shuffling=self._shuffling, + distributed=self._distributed, + token_per_repo_id=self._token_per_repo_id, + ) + + def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": + """Cast column to feature for decoding. + + Args: + column (`str`): + Column name. + feature (`Feature`): + Target feature. 
+ + Returns: + `IterableDataset` + + Example: + + ```py + >>> from datasets import load_dataset, Audio + >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) + >>> ds.features + {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), + 'english_transcription': Value(dtype='string', id=None), + 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), + 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), + 'path': Value(dtype='string', id=None), + 'transcription': Value(dtype='string', id=None)} + >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) + >>> ds.features + {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), + 'english_transcription': Value(dtype='string', id=None), + 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), + 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), + 'path': Value(dtype='string', id=None), + 'transcription': Value(dtype='string', id=None)} + ``` + """ + info = self._info.copy() + info.features[column] = feature + # check that it's still valid, especially with regard to task templates + try: + info.copy() + except ValueError: + info.task_templates = None + return IterableDataset( + ex_iterable=self._ex_iterable, + info=info, + split=self._split, + formatting=self._formatting, + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + def cast( + self, + features: Features, + ) -> "IterableDataset": + """ + Cast the dataset to a new set of features. + + Args: + features ([`Features`]): + New features to cast the dataset to. + The name of the fields in the features must match the current column names. + The type of the data must also be convertible from one type to the other. + For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. + + Returns: + `IterableDataset`: A copy of the dataset with casted features. 
+ + Example: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) + >>> ds.features + {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), + 'text': Value(dtype='string', id=None)} + >>> new_features = ds.features.copy() + >>> new_features["label"] = ClassLabel(names=["bad", "good"]) + >>> new_features["text"] = Value("large_string") + >>> ds = ds.cast(new_features) + >>> ds.features + {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None), + 'text': Value(dtype='large_string', id=None)} + ``` + """ + info = self._info.copy() + info.features = features + # check that it's still valid, especially with regard to task templates + try: + info.copy() + except ValueError: + info.task_templates = None + return IterableDataset( + ex_iterable=self._ex_iterable, + info=info, + split=self._split, + formatting=self._formatting, + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + def _step(self, step: int, offset: int) -> "IterableDataset": + ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset) + return IterableDataset( + ex_iterable=ex_iterable, + info=self._info.copy(), + split=self._split, + formatting=self._formatting, + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + def _resolve_features(self): + if self.features is not None: + return self + elif isinstance(self._ex_iterable, TypedExamplesIterable): + features = self._ex_iterable.features + else: + features = _infer_features_from_batch(self.with_format(None)._head()) + info = self.info.copy() + info.features = features + return IterableDataset( + ex_iterable=self._ex_iterable, + info=info, + split=self._split, + formatting=self._formatting, + shuffling=copy.deepcopy(self._shuffling), + distributed=copy.deepcopy(self._distributed), + token_per_repo_id=self._token_per_repo_id, + ) + + +def _concatenate_iterable_datasets( + dsets: List[IterableDataset], + info: Optional[DatasetInfo] = None, + split: Optional[NamedSplit] = None, + axis: int = 0, +) -> IterableDataset: + """ + Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`. + Missing data are filled with None values. + + + + Args: + dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate. + info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. + split (`NamedSplit`, optional): Name of the dataset split. + axis (``{0, 1}``, default ``0``, meaning over rows): + Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns + (horizontally). 
+
+            *New in version 1.6.0*
+
+    Example:
+
+    ```py
+    >>> ds3 = _concatenate_iterable_datasets([ds1, ds2])
+    ```
+    """
+    dsets = [d._resolve_features() for d in dsets]
+
+    # Perform checks (and a potential cast if axis=0)
+    if axis == 0:
+        _check_if_features_can_be_aligned([dset.features for dset in dsets])
+    else:
+        _check_column_names([col_name for dset in dsets for col_name in dset.features])
+
+    # TODO: improve this to account for a mix of ClassLabel and Value for example
+    # right now it would keep the type of the first dataset in the list
+    features = Features(
+        {k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()}
+    )
+
+    ex_iterables = [d._ex_iterable for d in dsets]
+    if axis == 0:
+        ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+    else:
+        ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+    # Set new info - we update the features
+    # setting the features also ensures to fill missing columns with None
+    if info is None:
+        info = DatasetInfo.from_merge([d.info for d in dsets])
+    else:
+        info = info.copy()
+    info.features = features
+    # Get all the auth tokens per repository - in case the datasets come from different private repositories
+    token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()}
+    # Return new dataset
+    return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _interleave_iterable_datasets(
+    datasets: List[IterableDataset],
+    probabilities: Optional[List[float]] = None,
+    seed: Optional[int] = None,
+    info: Optional[DatasetInfo] = None,
+    split: Optional[NamedSplit] = None,
+    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> IterableDataset:
+    """
+    Interleave several iterable datasets (sources) into a single iterable dataset.
+    The new iterable dataset alternates between the sources to yield examples.
+    If `probabilities = None` (default), the iterable dataset cycles through the sources in order for each next example in the iteration.
+    If `probabilities` is not `None`, the iterable dataset samples a random source according to the provided probabilities for each next example in the iteration.
+
+    Args:
+        datasets (`List[IterableDataset]`): list of datasets to interleave
+        probabilities (`List[float]`, *optional*, defaults to `None`): If specified, the new iterable dataset samples
+            examples from one source at a time according to these probabilities.
+        seed (`int`, *optional*, defaults to `None`): The random seed used to choose a source for each example.
+        stopping_strategy (`str`, defaults to `first_exhausted`):
+            Two strategies are proposed right now.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous (see the example below):
+            - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
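+
+            For example (with illustrative sizes), interleaving two datasets of 2 and 4 samples with no
+            probabilities under `all_exhausted` keeps cycling through both sources, restarting the smaller
+            one once it is exhausted, until the larger one runs out: `max_length_datasets * nb_dataset = 4 * 2 = 8` samples in total.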
+
+    Output:
+        `datasets.IterableDataset`
+    """
+    datasets = [d._resolve_features() for d in datasets]
+
+    # Perform checks
+    _check_if_features_can_be_aligned([dset.features for dset in datasets])
+
+    # TODO: improve this to account for a mix of ClassLabel and Value for example
+    # right now it would keep the type of the first dataset in the list
+    features = Features(
+        {k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()}
+    )
+
+    ex_iterables = [d._ex_iterable for d in datasets]
+
+    # Use cycling or random cycling of sources
+    if probabilities is None:
+        ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy)
+    else:
+        generator = np.random.default_rng(seed)
+        ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
+            ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy
+        )
+    # Set new info - we update the features
+    # setting the features also ensures to fill missing columns with None
+    if info is None:
+        info = DatasetInfo.from_merge([d.info for d in datasets])
+    else:
+        info = info.copy()
+    info.features = features
+    # Get all the auth tokens per repository - in case the datasets come from different private repositories
+    token_per_repo_id = {
+        repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items()
+    }
+    # Return new dataset
+    return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset:
+    """
+    Split an iterable dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+    If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+    then the shards are evenly assigned across the nodes, which is the most optimized.
+    Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+    Args:
+        dataset ([`IterableDataset`]):
+            The iterable dataset to split by node.
+        rank (`int`):
+            Rank of the current node.
+        world_size (`int`):
+            Total number of nodes.
+
+    Returns:
+        [`IterableDataset`]: The iterable dataset to be used on the node at rank `rank`.
+    """
+    if dataset._distributed:
+        # Compose with the existing distributed config: the combined rank must be computed
+        # with the incoming `world_size` *before* it is scaled by the existing one,
+        # otherwise the combined rank can exceed the combined world size.
+        rank = world_size * dataset._distributed.rank + rank
+        world_size = world_size * dataset._distributed.world_size
+    distributed = DistributedConfig(rank=rank, world_size=world_size)
+    return IterableDataset(
+        ex_iterable=dataset._ex_iterable,
+        info=dataset._info.copy(),
+        split=dataset._split,
+        formatting=dataset._formatting,
+        shuffling=copy.deepcopy(dataset._shuffling),
+        distributed=distributed,
+        token_per_repo_id=dataset._token_per_repo_id,
+    )
diff --git a/testbed/huggingface__datasets/src/datasets/keyhash.py b/testbed/huggingface__datasets/src/datasets/keyhash.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c75fcfd7ffb300aac1ffd0fc822287f21b56f8a
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/keyhash.py
@@ -0,0 +1,104 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+
+"""
+Hashing function for dataset keys using `hashlib.md5`
+
+Requirements for the hash function:
+
+- Provides a uniformly distributed hash over the input space
+- Adequately fast
+- Works with multiple input types (in this case, `str`, `int` or `bytes`)
+- Platform independent (generates the same hash on different OSes and systems)
+
+The hashing function provides a unique 128-bit integer hash of the key provided.
+
+The split name is used here as the hash salt to avoid having the same hashes
+in different splits due to the same keys
+"""
+
+from typing import Union
+
+from huggingface_hub.utils import insecure_hashlib
+
+
+def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
+    """
+    Returns the input hash_data in its bytes form
+
+    Args:
+        hash_data: the hash salt/key to be converted to bytes
+    """
+    if isinstance(hash_data, bytes):
+        # Data is already in bytes, return it as is
+        return hash_data
+    elif isinstance(hash_data, str):
+        # Keep the data as is, to be encoded to UTF-8 later
+        # However, replace `\\` with `/` for Windows compatibility
+        hash_data = hash_data.replace("\\", "/")
+    elif isinstance(hash_data, int):
+        hash_data = str(hash_data)
+    else:
+        # If data is not of a supported type, raise an error
+        raise InvalidKeyError(hash_data)
+
+    return hash_data.encode("utf-8")
+
+
+class InvalidKeyError(Exception):
+    """Raised when a given key is of an invalid datatype."""
+
+    def __init__(self, hash_data):
+        self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
+        self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
+        self.suffix = "\nKeys should be either str, int or bytes type"
+        super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class DuplicatedKeysError(Exception):
+    """Raised when a duplicate key is found."""
+
+    def __init__(self, key, duplicate_key_indices, fix_msg=""):
+        self.key = key
+        self.duplicate_key_indices = duplicate_key_indices
+        self.fix_msg = fix_msg
+        self.prefix = "Found multiple examples generated with the same key"
+        if len(duplicate_key_indices) <= 20:
+            self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
+        else:
+            self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... 
({len(duplicate_key_indices) - 20} more) have the key {key}" + self.suffix = "\n" + fix_msg if fix_msg else "" + super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}") + + +class KeyHasher: + """KeyHasher class for providing hash using md5""" + + def __init__(self, hash_salt: str): + self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt)) + + def hash(self, key: Union[str, int, bytes]) -> int: + """Returns 128-bits unique hash of input key + + Args: + key: the input key to be hashed (should be str, int or bytes) + + Returns: 128-bit int hash key""" + md5 = self._split_md5.copy() + byte_key = _as_bytes(key) + md5.update(byte_key) + # Convert to integer with hexadecimal conversion + return int(md5.hexdigest(), 16) diff --git a/testbed/huggingface__datasets/src/datasets/load.py b/testbed/huggingface__datasets/src/datasets/load.py new file mode 100644 index 0000000000000000000000000000000000000000..8bdd580006f803dbb81d42c67a262cb70f4fde93 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/load.py @@ -0,0 +1,2242 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""Access datasets.""" +import filecmp +import importlib +import inspect +import json +import os +import posixpath +import shutil +import time +import warnings +from collections import Counter +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union + +import fsspec +import requests +from huggingface_hub import DatasetCard, DatasetCardData, HfApi + +from . 
import config +from .arrow_dataset import Dataset +from .builder import BuilderConfig, DatasetBuilder +from .data_files import ( + DEFAULT_PATTERNS_ALL, + DataFilesDict, + DataFilesList, + EmptyDatasetError, + get_data_patterns, + get_metadata_patterns, + sanitize_patterns, +) +from .dataset_dict import DatasetDict, IterableDatasetDict +from .download.download_config import DownloadConfig +from .download.download_manager import DownloadMode +from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin +from .exceptions import DataFilesNotFoundError, DatasetNotFoundError +from .features import Features +from .fingerprint import Hasher +from .info import DatasetInfo, DatasetInfosDict +from .iterable_dataset import IterableDataset +from .metric import Metric +from .naming import camelcase_to_snakecase, snakecase_to_camelcase +from .packaged_modules import ( + _EXTENSION_TO_MODULE, + _MODULE_SUPPORTS_METADATA, + _MODULE_TO_EXTENSIONS, + _PACKAGED_DATASETS_MODULES, + _hash_python_lines, +) +from .splits import Split +from .utils._filelock import FileLock +from .utils.deprecation_utils import deprecated +from .utils.file_utils import ( + OfflineModeIsEnabled, + _raise_if_offline_mode_is_enabled, + cached_path, + head_hf_s3, + hf_github_url, + init_hf_modules, + is_relative_path, + relative_to_absolute_path, + url_or_path_join, +) +from .utils.hub import hf_hub_url +from .utils.info_utils import VerificationMode, is_small_dataset +from .utils.logging import get_logger +from .utils.metadata import MetadataConfigs +from .utils.py_utils import get_imports +from .utils.version import Version + + +logger = get_logger(__name__) + +ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"] + + +def init_dynamic_modules( + name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None +): + """ + Create a module with name `name` in which you can add dynamic modules + such as metrics or datasets. The module can be imported using its name. + The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can + be overridden by specifying a path to another directory in `hf_modules_cache`. 
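+
+    A minimal usage sketch (the default location shown is an assumption based on the
+    `HF_MODULE_CACHE` default mentioned above):
+
+    ```py
+    >>> dynamic_modules_path = init_dynamic_modules()
+    >>> # e.g. '~/.cache/huggingface/modules/datasets_modules'
+    ```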
+ """ + hf_modules_cache = init_hf_modules(hf_modules_cache) + dynamic_modules_path = os.path.join(hf_modules_cache, name) + os.makedirs(dynamic_modules_path, exist_ok=True) + if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")): + with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"): + pass + return dynamic_modules_path + + +def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]: + """Import a module at module_path and return its main class: + - a DatasetBuilder if dataset is True + - a Metric if dataset is False + """ + module = importlib.import_module(module_path) + + if dataset: + main_cls_type = DatasetBuilder + else: + main_cls_type = Metric + + # Find the main class in our imported module + module_main_cls = None + for name, obj in module.__dict__.items(): + if inspect.isclass(obj) and issubclass(obj, main_cls_type): + if inspect.isabstract(obj): + continue + module_main_cls = obj + obj_module = inspect.getmodule(obj) + if obj_module is not None and module == obj_module: + break + + return module_main_cls + + +class _InitializeConfiguredDatasetBuilder: + """ + From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class + See also ConfiguredDatasetBuilder.__reduce__ + When called with the param value as the only argument, returns an + un-initialized instance of the parameterized class. Subsequent __setstate__ + will be called by pickle. + """ + + def __call__(self, builder_cls, metadata_configs, default_config_name, name): + # make a simple object which has no complex __init__ (this one will do) + obj = _InitializeConfiguredDatasetBuilder() + obj.__class__ = configure_builder_class( + builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name + ) + return obj + + +def configure_builder_class( + builder_cls: Type[DatasetBuilder], + builder_configs: List[BuilderConfig], + default_config_name: Optional[str], + dataset_name: str, +) -> Type[DatasetBuilder]: + """ + Dynamically create a builder class with custom builder configs parsed from README.md file, + i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list. 
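+
+    For instance (an illustrative sketch), configuring the packaged ``Parquet`` builder class for a
+    dataset named ``my_dataset`` yields a class named ``ParquetMyDataset`` whose ``BUILDER_CONFIGS``
+    are the configs parsed from the README.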
+    """
+
+    class ConfiguredDatasetBuilder(builder_cls):
+        BUILDER_CONFIGS = builder_configs
+        DEFAULT_CONFIG_NAME = default_config_name
+
+        __module__ = builder_cls.__module__  # so that the actual packaged builder can be imported
+
+        def __reduce__(self):  # to make the dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
+            parent_builder_cls = self.__class__.__mro__[1]
+            return (
+                _InitializeConfiguredDatasetBuilder(),
+                (
+                    parent_builder_cls,
+                    self.BUILDER_CONFIGS,
+                    self.DEFAULT_CONFIG_NAME,
+                    self.dataset_name,
+                ),
+                self.__dict__.copy(),
+            )
+
+    ConfiguredDatasetBuilder.__name__ = (
+        f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+    )
+    ConfiguredDatasetBuilder.__qualname__ = (
+        f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+    )
+
+    return ConfiguredDatasetBuilder
+
+
+def get_dataset_builder_class(
+    dataset_module: "DatasetModule", dataset_name: Optional[str] = None
+) -> Type[DatasetBuilder]:
+    builder_cls = import_main_class(dataset_module.module_path)
+    if dataset_module.builder_configs_parameters.builder_configs:
+        builder_cls = configure_builder_class(
+            builder_cls,
+            builder_configs=dataset_module.builder_configs_parameters.builder_configs,
+            default_config_name=dataset_module.builder_configs_parameters.default_config_name,
+            dataset_name=dataset_name,
+        )
+    return builder_cls
+
+
+def files_to_hash(file_paths: List[str]) -> str:
+    """
+    Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
+    """
+    # List all python files in directories if directories are supplied as part of external imports
+    to_use_files: List[Union[Path, str]] = []
+    for file_path in file_paths:
+        if os.path.isdir(file_path):
+            to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
+        else:
+            to_use_files.append(file_path)
+
+    # Get the code from all these files
+    lines = []
+    for file_path in to_use_files:
+        with open(file_path, encoding="utf-8") as f:
+            lines.extend(f.readlines())
+    return _hash_python_lines(lines)
+
+
+def increase_load_count(name: str, resource_type: str):
+    """Update the download count of a dataset or metric."""
+    if not config.HF_DATASETS_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
+        try:
+            head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
+        except Exception:
+            pass
+
+
+def _download_additional_modules(
+    name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig]
+) -> List[Tuple[str, str]]:
+    """
+    Download the additional modules for a module ``<name>.py`` at URL (or local path) ``<base_path>/<name>.py``.
+    The imports must have been parsed first using ``get_imports``.
+
+    If some modules need to be installed with pip, an error is raised showing how to install them.
+    This function returns the list of downloaded modules as tuples (import_name, module_file_path).
+
+    The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
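+
+    For reference (an illustrative sketch of the expected input), each entry of ``imports`` is a 4-tuple
+    ``(import_type, import_name, import_path, sub_directory)`` as produced by ``get_imports``, e.g.:
+
+    ```py
+    >>> imports = [
+    ...     ("internal", "utils", "utils", None),  # hypothetical relative import, downloaded from base_path
+    ...     ("library", "numpy", "numpy", None),  # library import, only checked for installation
+    ... ]
+    ```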
+ """ + local_imports = [] + library_imports = [] + download_config = download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading extra modules" + for import_type, import_name, import_path, sub_directory in imports: + if import_type == "library": + library_imports.append((import_name, import_path)) # Import from a library + continue + + if import_name == name: + raise ValueError( + f"Error in the {name} script, importing relative {import_name} module " + f"but {import_name} is the name of the script. " + f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' " + f"comment pointing to the original relative import file path." + ) + if import_type == "internal": + url_or_filename = url_or_path_join(base_path, import_path + ".py") + elif import_type == "external": + url_or_filename = import_path + else: + raise ValueError("Wrong import_type") + + local_import_path = cached_path( + url_or_filename, + download_config=download_config, + ) + if sub_directory is not None: + local_import_path = os.path.join(local_import_path, sub_directory) + local_imports.append((import_name, local_import_path)) + + # Check library imports + needs_to_be_installed = {} + for library_import_name, library_import_path in library_imports: + try: + lib = importlib.import_module(library_import_name) # noqa F841 + except ImportError: + if library_import_name not in needs_to_be_installed or library_import_path != library_import_name: + needs_to_be_installed[library_import_name] = library_import_path + if needs_to_be_installed: + _dependencies_str = "dependencies" if len(needs_to_be_installed) > 1 else "dependency" + _them_str = "them" if len(needs_to_be_installed) > 1 else "it" + if "sklearn" in needs_to_be_installed.keys(): + needs_to_be_installed["sklearn"] = "scikit-learn" + raise ImportError( + f"To be able to use {name}, you need to install the following {_dependencies_str}: " + f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install " + f"{' '.join(needs_to_be_installed.values())}' for instance." 
+ ) + return local_imports + + +def _copy_script_and_other_resources_in_importable_dir( + name: str, + importable_directory_path: str, + subdirectory_name: str, + original_local_path: str, + local_imports: List[Tuple[str, str]], + additional_files: List[Tuple[str, str]], + download_mode: Optional[Union[DownloadMode, str]], +) -> str: + """Copy a script and its required imports to an importable directory + + Args: + name (str): name of the resource to load + importable_directory_path (str): path to the loadable folder in the dynamic modules directory + subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script + original_local_path (str): local path to the resource script + local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy) + additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy) + download_mode (Optional[Union[DownloadMode, str]]): download mode + + Return: + importable_local_file: path to an importable module with importlib.import_module + """ + + # Define a directory with a unique name in our dataset or metric folder + # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py + # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together + importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name) + importable_local_file = os.path.join(importable_subdirectory, name + ".py") + # Prevent parallel disk operations + lock_path = importable_directory_path + ".lock" + with FileLock(lock_path): + # Create main dataset/metrics folder if needed + if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path): + shutil.rmtree(importable_directory_path) + os.makedirs(importable_directory_path, exist_ok=True) + + # add an __init__ file to the main dataset folder if needed + init_file_path = os.path.join(importable_directory_path, "__init__.py") + if not os.path.exists(init_file_path): + with open(init_file_path, "w"): + pass + + # Create hash dataset folder if needed + os.makedirs(importable_subdirectory, exist_ok=True) + # add an __init__ file to the hash dataset folder if needed + init_file_path = os.path.join(importable_subdirectory, "__init__.py") + if not os.path.exists(init_file_path): + with open(init_file_path, "w"): + pass + + # Copy dataset.py file in hash folder if needed + if not os.path.exists(importable_local_file): + shutil.copyfile(original_local_path, importable_local_file) + # Record metadata associating original dataset path with local unique folder + # Use os.path.splitext to split extension from importable_local_file + meta_path = os.path.splitext(importable_local_file)[0] + ".json" + if not os.path.exists(meta_path): + meta = {"original file path": original_local_path, "local file path": importable_local_file} + # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json + with open(meta_path, "w", encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + # Copy all the additional imports + for import_name, import_path in local_imports: + if os.path.isfile(import_path): + full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py") + if not os.path.exists(full_path_local_import): + shutil.copyfile(import_path, full_path_local_import) + elif os.path.isdir(import_path): + full_path_local_import = os.path.join(importable_subdirectory, import_name) + if 
not os.path.exists(full_path_local_import): + shutil.copytree(import_path, full_path_local_import) + else: + raise ImportError(f"Error with local import at {import_path}") + + # Copy additional files like dataset_infos.json file if needed + for file_name, original_path in additional_files: + destination_additional_path = os.path.join(importable_subdirectory, file_name) + if not os.path.exists(destination_additional_path) or not filecmp.cmp( + original_path, destination_additional_path + ): + shutil.copyfile(original_path, destination_additional_path) + return importable_local_file + + +def _create_importable_file( + local_path: str, + local_imports: List[Tuple[str, str]], + additional_files: List[Tuple[str, str]], + dynamic_modules_path: str, + module_namespace: str, + name: str, + download_mode: DownloadMode, +) -> Tuple[str, str]: + importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--")) + Path(importable_directory_path).mkdir(parents=True, exist_ok=True) + (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True) + hash = files_to_hash([local_path] + [loc[1] for loc in local_imports]) + importable_local_file = _copy_script_and_other_resources_in_importable_dir( + name=name.split("/")[-1], + importable_directory_path=importable_directory_path, + subdirectory_name=hash, + original_local_path=local_path, + local_imports=local_imports, + additional_files=additional_files, + download_mode=download_mode, + ) + logger.debug(f"Created importable dataset file at {importable_local_file}") + module_path = ".".join( + [os.path.basename(dynamic_modules_path), module_namespace, name.replace("/", "--"), hash, name.split("/")[-1]] + ) + return module_path, hash + + +def infer_module_for_data_files_list( + data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None +) -> Optional[Tuple[str, str]]: + """Infer module (and builder kwargs) from list of data files. + + It picks the module based on the most common file extension. + In case of a draw ".parquet" is the favorite, and then alphabetical order. + + Args: + data_files_list (DataFilesList): List of data files. + download_config (bool or str, optional): mainly use use_auth_token or storage_options to support different platforms and auth types. + + Returns: + tuple[str, dict[str, Any]]: Tuple with + - inferred module name + - dict of builder kwargs + """ + extensions_counter = Counter( + "." + suffix.lower() + for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE] + for suffix in xbasename(filepath).split(".")[1:] + ) + if extensions_counter: + + def sort_key(ext_count: Tuple[str, int]) -> Tuple[int, bool]: + """Sort by count and set ".parquet" as the favorite in case of a draw""" + ext, count = ext_count + return (count, ext == ".parquet", ext) + + for ext, _ in sorted(extensions_counter.items(), key=sort_key, reverse=True): + if ext in _EXTENSION_TO_MODULE: + return _EXTENSION_TO_MODULE[ext] + elif ext == ".zip": + return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config) + return None, {} + + +def infer_module_for_data_files_list_in_archives( + data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None +) -> Optional[Tuple[str, str]]: + """Infer module (and builder kwargs) from list of archive data files. + + Args: + data_files_list (DataFilesList): List of data files. 
+ download_config (bool or str, optional): mainly use use_auth_token or storage_options to support different platforms and auth types. + + Returns: + tuple[str, dict[str, Any]]: Tuple with + - inferred module name + - dict of builder kwargs + """ + archived_files = [] + archive_files_counter = 0 + for filepath in data_files_list: + if str(filepath).endswith(".zip"): + archive_files_counter += 1 + if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE: + break + extracted = xjoin(StreamingDownloadManager().extract(filepath), "**") + archived_files += [ + f.split("::")[0] + for f in xglob(extracted, recursive=True, download_config=download_config)[ + : config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE + ] + ] + extensions_counter = Counter( + "." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:] + ) + if extensions_counter: + most_common = extensions_counter.most_common(1)[0][0] + if most_common in _EXTENSION_TO_MODULE: + return _EXTENSION_TO_MODULE[most_common] + return None, {} + + +def infer_module_for_data_files( + data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None +) -> Tuple[Optional[str], Dict[str, Any]]: + """Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match. + + Args: + data_files ([`DataFilesDict`]): Dict of list of data files. + path (str, *optional*): Dataset name or path. + download_config ([`DownloadConfig`], *optional*): + Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files. + + Returns: + tuple[str, dict[str, Any]]: Tuple with + - inferred module name + - builder kwargs + """ + split_modules = { + split: infer_module_for_data_files_list(data_files_list, download_config=download_config) + for split, data_files_list in data_files.items() + } + module_name, default_builder_kwargs = next(iter(split_modules.values())) + if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()): + raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}") + if not module_name: + raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else "")) + return module_name, default_builder_kwargs + + +def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str: + """ + Used to update hash of packaged modules which is used for creating unique cache directories to reflect + different config parameters which are passed in metadata from readme. 
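+
+    A minimal sketch (hypothetical hash and parameters):
+
+    ```py
+    >>> new_hash = update_hash_with_config_parameters("0123abcd", {"version": "1.0.0", "sep": ","})
+    >>> # only `sep` contributes to the new hash; `config_name`, `version` and `description` are excluded
+    ```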
+    """
+    params_to_exclude = {"config_name", "version", "description"}
+    params_to_add_to_hash = {
+        param: value for param, value in sorted(config_parameters.items()) if param not in params_to_exclude
+    }
+    m = Hasher()
+    m.update(hash)
+    m.update(params_to_add_to_hash)
+    return m.hexdigest()
+
+
+def create_builder_configs_from_metadata_configs(
+    module_path: str,
+    metadata_configs: MetadataConfigs,
+    supports_metadata: bool,
+    base_path: Optional[str] = None,
+    default_builder_kwargs: Optional[Dict[str, Any]] = None,
+    download_config: Optional[DownloadConfig] = None,
+) -> Tuple[List[BuilderConfig], str]:
+    builder_cls = import_main_class(module_path)
+    builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
+    default_config_name = metadata_configs.get_default_config_name()
+    builder_configs = []
+
+    base_path = base_path if base_path is not None else ""
+    for config_name, config_params in metadata_configs.items():
+        config_data_files = config_params.get("data_files")
+        config_data_dir = config_params.get("data_dir")
+        config_base_path = base_path + "/" + config_data_dir if config_data_dir else base_path
+        try:
+            config_patterns = (
+                sanitize_patterns(config_data_files)
+                if config_data_files is not None
+                else get_data_patterns(config_base_path)
+            )
+            config_data_files_dict = DataFilesDict.from_patterns(
+                config_patterns,
+                base_path=config_base_path,
+                allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+                download_config=download_config,
+            )
+        except EmptyDatasetError as e:
+            raise EmptyDatasetError(
+                f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}',"
+                f" check `data_files` and `data_dir` parameters in the `configs` YAML field in README.md. "
+            ) from e
+        if config_data_files is None and supports_metadata and config_patterns != DEFAULT_PATTERNS_ALL:
+            try:
+                config_metadata_patterns = get_metadata_patterns(base_path)
+            except FileNotFoundError:
+                config_metadata_patterns = None
+            if config_metadata_patterns is not None:
+                config_metadata_data_files_list = DataFilesList.from_patterns(
+                    config_metadata_patterns, base_path=base_path
+                )
+                if config_metadata_data_files_list:
+                    config_data_files_dict = DataFilesDict(
+                        {
+                            split: data_files_list + config_metadata_data_files_list
+                            for split, data_files_list in config_data_files_dict.items()
+                        }
+                    )
+        ignored_params = [
+            param for param in config_params if not hasattr(builder_config_cls, param) and param != "default"
+        ]
+        if ignored_params:
+            logger.warning(
+                f"Some dataset params were ignored: {ignored_params}. "
+                "Make sure to use only valid params for the dataset builder and to have "
+                "an up-to-date version of the `datasets` library."
+            )
+        builder_configs.append(
+            builder_config_cls(
+                name=config_name,
+                data_files=config_data_files_dict,
+                data_dir=config_data_dir,
+                **{
+                    param: value
+                    for param, value in {**default_builder_kwargs, **config_params}.items()
+                    if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir")
+                },
+            )
+        )
+    return builder_configs, default_config_name
+
+
+@dataclass
+class BuilderConfigsParameters:
+    """Dataclass containing objects related to the creation of builder configurations from yaml's metadata content.
+
+    Attributes:
+        metadata_configs (`MetadataConfigs`, *optional*):
+            Configs parsed from yaml's metadata.
+        builder_configs (`list[BuilderConfig]`, *optional*):
+            List of BuilderConfig objects created from metadata_configs above.
+        default_config_name (`str`):
+            Name of default config taken from yaml's metadata.
+ """ + + metadata_configs: Optional[MetadataConfigs] = None + builder_configs: Optional[List[BuilderConfig]] = None + default_config_name: Optional[str] = None + + +@dataclass +class DatasetModule: + module_path: str + hash: str + builder_kwargs: dict + builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters) + dataset_infos: Optional[DatasetInfosDict] = None + + +@dataclass +class MetricModule: + module_path: str + hash: str + + +class _DatasetModuleFactory: + def get_module(self) -> DatasetModule: + raise NotImplementedError + + +class _MetricModuleFactory: + def get_module(self) -> MetricModule: + raise NotImplementedError + + +class GithubMetricModuleFactory(_MetricModuleFactory): + """Get the module of a metric. The metric script is downloaded from GitHub. + + + + Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + """ + + @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate") + def __init__( + self, + name: str, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.revision = revision + self.download_config = download_config.copy() if download_config else DownloadConfig() + if self.download_config.max_retries < 3: + self.download_config.max_retries = 3 + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") == 0 + increase_load_count(name, resource_type="metric") + + def download_loading_script(self, revision: Optional[str]) -> str: + file_path = hf_github_url(path=self.name, name=self.name + ".py", revision=revision, dataset=False) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading builder script" + return cached_path(file_path, download_config=download_config) + + def get_module(self) -> MetricModule: + # get script and other files + revision = self.revision + try: + local_path = self.download_loading_script(revision) + revision = self.revision + except FileNotFoundError: + if revision is not None: + raise + else: + revision = "main" + local_path = self.download_loading_script(revision) + logger.warning( + f"Couldn't find a directory or a metric named '{self.name}' in this version. " + f"It was picked from the main branch on github instead." + ) + imports = get_imports(local_path) + local_imports = _download_additional_modules( + name=self.name, + base_path=hf_github_url(path=self.name, name="", revision=revision, dataset=False), + imports=imports, + download_config=self.download_config, + ) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=local_path, + local_imports=local_imports, + additional_files=[], + dynamic_modules_path=dynamic_modules_path, + module_namespace="metrics", + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + return MetricModule(module_path, hash) + + +class LocalMetricModuleFactory(_MetricModuleFactory): + """Get the module of a local metric. The metric script is loaded from a local script. 
+ + + + Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + """ + + @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate") + def __init__( + self, + path: str, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.path = path + self.name = Path(path).stem + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + + def get_module(self) -> MetricModule: + # get script and other files + imports = get_imports(self.path) + local_imports = _download_additional_modules( + name=self.name, + base_path=str(Path(self.path).parent), + imports=imports, + download_config=self.download_config, + ) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=self.path, + local_imports=local_imports, + additional_files=[], + dynamic_modules_path=dynamic_modules_path, + module_namespace="metrics", + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + return MetricModule(module_path, hash) + + +class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory): + """Get the module of a local dataset. The dataset script is loaded from a local script.""" + + def __init__( + self, + path: str, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.path = path + self.name = Path(path).stem + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + + def get_module(self) -> DatasetModule: + # get script and other files + dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME + dataset_readme_path = Path(self.path).parent / "README.md" + imports = get_imports(self.path) + local_imports = _download_additional_modules( + name=self.name, + base_path=str(Path(self.path).parent), + imports=imports, + download_config=self.download_config, + ) + additional_files = [] + if dataset_infos_path.is_file(): + additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path))) + if dataset_readme_path.is_file(): + additional_files.append(("README.md", dataset_readme_path)) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=self.path, + local_imports=local_imports, + additional_files=additional_files, + dynamic_modules_path=dynamic_modules_path, + module_namespace="datasets", + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + builder_kwargs = {"hash": hash, "base_path": str(Path(self.path).parent)} + return DatasetModule(module_path, hash, builder_kwargs) + + +class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory): + """Get the module of a dataset loaded from the user's data files. 
The dataset builder module to use is inferred + from the data files extensions.""" + + def __init__( + self, + path: str, + data_dir: Optional[str] = None, + data_files: Optional[Union[str, List, Dict]] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + ): + if data_dir and os.path.isabs(data_dir): + raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}") + + self.path = Path(path).as_posix() + self.name = Path(path).stem + self.data_files = data_files + self.data_dir = data_dir + self.download_mode = download_mode + + def get_module(self) -> DatasetModule: + readme_path = os.path.join(self.path, "README.md") + dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData() + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) + # we need a set of data files to find which dataset builder to use + # because we need to infer module name by files extensions + base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix() + if self.data_files is not None: + patterns = sanitize_patterns(self.data_files) + elif metadata_configs and "data_files" in next(iter(metadata_configs.values())): + patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"]) + else: + patterns = get_data_patterns(base_path) + data_files = DataFilesDict.from_patterns( + patterns, + base_path=base_path, + allowed_extensions=ALL_ALLOWED_EXTENSIONS, + ) + module_name, default_builder_kwargs = infer_module_for_data_files( + data_files=data_files, + path=self.path, + ) + data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name]) + # Collect metadata files if the module supports them + supports_metadata = module_name in _MODULE_SUPPORTS_METADATA + if self.data_files is None and supports_metadata: + try: + metadata_patterns = get_metadata_patterns(base_path) + except FileNotFoundError: + metadata_patterns = None + if metadata_patterns is not None: + metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path) + if metadata_data_files_list: + data_files = DataFilesDict( + { + split: data_files_list + metadata_data_files_list + for split, data_files_list in data_files.items() + } + ) + + module_path, hash = _PACKAGED_DATASETS_MODULES[module_name] + if metadata_configs: + builder_configs, default_config_name = create_builder_configs_from_metadata_configs( + module_path, + metadata_configs, + base_path=base_path, + supports_metadata=supports_metadata, + default_builder_kwargs=default_builder_kwargs, + ) + else: + builder_configs, default_config_name = None, None + builder_kwargs = { + "hash": hash, + "base_path": self.path, + "dataset_name": camelcase_to_snakecase(Path(self.path).name), + } + if self.data_files is not None or not metadata_configs: + builder_kwargs["data_files"] = data_files + builder_kwargs.update(default_builder_kwargs) # from _EXTENSION_TO_MODULE + # this file is deprecated and was created automatically in old versions of push_to_hub + if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)): + with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f: + legacy_dataset_infos = DatasetInfosDict( + { + config_name: DatasetInfo.from_dict(dataset_info_dict) + for config_name, dataset_info_dict in json.load(f).items() + } + ) + if len(legacy_dataset_infos) == 1: + # old config 
e.g. named "username--dataset_name" + legacy_config_name = next(iter(legacy_dataset_infos)) + legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name) + legacy_dataset_infos.update(dataset_infos) + dataset_infos = legacy_dataset_infos + if default_config_name is None and len(dataset_infos) == 1: + default_config_name = next(iter(dataset_infos)) + + return DatasetModule( + module_path, + hash, + builder_kwargs, + dataset_infos=dataset_infos, + builder_configs_parameters=BuilderConfigsParameters( + metadata_configs=metadata_configs, + builder_configs=builder_configs, + default_config_name=default_config_name, + ), + ) + + +class PackagedDatasetModuleFactory(_DatasetModuleFactory): + """Get the dataset builder module from the ones that are packaged with the library: csv, json, etc.""" + + def __init__( + self, + name: str, + data_dir: Optional[str] = None, + data_files: Optional[Union[str, List, Dict]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + ): + self.name = name + self.data_files = data_files + self.data_dir = data_dir + self.download_config = download_config + self.download_mode = download_mode + increase_load_count(name, resource_type="dataset") + + def get_module(self) -> DatasetModule: + base_path = Path(self.data_dir or "").expanduser().resolve().as_posix() + patterns = sanitize_patterns(self.data_files) if self.data_files is not None else get_data_patterns(base_path) + data_files = DataFilesDict.from_patterns( + patterns, + download_config=self.download_config, + base_path=base_path, + ) + supports_metadata = self.name in _MODULE_SUPPORTS_METADATA + if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL: + try: + metadata_patterns = get_metadata_patterns(base_path) + except FileNotFoundError: + metadata_patterns = None + if metadata_patterns is not None: + metadata_data_files_list = DataFilesList.from_patterns( + metadata_patterns, download_config=self.download_config, base_path=base_path + ) + if metadata_data_files_list: + data_files = DataFilesDict( + { + split: data_files_list + metadata_data_files_list + for split, data_files_list in data_files.items() + } + ) + + module_path, hash = _PACKAGED_DATASETS_MODULES[self.name] + + builder_kwargs = { + "hash": hash, + "data_files": data_files, + "dataset_name": self.name, + } + + return DatasetModule(module_path, hash, builder_kwargs) + + +class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory): + """ + Get the module of a dataset loaded from data files of a dataset repository. + The dataset builder module to use is inferred from the data files extensions. 
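+
+    Example (a sketch with a hypothetical repository id; in practice this factory is
+    instantiated for you by `dataset_module_factory`):
+
+    ```py
+    >>> factory = HubDatasetModuleFactoryWithoutScript("username/dataset_name")
+    >>> dataset_module = factory.get_module()
+    >>> dataset_module.module_path  # a packaged module such as the csv or parquet builder
+    ```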
+ """ + + def __init__( + self, + name: str, + revision: Optional[Union[str, Version]] = None, + data_dir: Optional[str] = None, + data_files: Optional[Union[str, List, Dict]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + ): + self.name = name + self.revision = revision + self.data_files = data_files + self.data_dir = data_dir + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + increase_load_count(name, resource_type="dataset") + + def get_module(self) -> DatasetModule: + hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info( + self.name, + revision=self.revision, + token=self.download_config.token, + timeout=100.0, + ) + # even if metadata_configs is not None (which means that we will resolve files for each config later) + # we cannot skip resolving all files because we need to infer module name by files extensions + revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime + base_path = f"hf://datasets/{self.name}@{revision}/{self.data_dir or ''}".rstrip("/") + + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading readme" + try: + dataset_readme_path = cached_path( + hf_hub_url(self.name, "README.md", revision=revision), + download_config=download_config, + ) + dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data + except FileNotFoundError: + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) + # we need a set of data files to find which dataset builder to use + # because we need to infer module name by files extensions + if self.data_files is not None: + patterns = sanitize_patterns(self.data_files) + elif metadata_configs and "data_files" in next(iter(metadata_configs.values())): + patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"]) + else: + patterns = get_data_patterns(base_path, download_config=self.download_config) + data_files = DataFilesDict.from_patterns( + patterns, + base_path=base_path, + allowed_extensions=ALL_ALLOWED_EXTENSIONS, + download_config=self.download_config, + ) + module_name, default_builder_kwargs = infer_module_for_data_files( + data_files=data_files, + path=self.name, + download_config=self.download_config, + ) + data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name]) + # Collect metadata files if the module supports them + supports_metadata = module_name in _MODULE_SUPPORTS_METADATA + if self.data_files is None and supports_metadata: + try: + metadata_patterns = get_metadata_patterns(base_path) + except FileNotFoundError: + metadata_patterns = None + if metadata_patterns is not None: + metadata_data_files_list = DataFilesList.from_patterns( + metadata_patterns, download_config=self.download_config, base_path=base_path + ) + if metadata_data_files_list: + data_files = DataFilesDict( + { + split: data_files_list + metadata_data_files_list + for split, data_files_list in data_files.items() + } + ) + + module_path, hash = _PACKAGED_DATASETS_MODULES[module_name] + if metadata_configs: + builder_configs, default_config_name = create_builder_configs_from_metadata_configs( + module_path, + metadata_configs, + base_path=base_path, + supports_metadata=supports_metadata, + 
default_builder_kwargs=default_builder_kwargs, + download_config=self.download_config, + ) + else: + builder_configs, default_config_name = None, None + builder_kwargs = { + "hash": hash, + "base_path": hf_hub_url(self.name, "", revision=self.revision), + "repo_id": self.name, + "dataset_name": camelcase_to_snakecase(Path(self.name).name), + } + if self.data_files is not None or not metadata_configs: + builder_kwargs["data_files"] = data_files + builder_kwargs.update(default_builder_kwargs) # from _EXTENSION_TO_MODULE + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading metadata" + try: + # this file is deprecated and was created automatically in old versions of push_to_hub + dataset_infos_path = cached_path( + hf_hub_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.revision), + download_config=download_config, + ) + with open(dataset_infos_path, encoding="utf-8") as f: + legacy_dataset_infos = DatasetInfosDict( + { + config_name: DatasetInfo.from_dict(dataset_info_dict) + for config_name, dataset_info_dict in json.load(f).items() + } + ) + if len(legacy_dataset_infos) == 1: + # old config e.g. named "username--dataset_name" + legacy_config_name = next(iter(legacy_dataset_infos)) + legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name) + legacy_dataset_infos.update(dataset_infos) + dataset_infos = legacy_dataset_infos + except FileNotFoundError: + pass + if default_config_name is None and len(dataset_infos) == 1: + default_config_name = next(iter(dataset_infos)) + + return DatasetModule( + module_path, + hash, + builder_kwargs, + dataset_infos=dataset_infos, + builder_configs_parameters=BuilderConfigsParameters( + metadata_configs=metadata_configs, + builder_configs=builder_configs, + default_config_name=default_config_name, + ), + ) + + +class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory): + """ + Get the module of a dataset from a dataset repository. + The dataset script comes from the script inside the dataset repository. 
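+
+    Example (illustrative only; `dataset_module_factory` selects this factory when the
+    repository contains a `<repo_name>.py` script):
+
+    ```py
+    >>> factory = HubDatasetModuleFactoryWithScript("username/dataset_with_script")
+    >>> dataset_module = factory.get_module()  # downloads the script, caches it, makes it importable
+    ```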
+ """ + + def __init__( + self, + name: str, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.revision = revision + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + increase_load_count(name, resource_type="dataset") + + def download_loading_script(self) -> str: + file_path = hf_hub_url(repo_id=self.name, path=self.name.split("/")[-1] + ".py", revision=self.revision) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading builder script" + return cached_path(file_path, download_config=download_config) + + def download_dataset_infos_file(self) -> str: + dataset_infos = hf_hub_url(repo_id=self.name, path=config.DATASETDICT_INFOS_FILENAME, revision=self.revision) + # Download the dataset infos file if available + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading metadata" + try: + return cached_path( + dataset_infos, + download_config=download_config, + ) + except (FileNotFoundError, ConnectionError): + return None + + def download_dataset_readme_file(self) -> str: + readme_url = hf_hub_url(repo_id=self.name, path="README.md", revision=self.revision) + # Download the dataset infos file if available + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = "Downloading readme" + try: + return cached_path( + readme_url, + download_config=download_config, + ) + except (FileNotFoundError, ConnectionError): + return None + + def get_module(self) -> DatasetModule: + # get script and other files + local_path = self.download_loading_script() + dataset_infos_path = self.download_dataset_infos_file() + dataset_readme_path = self.download_dataset_readme_file() + imports = get_imports(local_path) + local_imports = _download_additional_modules( + name=self.name, + base_path=hf_hub_url(repo_id=self.name, path="", revision=self.revision), + imports=imports, + download_config=self.download_config, + ) + additional_files = [] + if dataset_infos_path: + additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path)) + if dataset_readme_path: + additional_files.append(("README.md", dataset_readme_path)) + # copy the script and the files in an importable directory + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + module_path, hash = _create_importable_file( + local_path=local_path, + local_imports=local_imports, + additional_files=additional_files, + dynamic_modules_path=dynamic_modules_path, + module_namespace="datasets", + name=self.name, + download_mode=self.download_mode, + ) + # make the new module to be noticed by the import system + importlib.invalidate_caches() + builder_kwargs = { + "hash": hash, + "base_path": hf_hub_url(self.name, "", revision=self.revision), + "repo_id": self.name, + } + return DatasetModule(module_path, hash, builder_kwargs) + + +class CachedDatasetModuleFactory(_DatasetModuleFactory): + """ + Get the module of a dataset that has been loaded once already and cached. + The script that is loaded from the cache is the most recent one with a matching name. 
+ """ + + def __init__( + self, + name: str, + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") <= 1 + + def get_module(self) -> DatasetModule: + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--")) + hashes = ( + [h for h in os.listdir(importable_directory_path) if len(h) == 64] + if os.path.isdir(importable_directory_path) + else None + ) + if not hashes: + raise FileNotFoundError(f"Dataset {self.name} is not cached in {dynamic_modules_path}") + # get most recent + + def _get_modification_time(module_hash): + return (Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py")).stat().st_mtime + + hash = sorted(hashes, key=_get_modification_time)[-1] + warning_msg = ( + f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} " + f"(last modified on {time.ctime(_get_modification_time(hash))}) since it " + f"couldn't be found locally at {self.name}." + ) + if not config.HF_DATASETS_OFFLINE: + warning_msg += ", or remotely on the Hugging Face Hub." + logger.warning(warning_msg) + # make the new module to be noticed by the import system + module_path = ".".join( + [ + os.path.basename(dynamic_modules_path), + "datasets", + self.name.replace("/", "--"), + hash, + self.name.split("/")[-1], + ] + ) + importlib.invalidate_caches() + builder_kwargs = { + "hash": hash, + "repo_id": self.name, + } + return DatasetModule(module_path, hash, builder_kwargs) + + +class CachedMetricModuleFactory(_MetricModuleFactory): + """ + Get the module of a metric that has been loaded once already and cached. + The script that is loaded from the cache is the most recent one with a matching name. + + + + Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + """ + + @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate") + def __init__( + self, + name: str, + dynamic_modules_path: Optional[str] = None, + ): + self.name = name + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count("/") == 0 + + def get_module(self) -> MetricModule: + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + importable_directory_path = os.path.join(dynamic_modules_path, "metrics", self.name) + hashes = ( + [h for h in os.listdir(importable_directory_path) if len(h) == 64] + if os.path.isdir(importable_directory_path) + else None + ) + if not hashes: + raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}") + # get most recent + + def _get_modification_time(module_hash): + return (Path(importable_directory_path) / module_hash / (self.name + ".py")).stat().st_mtime + + hash = sorted(hashes, key=_get_modification_time)[-1] + logger.warning( + f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} " + f"(last modified on {time.ctime(_get_modification_time(hash))}) since it " + f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub." 
+        )
+        # make the new module visible to the import system
+        module_path = ".".join([os.path.basename(dynamic_modules_path), "metrics", self.name, hash, self.name])
+        importlib.invalidate_caches()
+        return MetricModule(module_path, hash)
+
+
+def dataset_module_factory(
+    path: str,
+    revision: Optional[Union[str, Version]] = None,
+    download_config: Optional[DownloadConfig] = None,
+    download_mode: Optional[Union[DownloadMode, str]] = None,
+    dynamic_modules_path: Optional[str] = None,
+    data_dir: Optional[str] = None,
+    data_files: Optional[Union[Dict, List, str, DataFilesDict]] = None,
+    **download_kwargs,
+) -> DatasetModule:
+    """
+    Download/extract/cache a dataset module.
+
+    Dataset scripts are cached inside the dynamic modules cache to allow easy import (and to avoid ugly sys.path tweaks).
+
+    Args:
+
+        path (str): Path or name of the dataset.
+            Depending on ``path``, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+            For local datasets:
+
+            - if ``path`` is a local directory (containing data files only)
+              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+              e.g. ``'./path/to/directory/with/my/csv/data'``.
+            - if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
+              -> load the dataset builder from the dataset script
+              e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
+
+            For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``)
+
+            - if ``path`` is a dataset repository on the HF hub (containing data files only)
+              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+              e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
+            - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+              -> load the dataset builder from the dataset script in the dataset repository
+              e.g. ``glue``, ``squad``, ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+        revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+        download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+        download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+        dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+            Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+            By default, the datasets and metrics are stored inside the `datasets_modules` module.
+        data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified,
+            in which case it's equivalent to passing `os.path.join(data_dir, "**")` as `data_files`.
+        data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override + the attributes in download_config if supplied. + + Returns: + DatasetModule + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + download_config.extract_compressed_file = True + download_config.force_extract = True + download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD + + filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1] + if not filename.endswith(".py"): + filename = filename + ".py" + combined_path = os.path.join(path, filename) + + # We have several ways to get a dataset builder: + # + # - if path is the name of a packaged dataset module + # -> use the packaged module (json, csv, etc.) + # + # - if os.path.join(path, name) is a local python file + # -> use the module from the python file + # - if path is a local directory (but no python file) + # -> use a packaged module (csv, text etc.) based on content of the directory + # + # - if path has one "/" and is dataset repository on the HF hub with a python file + # -> the module from the python file in the dataset repository + # - if path has one "/" and is dataset repository on the HF hub without a python file + # -> use a packaged module (csv, text etc.) based on content of the repository + + # Try packaged + if path in _PACKAGED_DATASETS_MODULES: + return PackagedDatasetModuleFactory( + path, + data_dir=data_dir, + data_files=data_files, + download_config=download_config, + download_mode=download_mode, + ).get_module() + # Try locally + elif path.endswith(filename): + if os.path.isfile(path): + return LocalDatasetModuleFactoryWithScript( + path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + else: + raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}") + elif os.path.isfile(combined_path): + return LocalDatasetModuleFactoryWithScript( + combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + elif os.path.isdir(path): + return LocalDatasetModuleFactoryWithoutScript( + path, data_dir=data_dir, data_files=data_files, download_mode=download_mode + ).get_module() + # Try remotely + elif is_relative_path(path) and path.count("/") <= 1: + try: + _raise_if_offline_mode_is_enabled() + hf_api = HfApi(config.HF_ENDPOINT) + try: + dataset_info = hf_api.dataset_info( + repo_id=path, + revision=revision, + token=download_config.token, + timeout=100.0, + ) + except Exception as e: # noqa catch any exception of hf_hub and consider that the dataset doesn't exist + if isinstance( + e, + ( + OfflineModeIsEnabled, + requests.exceptions.ConnectTimeout, + requests.exceptions.ConnectionError, + ), + ): + raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({type(e).__name__})") + elif "404" in str(e): + msg = f"Dataset '{path}' doesn't exist on the Hub" + raise FileNotFoundError(msg + f" at revision '{revision}'" if revision else msg) + elif "401" in str(e): + msg = f"Dataset '{path}' doesn't exist on the Hub" + msg = msg + f" at revision '{revision}'" if revision else msg + raise DatasetNotFoundError( + msg + ". If the repo is private or gated, make sure to log in with `huggingface-cli login`." 
+ ) + else: + raise e + if filename in [sibling.rfilename for sibling in dataset_info.siblings]: + return HubDatasetModuleFactoryWithScript( + path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + else: + return HubDatasetModuleFactoryWithoutScript( + path, + revision=revision, + data_dir=data_dir, + data_files=data_files, + download_config=download_config, + download_mode=download_mode, + ).get_module() + except Exception as e1: + # All the attempts failed, before raising the error we should check if the module is already cached + try: + return CachedDatasetModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module() + except Exception: + # If it's not in the cache, then it doesn't exist. + if isinstance(e1, OfflineModeIsEnabled): + raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None + if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)): + raise e1 from None + if isinstance(e1, FileNotFoundError): + raise FileNotFoundError( + f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. " + f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}" + ) from None + raise e1 from None + else: + raise FileNotFoundError( + f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory." + ) + + +@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate") +def metric_module_factory( + path: str, + revision: Optional[Union[str, Version]] = None, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + dynamic_modules_path: Optional[str] = None, + **download_kwargs, +) -> MetricModule: + """ + Download/extract/cache a metric module. + + + + Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + + Metrics codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks). + + Args: + + path (str): Path or name of the metric script. + + - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory): + -> load the module from the metric script + e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``. + - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`) + -> load the module from the metric script in the GitHub repository at huggingface/datasets + e.g. ``'accuracy'`` or ``'rouge'``. + + revision (Optional ``Union[str, datasets.Version]``): + If specified, the module will be loaded from the datasets repository at this version. + By default: + - it is set to the local version of the lib. + - it will also try to load it from the main branch if it's not available at the local version of the lib. + Specifying a version that is different from your local version of the lib might cause compatibility issues. + download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters. + download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode. + dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. 
~/.cache/huggingface/modules/datasets_modules): + Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`. + By default, the datasets and metrics are stored inside the `datasets_modules` module. + **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override + the attributes in download_config if supplied. + + Returns: + MetricModule + """ + with warnings.catch_warnings(): + # Ignore equivalent warnings to the one already issued + warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning) + + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + download_config.extract_compressed_file = True + download_config.force_extract = True + + filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1] + if not filename.endswith(".py"): + filename = filename + ".py" + combined_path = os.path.join(path, filename) + # Try locally + if path.endswith(filename): + if os.path.isfile(path): + return LocalMetricModuleFactory( + path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + else: + raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}") + elif os.path.isfile(combined_path): + return LocalMetricModuleFactory( + combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path + ).get_module() + elif is_relative_path(path) and path.count("/") == 0: + try: + return GithubMetricModuleFactory( + path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + dynamic_modules_path=dynamic_modules_path, + ).get_module() + except Exception as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached. + try: + return CachedMetricModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module() + except Exception: # noqa if it's not in the cache, then it doesn't exist. + if not isinstance(e1, FileNotFoundError): + raise e1 from None + raise FileNotFoundError( + f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}. " + f"Metric '{path}' doesn't exist on the Hugging Face Hub either." + ) from None + else: + raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}.") + + +@deprecated("Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate") +def load_metric( + path: str, + config_name: Optional[str] = None, + process_id: int = 0, + num_process: int = 1, + cache_dir: Optional[str] = None, + experiment_id: Optional[str] = None, + keep_in_memory: bool = False, + download_config: Optional[DownloadConfig] = None, + download_mode: Optional[Union[DownloadMode, str]] = None, + revision: Optional[Union[str, Version]] = None, + **metric_init_kwargs, +) -> Metric: + """Load a `datasets.Metric`. + + + + Use `evaluate.load` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate + + + + Args: + + path (``str``): + path to the metric processing script with the metric builder. Can be either: + - a local path to processing script or the directory containing the script (if the script has the same name as the directory), + e.g. 
``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
+            - a metric identifier on the HuggingFace datasets repo (list all available metrics with ``datasets.list_metrics()``)
+              e.g. ``'rouge'`` or ``'bleu'``
+        config_name (:obj:`str`, optional): selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset)
+        process_id (:obj:`int`, optional): for distributed evaluation: id of the process
+        num_process (:obj:`int`, optional): for distributed evaluation: total number of processes
+        cache_dir (Optional str): path to store the temporary predictions and references (defaults to `~/.cache/huggingface/metrics/`)
+        experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+        keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False)
+        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+        download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+        revision (Optional ``Union[str, datasets.Version]``): if specified, the module will be loaded from the datasets repository
+            at this version. By default, it is set to the local version of the lib. Specifying a version that is different from
+            your local version of the lib might cause compatibility issues.
+
+    Returns:
+        `datasets.Metric`
+
+    Example:
+
+    ```py
+    >>> from datasets import load_metric
+    >>> accuracy = load_metric('accuracy')
+    >>> accuracy.compute(references=[1, 0], predictions=[1, 1])
+    {'accuracy': 0.5}
+    ```
+    """
+    with warnings.catch_warnings():
+        # Ignore equivalent warnings to the one already issued
+        warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+        download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+        metric_module = metric_module_factory(
+            path, revision=revision, download_config=download_config, download_mode=download_mode
+        ).module_path
+        metric_cls = import_main_class(metric_module, dataset=False)
+        metric = metric_cls(
+            config_name=config_name,
+            process_id=process_id,
+            num_process=num_process,
+            cache_dir=cache_dir,
+            keep_in_memory=keep_in_memory,
+            experiment_id=experiment_id,
+            **metric_init_kwargs,
+        )
+
+        # Download and prepare resources for the metric
+        metric.download_and_prepare(download_config=download_config)
+
+        return metric
+
+
+def load_dataset_builder(
+    path: str,
+    name: Optional[str] = None,
+    data_dir: Optional[str] = None,
+    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+    cache_dir: Optional[str] = None,
+    features: Optional[Features] = None,
+    download_config: Optional[DownloadConfig] = None,
+    download_mode: Optional[Union[DownloadMode, str]] = None,
+    revision: Optional[Union[str, Version]] = None,
+    token: Optional[Union[bool, str]] = None,
+    use_auth_token="deprecated",
+    storage_options: Optional[Dict] = None,
+    **config_kwargs,
+) -> DatasetBuilder:
+    """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
+    without downloading the dataset itself.
+
+    You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+    A dataset is a directory that contains:
+
+    - some data files in generic formats (JSON, CSV, Parquet, text, etc.)
+    - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+    Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+    Args:
+
+        path (`str`):
+            Path or name of the dataset.
+            Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+            For local datasets:
+
+            - if `path` is a local directory (containing data files only)
+              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+              e.g. `'./path/to/directory/with/my/csv/data'`.
+            - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+              -> load the dataset builder from the dataset script
+              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+            For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+            - if `path` is a dataset repository on the HF hub (containing data files only)
+              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+              e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+            - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+              -> load the dataset builder from the dataset script in the dataset repository
+              e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+        name (`str`, *optional*):
+            Defining the name of the dataset configuration.
+        data_dir (`str`, *optional*):
+            Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+            the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
+        data_files (`str` or `Sequence` or `Mapping`, *optional*):
+            Path(s) to source data file(s).
+        cache_dir (`str`, *optional*):
+            Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+        features ([`Features`], *optional*):
+            Set the features type to use for this dataset.
+        download_config ([`DownloadConfig`], *optional*):
+            Specific download configuration parameters.
+        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+            Download/generate mode.
+        revision ([`Version`] or `str`, *optional*):
+            Version of the dataset script to load.
+            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+        token (`str` or `bool`, *optional*):
+            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+            If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*): + Optional string or boolean to use as Bearer token for remote files on the Datasets Hub. + If `True`, or not specified, will get token from `"~/.huggingface"`. + + + + `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0. + + + storage_options (`dict`, *optional*, defaults to `None`): + **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any. + + + **config_kwargs (additional keyword arguments): + Keyword arguments to be passed to the [`BuilderConfig`] + and used in the [`DatasetBuilder`]. + + Returns: + [`DatasetBuilder`] + + Example: + + ```py + >>> from datasets import load_dataset_builder + >>> ds_builder = load_dataset_builder('rotten_tomatoes') + >>> ds_builder.info.features + {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), + 'text': Value(dtype='string', id=None)} + ``` + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'token=' instead.", + FutureWarning, + ) + token = use_auth_token + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + if token is not None: + download_config = download_config.copy() if download_config else DownloadConfig() + download_config.token = token + if storage_options is not None: + download_config = download_config.copy() if download_config else DownloadConfig() + download_config.storage_options.update(storage_options) + dataset_module = dataset_module_factory( + path, + revision=revision, + download_config=download_config, + download_mode=download_mode, + data_dir=data_dir, + data_files=data_files, + ) + # Get dataset builder class from the processing script + builder_kwargs = dataset_module.builder_kwargs + data_dir = builder_kwargs.pop("data_dir", data_dir) + data_files = builder_kwargs.pop("data_files", data_files) + config_name = builder_kwargs.pop( + "config_name", name or dataset_module.builder_configs_parameters.default_config_name + ) + dataset_name = builder_kwargs.pop("dataset_name", None) + hash = builder_kwargs.pop("hash") + info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None + if ( + dataset_module.builder_configs_parameters.metadata_configs + and config_name in dataset_module.builder_configs_parameters.metadata_configs + ): + hash = update_hash_with_config_parameters( + hash, dataset_module.builder_configs_parameters.metadata_configs[config_name] + ) + + if path in _PACKAGED_DATASETS_MODULES and data_files is None: + error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder." 
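+        # Suggest a concrete `data_files` pattern built from the extensions this packaged builder accepts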
+        example_extensions = [
+            extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
+        ]
+        if example_extensions:
+            error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
+        raise ValueError(error_msg)
+
+    builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
+    # Instantiate the dataset builder
+    builder_instance: DatasetBuilder = builder_cls(
+        cache_dir=cache_dir,
+        dataset_name=dataset_name,
+        config_name=config_name,
+        data_dir=data_dir,
+        data_files=data_files,
+        hash=hash,
+        info=info,
+        features=features,
+        token=token,
+        storage_options=storage_options,
+        **builder_kwargs,
+        **config_kwargs,
+    )
+
+    return builder_instance
+
+
+def load_dataset(
+    path: str,
+    name: Optional[str] = None,
+    data_dir: Optional[str] = None,
+    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+    split: Optional[Union[str, Split]] = None,
+    cache_dir: Optional[str] = None,
+    features: Optional[Features] = None,
+    download_config: Optional[DownloadConfig] = None,
+    download_mode: Optional[Union[DownloadMode, str]] = None,
+    verification_mode: Optional[Union[VerificationMode, str]] = None,
+    ignore_verifications="deprecated",
+    keep_in_memory: Optional[bool] = None,
+    save_infos: bool = False,
+    revision: Optional[Union[str, Version]] = None,
+    token: Optional[Union[bool, str]] = None,
+    use_auth_token="deprecated",
+    task="deprecated",
+    streaming: bool = False,
+    num_proc: Optional[int] = None,
+    storage_options: Optional[Dict] = None,
+    **config_kwargs,
+) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
+    """Load a dataset from the Hugging Face Hub, or a local dataset.
+
+    You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+    A dataset is a directory that contains:
+
+    - some data files in generic formats (JSON, CSV, Parquet, text, etc.).
+    - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+    Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+    This function does the following under the hood:
+
+        1. Download and import in the library the dataset script from `path` if it's not already cached inside the library.
+
+            If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
+
+            Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
+            contain the path or URL to the original data files and the code to load examples from the original data files.
+
+            You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
+
+        2. Run the dataset script which will:
+
+            * Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
+            * Process and cache the dataset in typed Arrow tables.
+
+                Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
+                They can be directly accessed from disk, loaded in RAM or even streamed over the web.
+
+        3. Return a dataset built from the requested splits in `split` (default: all), as sketched below.
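+
+    A rough sketch of the equivalent manual steps (a simplification for illustration;
+    `load_dataset` itself also handles streaming, verification modes and more edge cases):
+
+    ```py
+    >>> builder = load_dataset_builder(path)  # 1. resolve and import the dataset builder
+    >>> builder.download_and_prepare()  # 2. download and cache the dataset as typed Arrow tables
+    >>> ds = builder.as_dataset(split="train")  # 3. build the requested split
+    ```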
+
+    It also allows loading a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
+    In this case, it automatically loads all the data files from the directory or the dataset repository.
+
+    Args:
+
+        path (`str`):
+            Path or name of the dataset.
+            Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+            For local datasets:
+
+            - if `path` is a local directory (containing data files only)
+              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+              e.g. `'./path/to/directory/with/my/csv/data'`.
+            - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+              -> load the dataset builder from the dataset script
+              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+            For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+            - if `path` is a dataset repository on the HF hub (containing data files only)
+              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+              e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+            - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+              -> load the dataset builder from the dataset script in the dataset repository
+              e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+        name (`str`, *optional*):
+            Defining the name of the dataset configuration.
+        data_dir (`str`, *optional*):
+            Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+            the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
+        data_files (`str` or `Sequence` or `Mapping`, *optional*):
+            Path(s) to source data file(s).
+        split (`Split` or `str`):
+            Which split of the data to load.
+            If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
+            If given, will return a single Dataset.
+            Splits can be combined and specified like in tensorflow-datasets.
+        cache_dir (`str`, *optional*):
+            Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+        features (`Features`, *optional*):
+            Set the features type to use for this dataset.
+        download_config ([`DownloadConfig`], *optional*):
+            Specific download configuration parameters.
+        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+            Download/generate mode.
+        verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+            Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+        ignore_verifications (`bool`, defaults to `False`):
+            Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+
+            `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+            Please use `verification_mode` instead.
+
+
+        keep_in_memory (`bool`, defaults to `None`):
+            Whether to copy the dataset in-memory.
If `None`, the dataset
+            will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+            nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+        save_infos (`bool`, defaults to `False`):
+            Save the dataset information (checksums/size/splits/...).
+        revision ([`Version`] or `str`, *optional*):
+            Version of the dataset script to load.
+            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+        token (`str` or `bool`, *optional*):
+            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+            If `True`, or not specified, will get token from `"~/.huggingface"`.
+        use_auth_token (`str` or `bool`, *optional*):
+            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+            If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+            `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+        task (`str`):
+            The task to prepare the dataset for during training and evaluation. Casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+
+
+
+            `task` was deprecated in version 2.13.0 and will be removed in 3.0.0.
+
+
+        streaming (`bool`, defaults to `False`):
+            If set to `True`, don't download the data files. Instead, it streams the data progressively while
+            iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
+
+            Note that streaming works for datasets that use data formats that support being iterated over, like txt, csv and jsonl.
+            JSON files may be downloaded completely. Also, streaming from remote zip or gzip files is supported, but other compressed formats
+            like rar and xz are not yet supported. The tgz format doesn't allow streaming.
+        num_proc (`int`, *optional*, defaults to `None`):
+            Number of processes when downloading and generating the dataset locally.
+            Multiprocessing is disabled by default.
+
+
+        storage_options (`dict`, *optional*, defaults to `None`):
+            **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+
+
+        **config_kwargs (additional keyword arguments):
+            Keyword arguments to be passed to the `BuilderConfig`
+            and used in the [`DatasetBuilder`].
+
+    Returns:
+        [`Dataset`] or [`DatasetDict`]:
+        - if `split` is not `None`: the dataset requested,
+        - if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
+
+        or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
+
+        - if `split` is not `None`: the dataset requested,
+        - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
+ + Example: + + Load a dataset from the Hugging Face Hub: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset('rotten_tomatoes', split='train') + + # Map data files to splits + >>> data_files = {'train': 'train.csv', 'test': 'test.csv'} + >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files) + ``` + + Load a local dataset: + + ```py + # Load a CSV file + >>> from datasets import load_dataset + >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv') + + # Load a JSON file + >>> from datasets import load_dataset + >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json') + + # Load from a local loading script + >>> from datasets import load_dataset + >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train') + ``` + + Load an [`~datasets.IterableDataset`]: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True) + ``` + + Load an image dataset with the `ImageFolder` dataset builder: + + ```py + >>> from datasets import load_dataset + >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train') + ``` + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'token=' instead.", + FutureWarning, + ) + token = use_auth_token + if ignore_verifications != "deprecated": + verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS + warnings.warn( + "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.", + FutureWarning, + ) + if task != "deprecated": + warnings.warn( + "'task' was deprecated in version 2.13.0 and will be removed in 3.0.0.\n", + FutureWarning, + ) + else: + task = None + if data_files is not None and not data_files: + raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).") + if Path(path, config.DATASET_STATE_JSON_FILENAME).exists(): + raise ValueError( + "You are trying to load a dataset that was saved using `save_to_disk`. " + "Please use `load_from_disk` instead." + ) + + if streaming and num_proc is not None: + raise NotImplementedError( + "Loading a streaming dataset in parallel with `num_proc` is not implemented. " + "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead." 
+ ) + + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + verification_mode = VerificationMode( + (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS + ) + + # Create a dataset builder + builder_instance = load_dataset_builder( + path=path, + name=name, + data_dir=data_dir, + data_files=data_files, + cache_dir=cache_dir, + features=features, + download_config=download_config, + download_mode=download_mode, + revision=revision, + token=token, + storage_options=storage_options, + **config_kwargs, + ) + + # Return iterable dataset in case of streaming + if streaming: + return builder_instance.as_streaming_dataset(split=split) + + # Some datasets are already processed on the HF google storage + # Don't try downloading from Google storage for the packaged datasets as text, json, csv or pandas + try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES + + # Download and prepare data + builder_instance.download_and_prepare( + download_config=download_config, + download_mode=download_mode, + verification_mode=verification_mode, + try_from_hf_gcs=try_from_hf_gcs, + num_proc=num_proc, + storage_options=storage_options, + ) + + # Build dataset for splits + keep_in_memory = ( + keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) + ) + ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory) + # Rename and cast features to match task schema + if task is not None: + # To avoid issuing the same warning twice + with warnings.catch_warnings(): + warnings.simplefilter("ignore", FutureWarning) + ds = ds.prepare_for_task(task) + if save_infos: + builder_instance._save_infos() + + return ds + + +def load_from_disk( + dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None +) -> Union[Dataset, DatasetDict]: + """ + Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or + from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. + + Args: + dataset_path (`str`): + Path (e.g. `"dataset/train"`) or remote URI (e.g. + `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset will be + loaded from. + fs (`~filesystems.S3FileSystem` or `fsspec.spec.AbstractFileSystem`, *optional*): + Instance of the remote filesystem used to download the files from. + + + + `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0. + Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`. + + + + keep_in_memory (`bool`, defaults to `None`): + Whether to copy the dataset in-memory. If `None`, the dataset + will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to + nonzero. See more details in the [improve performance](../cache#improve-performance) section. + + storage_options (`dict`, *optional*): + Key/value pairs to be passed on to the file-system backend, if any. + + + + Returns: + [`Dataset`] or [`DatasetDict`]: + - If `dataset_path` is a path of a dataset directory: the dataset requested. + - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split. 
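+
+    For a remote URI, the scheme selects the `fsspec` filesystem and `storage_options` is forwarded to it.
+    A minimal sketch (the bucket name is illustrative, assuming anonymous S3 access via `s3fs`):
+
+    ```py
+    >>> from datasets import load_from_disk
+    >>> ds = load_from_disk('s3://my-bucket/dataset/train', storage_options={'anon': True})
+    ```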
+ + Example: + + ```py + >>> from datasets import load_from_disk + >>> ds = load_from_disk('path/to/dataset/directory') + ``` + """ + if fs != "deprecated": + warnings.warn( + "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n" + "You can remove this warning by passing 'storage_options=fs.storage_options' instead.", + FutureWarning, + ) + storage_options = fs.storage_options + + fs: fsspec.AbstractFileSystem + fs, _, _ = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options) + if not fs.exists(dataset_path): + raise FileNotFoundError(f"Directory {dataset_path} not found") + if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile( + posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME) + ): + return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) + elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)): + return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) + else: + raise FileNotFoundError( + f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory." + ) diff --git a/testbed/huggingface__datasets/src/datasets/metric.py b/testbed/huggingface__datasets/src/datasets/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..b103d17ef42339bb1073352d701b65ec213e6174 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/metric.py @@ -0,0 +1,651 @@ +# Copyright 2020 The HuggingFace Datasets Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +""" Metrics base class.""" +import os +import types +import uuid +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import pyarrow as pa +from filelock import BaseFileLock, Timeout + +from . import config +from .arrow_dataset import Dataset +from .arrow_reader import ArrowReader +from .arrow_writer import ArrowWriter +from .download.download_config import DownloadConfig +from .download.download_manager import DownloadManager +from .features import Features +from .info import DatasetInfo, MetricInfo +from .naming import camelcase_to_snakecase +from .utils._filelock import FileLock +from .utils.deprecation_utils import deprecated +from .utils.logging import get_logger +from .utils.py_utils import copyfunc, temp_seed + + +logger = get_logger(__name__) + + +class FileFreeLock(BaseFileLock): + """Thread lock until a file **cannot** be locked""" + + def __init__(self, lock_file, *args, **kwargs): + self.filelock = FileLock(lock_file) + super().__init__(self.filelock.lock_file, *args, **kwargs) + + def _acquire(self): + try: + self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once + except Timeout: + # We couldn't acquire the lock, the file is locked! + self._context.lock_file_fd = self.filelock.lock_file + else: + # We were able to acquire the lock, the file is not yet locked! 
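+            # (so release it again and report "not acquired": a FileFreeLock only counts as
+            # acquired once another process holds the real lock on the underlying file)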
+ self.filelock.release() + self._context.lock_file_fd = None + + def _release(self): + self._context.lock_file_fd = None + + +# lists - summarize long lists similarly to NumPy +# arrays/tensors - let the frameworks control formatting +def summarize_if_long_list(obj): + if not type(obj) == list or len(obj) <= 6: # noqa: E721 + return f"{obj}" + + def format_chunk(chunk): + return ", ".join(repr(x) for x in chunk) + + return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]" + + +class MetricInfoMixin: + """This base class exposes some attributes of MetricInfo + at the base level of the Metric for easy access. + + + + Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + + """ + + def __init__(self, info: MetricInfo): + self._metric_info = info + + @property + def info(self): + """:class:`datasets.MetricInfo` object containing all the metadata in the metric.""" + return self._metric_info + + @property + def name(self) -> str: + return self._metric_info.metric_name + + @property + def experiment_id(self) -> Optional[str]: + return self._metric_info.experiment_id + + @property + def description(self) -> str: + return self._metric_info.description + + @property + def citation(self) -> str: + return self._metric_info.citation + + @property + def features(self) -> Features: + return self._metric_info.features + + @property + def inputs_description(self) -> str: + return self._metric_info.inputs_description + + @property + def homepage(self) -> Optional[str]: + return self._metric_info.homepage + + @property + def license(self) -> str: + return self._metric_info.license + + @property + def codebase_urls(self) -> Optional[List[str]]: + return self._metric_info.codebase_urls + + @property + def reference_urls(self) -> Optional[List[str]]: + return self._metric_info.reference_urls + + @property + def streamable(self) -> bool: + return self._metric_info.streamable + + @property + def format(self) -> Optional[str]: + return self._metric_info.format + + +class Metric(MetricInfoMixin): + """A Metric is the base class and common API for all metrics. + + + + Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate + + + + Args: + config_name (``str``): This is used to define a hash specific to a metrics computation script and prevents the metric's data + to be overridden when the metric loading script is modified. + keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings. + cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored. + The data directory should be located on a shared file-system in distributed setups. + num_process (``int``): specify the total number of nodes in a distributed settings. + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1) + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). + seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run. + experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system. + This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1). 
+        max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
+        timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
+    """
+
+    @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+    def __init__(
+        self,
+        config_name: Optional[str] = None,
+        keep_in_memory: bool = False,
+        cache_dir: Optional[str] = None,
+        num_process: int = 1,
+        process_id: int = 0,
+        seed: Optional[int] = None,
+        experiment_id: Optional[str] = None,
+        max_concurrent_cache_files: int = 10000,
+        timeout: Union[int, float] = 100,
+        **kwargs,
+    ):
+        # prepare info
+        self.config_name = config_name or "default"
+        info = self._info()
+        info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
+        info.config_name = self.config_name
+        info.experiment_id = experiment_id or "default_experiment"
+        MetricInfoMixin.__init__(self, info)  # For easy access on low level
+
+        # Safety checks on num_process and process_id
+        if not isinstance(process_id, int) or process_id < 0:
+            raise ValueError("'process_id' should be a number greater than or equal to 0")
+        if not isinstance(num_process, int) or num_process <= process_id:
+            raise ValueError("'num_process' should be a number greater than process_id")
+        if keep_in_memory and num_process != 1:
+            raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
+
+        self.num_process = num_process
+        self.process_id = process_id
+        self.max_concurrent_cache_files = max_concurrent_cache_files
+
+        self.keep_in_memory = keep_in_memory
+        self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
+        self.data_dir = self._build_data_dir()
+        if seed is None:
+            _, seed, pos, *_ = np.random.get_state()
+            self.seed: int = seed[pos] if pos < 624 else seed[0]
+        else:
+            self.seed: int = seed
+        self.timeout: Union[int, float] = timeout
+
+        # Update 'compute' and 'add' docstrings
+        # methods need to be copied otherwise it changes the docstrings of every instance
+        self.compute = types.MethodType(copyfunc(self.compute), self)
+        self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
+        self.add = types.MethodType(copyfunc(self.add), self)
+        self.compute.__func__.__doc__ += self.info.inputs_description
+        self.add_batch.__func__.__doc__ += self.info.inputs_description
+        self.add.__func__.__doc__ += self.info.inputs_description
+
+        # self.arrow_schema = pa.schema(field for field in self.info.features.type)
+        self.buf_writer = None
+        self.writer = None
+        self.writer_batch_size = None
+        self.data = None
+
+        # This is the cache file we store our predictions/references in
+        # Keep it None for now so we can (cloud)pickle the object
+        self.cache_file_name = None
+        self.filelock = None
+        self.rendez_vous_lock = None
+
+        # These are all the cache files on which we have a lock when we are in a distributed setting
+        self.file_paths = None
+        self.filelocks = None
+
+    def __len__(self):
+        """Return the number of examples (predictions or prediction/reference pairs)
+        currently stored in the metric's cache.
+        """
+        return 0 if self.writer is None else len(self.writer)
+
+    def __repr__(self):
+        return (
+            f'Metric(name: "{self.name}", features: {self.features}, '
+            f'usage: """{self.inputs_description}""", '
+            f"stored examples: {len(self)})"
+        )
+
+    def _build_data_dir(self):
+        """Path of this metric in cache_dir:
+        Will be:
+            self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
+        If any of these elements is missing, or if ``with_version=False``, the corresponding subfolders are dropped.
+        """
+        builder_data_dir = self._data_dir_root
+        builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
+        os.makedirs(builder_data_dir, exist_ok=True)
+        return builder_data_dir
+
+    def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
+        """Create a new cache file. If the default cache file is used, we generate a new hash."""
+        file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
+        filelock = None
+        for i in range(self.max_concurrent_cache_files):
+            filelock = FileLock(file_path + ".lock")
+            try:
+                filelock.acquire(timeout=timeout)
+            except Timeout:
+                # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
+                # We raise an error
+                if self.num_process != 1:
+                    raise ValueError(
+                        f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
+                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+                        f"between distributed metric instances."
+                    ) from None
+                if i == self.max_concurrent_cache_files - 1:
+                    raise ValueError(
+                        f"Cannot acquire lock, too many metric instances are operating concurrently on this file system. "
+                        f"You should set a larger value of max_concurrent_cache_files when creating the metric "
+                        f"(current value is {self.max_concurrent_cache_files})."
+                    ) from None
+                # In other cases (allowed to find a new file name + not yet at max num of attempts) we can try to sample a new hashing name.
+                file_uuid = str(uuid.uuid4())
+                file_path = os.path.join(
+                    self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
+                )
+            else:
+                break
+
+        return file_path, filelock
+
+    def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
+        """Get a lock on all the cache files in a distributed setup.
+        We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
+        """
+        if self.num_process == 1:
+            if self.cache_file_name is None:
+                raise ValueError(
+                    "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
+                    "at least once before calling `compute`."
+                )
+            file_paths = [self.cache_file_name]
+        else:
+            file_paths = [
+                os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
+                for process_id in range(self.num_process)
+            ]
+
+        # Let's acquire a lock on each process's file to be sure they are finished writing
+        filelocks = []
+        for process_id, file_path in enumerate(file_paths):
+            if process_id == 0:  # process 0 already has its lock file
+                filelocks.append(self.filelock)
+            else:
+                filelock = FileLock(file_path + ".lock")
+                try:
+                    filelock.acquire(timeout=self.timeout)
+                except Timeout:
+                    raise ValueError(
+                        f"Cannot acquire lock on cached file {file_path} for process {process_id}."
+ ) from None + else: + filelocks.append(filelock) + + return file_paths, filelocks + + def _check_all_processes_locks(self): + expected_lock_file_names = [ + os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock") + for process_id in range(self.num_process) + ] + for expected_lock_file_name in expected_lock_file_names: + nofilelock = FileFreeLock(expected_lock_file_name) + try: + nofilelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." + ) from None + else: + nofilelock.release() + + def _check_rendez_vous(self): + expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock") + nofilelock = FileFreeLock(expected_lock_file_name) + try: + nofilelock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError( + f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist." + ) from None + else: + nofilelock.release() + lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock") + rendez_vous_lock = FileLock(lock_file_name) + try: + rendez_vous_lock.acquire(timeout=self.timeout) + except Timeout: + raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None + else: + rendez_vous_lock.release() + + def _finalize(self): + """Close all the writing process and load/gather the data + from all the nodes if main node or all_process is True. + """ + if self.writer is not None: + self.writer.finalize() + self.writer = None + # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data + if self.filelock is not None and self.process_id > 0: + self.filelock.release() + + if self.keep_in_memory: + # Read the predictions and references + reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features)) + self.data = Dataset.from_buffer(self.buf_writer.getvalue()) + + elif self.process_id == 0: + # Let's acquire a lock on each node files to be sure they are finished writing + file_paths, filelocks = self._get_all_cache_files() + + # Read the predictions and references + try: + reader = ArrowReader(path="", info=DatasetInfo(features=self.features)) + self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths])) + except FileNotFoundError: + raise ValueError( + "Error in finalize: another metric instance is already using the local cache file. " + "Please specify an experiment_id to avoid collision between distributed metric instances." + ) from None + + # Store file paths and locks and we will release/delete them after the computation. + self.file_paths = file_paths + self.filelocks = filelocks + + def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]: + """Compute the metrics. + + Usage of positional arguments is not allowed to prevent mistakes. + + Args: + predictions (list/array/tensor, optional): Predictions. + references (list/array/tensor, optional): References. + **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute` + method (see details in the docstring). + + Return: + dict or None + + - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``). + - None if the metric is not run on the main process (``process_id != 0``). 
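+
+        A minimal sketch of the distributed case (assuming `rank` is this process's index and all
+        processes share the same cache directory):
+
+        ```py
+        >>> metric = load_metric("accuracy", num_process=2, process_id=rank, experiment_id="my_exp")
+        >>> metric.add_batch(predictions=predictions, references=references)
+        >>> score = metric.compute()  # dict on process 0, None on the other processes
+        ```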
+ + Example: + + ```py + >>> from datasets import load_metric + >>> metric = load_metric("accuracy") + >>> accuracy = metric.compute(predictions=model_prediction, references=labels) + ``` + """ + all_kwargs = {"predictions": predictions, "references": references, **kwargs} + if predictions is None and references is None: + missing_kwargs = {k: None for k in self.features if k not in all_kwargs} + all_kwargs.update(missing_kwargs) + else: + missing_inputs = [k for k in self.features if k not in all_kwargs] + if missing_inputs: + raise ValueError( + f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}" + ) + inputs = {input_name: all_kwargs[input_name] for input_name in self.features} + compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features} + + if any(v is not None for v in inputs.values()): + self.add_batch(**inputs) + self._finalize() + + self.cache_file_name = None + self.filelock = None + + if self.process_id == 0: + self.data.set_format(type=self.info.format) + + inputs = {input_name: self.data[input_name] for input_name in self.features} + with temp_seed(self.seed): + output = self._compute(**inputs, **compute_kwargs) + + if self.buf_writer is not None: + self.buf_writer = None + del self.data + self.data = None + else: + # Release locks and delete all the cache files. Process 0 is released last. + for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))): + logger.info(f"Removing {file_path}") + del self.data + self.data = None + del self.writer + self.writer = None + os.remove(file_path) + filelock.release() + + return output + else: + return None + + def add_batch(self, *, predictions=None, references=None, **kwargs): + """Add a batch of predictions and references for the metric's stack. + + Args: + predictions (list/array/tensor, optional): Predictions. + references (list/array/tensor, optional): References. + + Example: + + ```py + >>> from datasets import load_metric + >>> metric = load_metric("accuracy") + >>> metric.add_batch(predictions=model_prediction, references=labels) + ``` + """ + bad_inputs = [input_name for input_name in kwargs if input_name not in self.features] + if bad_inputs: + raise ValueError(f"Bad inputs for metric: {bad_inputs}. 
All required inputs are {list(self.features)}")
+        batch = {"predictions": predictions, "references": references, **kwargs}
+        batch = {input_name: batch[input_name] for input_name in self.features}
+        batch = self.info.features.encode_batch(batch)
+        if self.writer is None:
+            self._init_writer()
+        try:
+            self.writer.write_batch(batch)
+        except pa.ArrowInvalid:
+            if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
+                col0 = next(iter(batch))
+                bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
+                error_msg = (
+                    f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
+                )
+            elif sorted(self.features) != ["predictions", "references"]:
+                error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+                error_msg_inputs = ",\n".join(
+                    f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
+                )
+                error_msg += error_msg_inputs
+            else:
+                error_msg = (
+                    f"Predictions and/or references don't match the expected format.\n"
+                    f"Expected format: {self.features},\n"
+                    f"Input predictions: {summarize_if_long_list(predictions)},\n"
+                    f"Input references: {summarize_if_long_list(references)}"
+                )
+            raise ValueError(error_msg) from None
+
+    def add(self, *, prediction=None, reference=None, **kwargs):
+        """Add one prediction and reference for the metric's stack.
+
+        Args:
+            prediction (list/array/tensor, optional): Prediction.
+            reference (list/array/tensor, optional): Reference.
+
+        Example:
+
+        ```py
+        >>> from datasets import load_metric
+        >>> metric = load_metric("accuracy")
+        >>> metric.add(prediction=model_prediction, reference=label)
+        ```
+        """
+        bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+        if bad_inputs:
+            raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+        example = {"predictions": prediction, "references": reference, **kwargs}
+        example = {input_name: example[input_name] for input_name in self.features}
+        example = self.info.features.encode_example(example)
+        if self.writer is None:
+            self._init_writer()
+        try:
+            self.writer.write(example)
+        except pa.ArrowInvalid:
+            error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+            error_msg_inputs = ",\n".join(
+                f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
+            )
+            error_msg += error_msg_inputs
+            raise ValueError(error_msg) from None
+
+    def _init_writer(self, timeout=1):
+        if self.num_process > 1:
+            if self.process_id == 0:
+                file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+                self.rendez_vous_lock = FileLock(file_path)
+                try:
+                    self.rendez_vous_lock.acquire(timeout=timeout)
+                except TimeoutError:
+                    raise ValueError(
+                        f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
+                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+                        f"between distributed metric instances."
+ ) from None + + if self.keep_in_memory: + self.buf_writer = pa.BufferOutputStream() + self.writer = ArrowWriter( + features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size + ) + else: + self.buf_writer = None + + # Get cache file name and lock it + if self.cache_file_name is None or self.filelock is None: + cache_file_name, filelock = self._create_cache_file() # get ready + self.cache_file_name = cache_file_name + self.filelock = filelock + + self.writer = ArrowWriter( + features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size + ) + # Setup rendez-vous here if + if self.num_process > 1: + if self.process_id == 0: + self._check_all_processes_locks() # wait for everyone to be ready + self.rendez_vous_lock.release() # let everyone go + else: + self._check_rendez_vous() # wait for master to be ready and to let everyone go + + def _info(self) -> MetricInfo: + """Construct the MetricInfo object. See `MetricInfo` for details. + + Warning: This function is only called once and the result is cached for all + following .info() calls. + + Returns: + info: (MetricInfo) The metrics information + """ + raise NotImplementedError + + def download_and_prepare( + self, + download_config: Optional[DownloadConfig] = None, + dl_manager: Optional[DownloadManager] = None, + ): + """Downloads and prepares dataset for reading. + + Args: + download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters. + dl_manager (:class:`DownloadManager`, optional): Specific download manager to use. + """ + if dl_manager is None: + if download_config is None: + download_config = DownloadConfig() + download_config.cache_dir = os.path.join(self.data_dir, "downloads") + download_config.force_download = False + + dl_manager = DownloadManager( + dataset_name=self.name, download_config=download_config, data_dir=self.data_dir + ) + + self._download_and_prepare(dl_manager) + + def _download_and_prepare(self, dl_manager): + """Downloads and prepares resources for the metric. + + This is the internal implementation to overwrite called when user calls + `download_and_prepare`. It should download all required resources for the metric. + + Args: + dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data. + """ + return None + + def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]: + """This method defines the common API for all the metrics in the library""" + raise NotImplementedError + + def __del__(self): + if hasattr(self, "filelock") and self.filelock is not None: + self.filelock.release() + if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None: + self.rendez_vous_lock.release() + if hasattr(self, "writer"): # in case it was already deleted + del self.writer + if hasattr(self, "data"): # in case it was already deleted + del self.data diff --git a/testbed/huggingface__datasets/src/datasets/naming.py b/testbed/huggingface__datasets/src/datasets/naming.py new file mode 100644 index 0000000000000000000000000000000000000000..182673b53f594ade28b9743732d8b4b66a04dd5a --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/naming.py @@ -0,0 +1,83 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Utilities for file names."""
+import itertools
+import os
+import re
+
+
+_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
+_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
+
+_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
+_multiple_underscores_re = re.compile(r"(_{2,})")
+
+_split_re = r"^\w+(\.\w+)*$"
+
+INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
+
+
+def camelcase_to_snakecase(name):
+    """Convert camel-case string to snake-case."""
+    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
+    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
+    return name.lower()
+
+
+def snakecase_to_camelcase(name):
+    """Convert snake-case string to camel-case string."""
+    name = _single_underscore_re.split(name)
+    name = [_multiple_underscores_re.split(n) for n in name]
+    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
+
+
+def filename_prefix_for_name(name):
+    if os.path.basename(name) != name:
+        raise ValueError(f"Should be a dataset name, not a path: {name}")
+    return camelcase_to_snakecase(name)
+
+
+def filename_prefix_for_split(name, split):
+    if os.path.basename(name) != name:
+        raise ValueError(f"Should be a dataset name, not a path: {name}")
+    if not re.match(_split_re, split):
+        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+    return f"{filename_prefix_for_name(name)}-{split}"
+
+
+def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
+    prefix = filename_prefix_for_split(dataset_name, split)
+    if filetype_suffix:
+        prefix += f".{filetype_suffix}"
+    filepath = os.path.join(data_dir, prefix)
+    return f"{filepath}*"
+
+
+def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
+    prefix = filename_prefix_for_split(dataset_name, split)
+    prefix = os.path.join(path, prefix)
+
+    if shard_lengths:
+        num_shards = len(shard_lengths)
+        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
+        if filetype_suffix:
+            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
+        return filenames
+    else:
+        filename = prefix
+        if filetype_suffix:
+            filename += f".{filetype_suffix}"
+        return [filename]
diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddd102890dd9d18b68ed8f005ea31df91242100d
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/__init__.py
@@ -0,0 +1,65 @@
+import inspect
+import re
+from typing import Dict, List
+
+from huggingface_hub.utils import insecure_hashlib
+
+from .arrow import arrow
+from .audiofolder import audiofolder
+from .csv import csv
+from .imagefolder import imagefolder
+from .json import json
+from .pandas import pandas
+from .parquet import parquet
+from .sql import sql  # noqa F401
+from .text import text
+
+
+def _hash_python_lines(lines: List[str]) -> str:
+    filtered_lines = []
+    for line in lines:
+        line = re.sub(r"#.*", "", line)  # remove comments
+        if line:
+            filtered_lines.append(line)
+    full_str = "\n".join(filtered_lines)
+
+    # Make a hash
from all this code + full_bytes = full_str.encode("utf-8") + return insecure_hashlib.sha256(full_bytes).hexdigest() + + +# get importable module names and hash for caching +_PACKAGED_DATASETS_MODULES = { + "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), + "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), + "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), + "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), + "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), + "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), + "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), + "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), +} + +# Used to infer the module to use based on the data files extensions +_EXTENSION_TO_MODULE = { + ".csv": ("csv", {}), + ".tsv": ("csv", {"sep": "\t"}), + ".json": ("json", {}), + ".jsonl": ("json", {}), + ".parquet": ("parquet", {}), + ".arrow": ("arrow", {}), + ".txt": ("text", {}), +} +_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) +_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"} + +# Used to filter data files based on extensions given a module name +_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} +for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): + _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) + +for _module in _MODULE_TO_EXTENSIONS: + _MODULE_TO_EXTENSIONS[_module].append(".zip") diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/arrow/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/arrow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/arrow/arrow.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/arrow/arrow.py new file mode 100644 index 0000000000000000000000000000000000000000..dad2cdeffc2a3de13598d234a4784a3b3cc07066 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/arrow/arrow.py @@ -0,0 +1,73 @@ +import itertools +from dataclasses import dataclass +from typing import Optional + +import pyarrow as pa + +import datasets +from datasets.table import table_cast + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class ArrowConfig(datasets.BuilderConfig): + """BuilderConfig for Arrow.""" + + features: Optional[datasets.Features] = None + + +class Arrow(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = ArrowConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = 
dl_manager.download_and_extract(self.config.data_files)
+        if isinstance(data_files, (str, list, tuple)):
+            files = data_files
+            if isinstance(files, str):
+                files = [files]
+            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+            files = [dl_manager.iter_files(file) for file in files]
+            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+        splits = []
+        for split_name, files in data_files.items():
+            if isinstance(files, str):
+                files = [files]
+            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+            files = [dl_manager.iter_files(file) for file in files]
+            # Infer features if they are stored in the arrow schema
+            if self.info.features is None:
+                for file in itertools.chain.from_iterable(files):
+                    with open(file, "rb") as f:
+                        self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
+                    break
+            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+        return splits
+
+    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+        if self.info.features is not None:
+            # more expensive cast to support nested features with keys in a different order
+            # allows str <-> int/float or str to Audio for example
+            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+        return pa_table
+
+    def _generate_tables(self, files):
+        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+            with open(file, "rb") as f:
+                try:
+                    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+                        pa_table = pa.Table.from_batches([record_batch])
+                        # Uncomment for debugging (will print the Arrow table size and elements)
+                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+                except ValueError as e:
+                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+                    raise
diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/audiofolder/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/audiofolder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py
new file mode 100644
index 0000000000000000000000000000000000000000..51044143039e98af0f9fd7d1ecdf1cab229e58a1
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py
@@ -0,0 +1,68 @@
+from typing import List
+
+import datasets
+from datasets.tasks import AudioClassification
+
+from ..folder_based_builder import folder_based_builder
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+    """Builder Config for AudioFolder."""
+
+    drop_labels: bool = None
+    drop_metadata: bool = None
+
+
+class AudioFolder(folder_based_builder.FolderBasedBuilder):
+    BASE_FEATURE = datasets.Audio
+    BASE_COLUMN_NAME = "audio"
+    BUILDER_CONFIG_CLASS = AudioFolderConfig
+    EXTENSIONS: List[str]  # definition at the bottom of the script
+    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for
format in sf.available_formats().keys()] +# +# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30: +# AUDIO_EXTENSIONS.extend([".mp3", ".opus"]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +AUDIO_EXTENSIONS = [ + ".aiff", + ".au", + ".avr", + ".caf", + ".flac", + ".htk", + ".svx", + ".mat4", + ".mat5", + ".mpc2k", + ".ogg", + ".paf", + ".pvf", + ".raw", + ".rf64", + ".sd2", + ".sds", + ".ircam", + ".voc", + ".w64", + ".wav", + ".nist", + ".wavex", + ".wve", + ".xi", + ".mp3", + ".opus", +] +AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/csv/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/csv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/folder_based_builder/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/folder_based_builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..146ef4e613b9d943b160c04b2286b2a2d331b80a --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py @@ -0,0 +1,406 @@ +import collections +import itertools +import os +from dataclasses import dataclass +from typing import List, Optional, Tuple, Type + +import pandas as pd +import pyarrow as pa +import pyarrow.json as paj + +import datasets +from datasets.features.features import FeatureType +from datasets.tasks.base import TaskTemplate + + +logger = datasets.utils.logging.get_logger(__name__) + + +def count_path_segments(path): + return path.replace("\\", "/").count("/") + + +@dataclass +class FolderBasedBuilderConfig(datasets.BuilderConfig): + """BuilderConfig for AutoFolder.""" + + features: Optional[datasets.Features] = None + drop_labels: bool = None + drop_metadata: bool = None + + +class FolderBasedBuilder(datasets.GeneratorBasedBuilder): + """ + Base class for generic data loaders for vision and image data. + + + Abstract class attributes to be overridden by a child class: + BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) + BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) 
+ BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` + EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files + will be included in a dataset) + CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure + """ + + BASE_FEATURE: Type[FeatureType] + BASE_COLUMN_NAME: str + BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig + EXTENSIONS: List[str] + CLASSIFICATION_TASK: TaskTemplate + + METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + + # Do an early pass if: + # * `drop_labels` is None (default) or False, to infer the class labels + # * `drop_metadata` is None (default) or False, to find the metadata files + do_analyze = not self.config.drop_labels or not self.config.drop_metadata + labels, path_depths = set(), set() + metadata_files = collections.defaultdict(set) + + def analyze(files_or_archives, downloaded_files_or_dirs, split): + if len(downloaded_files_or_dirs) == 0: + return + # The files are separated from the archives at this point, so check the first sample + # to see if it's a file or a directory and iterate accordingly + if os.path.isfile(downloaded_files_or_dirs[0]): + original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs + for original_file, downloaded_file in zip(original_files, downloaded_files): + original_file, downloaded_file = str(original_file), str(downloaded_file) + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(original_file))) + path_depths.add(count_path_segments(original_file)) + elif os.path.basename(original_file) in self.METADATA_FILENAMES: + metadata_files[split].add((original_file, downloaded_file)) + else: + original_file_name = os.path.basename(original_file) + logger.debug( + f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." + ) + else: + archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs + for archive, downloaded_dir in zip(archives, downloaded_dirs): + archive, downloaded_dir = str(archive), str(downloaded_dir) + for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) + path_depths.add(count_path_segments(downloaded_dir_file)) + elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: + metadata_files[split].add((None, downloaded_dir_file)) + else: + archive_file_name = os.path.basename(archive) + original_file_name = os.path.basename(downloaded_dir_file) + logger.debug( + f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." 
+ ) + + data_files = self.config.data_files + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files, archives = self._split_files_and_archives(files) + downloaded_files = dl_manager.download(files) + downloaded_dirs = dl_manager.download_and_extract(archives) + if do_analyze: # drop_metadata is None or False, drop_labels is None or False + logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") + analyze(files, downloaded_files, split_name) + analyze(archives, downloaded_dirs, split_name) + + if metadata_files: + # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False + add_metadata = not self.config.drop_metadata + # if `metadata_files` are found, add labels only if + # `drop_labels` is set up to False explicitly (not-default behavior) + add_labels = self.config.drop_labels is False + else: + # if `metadata_files` are not found, don't add metadata + add_metadata = False + # if `metadata_files` are not found and `drop_labels` is None (default) - + # add labels if files are on the same level in directory hierarchy and there is more than one label + add_labels = ( + (len(labels) > 1 and len(path_depths) == 1) + if self.config.drop_labels is None + else not self.config.drop_labels + ) + + if add_labels: + logger.info("Adding the labels inferred from data directories to the dataset's features...") + if add_metadata: + logger.info("Adding metadata to the dataset...") + else: + add_labels, add_metadata, metadata_files = False, False, {} + + splits.append( + datasets.SplitGenerator( + name=split_name, + gen_kwargs={ + "files": list(zip(files, downloaded_files)) + + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], + "metadata_files": metadata_files, + "split_name": split_name, + "add_labels": add_labels, + "add_metadata": add_metadata, + }, + ) + ) + + if add_metadata: + # Verify that: + # * all metadata files have the same set of features + # * the `file_name` key is one of the metadata keys and is of type string + features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] + + # Check that all metadata files share the same format + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] + for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values()) + } + if len(metadata_ext) > 1: + raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") + metadata_ext = metadata_ext.pop() + + for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + features_per_metadata_file.append( + (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) + ) + for downloaded_metadata_file, metadata_features in features_per_metadata_file: + if metadata_features != features_per_metadata_file[0][1]: + raise ValueError( + f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" + ) + metadata_features = features_per_metadata_file[0][1] + if "file_name" not in metadata_features: + raise ValueError("`file_name` must be present as dictionary key in metadata files") + if metadata_features["file_name"] != datasets.Value("string"): + raise ValueError("`file_name` key must be a string") + del metadata_features["file_name"] + else: 
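+            # No usable metadata files were found (or metadata was dropped), so there are no
+            # metadata features to merge into the dataset features below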
+            metadata_features = None
+
+        # Normally, we would do this in _info, but we need to know the labels and/or metadata
+        # before building the features
+        if self.config.features is None:
+            if add_labels:
+                self.info.features = datasets.Features(
+                    {
+                        self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
+                        "label": datasets.ClassLabel(names=sorted(labels)),
+                    }
+                )
+                self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
+            else:
+                self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
+
+            if add_metadata:
+                # Warn if there are duplicated keys in metadata compared to the existing features
+                # (`BASE_COLUMN_NAME`, optionally "label")
+                duplicated_keys = set(self.info.features) & set(metadata_features)
+                if duplicated_keys:
+                    logger.warning(
+                        f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
+                        f"the features dictionary."
+                    )
+                # skip metadata duplicated keys
+                self.info.features.update(
+                    {
+                        feature: metadata_features[feature]
+                        for feature in metadata_features
+                        if feature not in duplicated_keys
+                    }
+                )
+
+        return splits
+
+    def _split_files_and_archives(self, data_files):
+        files, archives = [], []
+        for data_file in data_files:
+            _, data_file_ext = os.path.splitext(data_file)
+            if data_file_ext.lower() in self.EXTENSIONS:
+                files.append(data_file)
+            elif os.path.basename(data_file) in self.METADATA_FILENAMES:
+                files.append(data_file)
+            else:
+                archives.append(data_file)
+        return files, archives
+
+    def _read_metadata(self, metadata_file, metadata_ext: str = ""):
+        if metadata_ext == ".csv":
+            # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
+            return pa.Table.from_pandas(pd.read_csv(metadata_file))
+        else:
+            with open(metadata_file, "rb") as f:
+                return paj.read_json(f)
+
+    def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
+        split_metadata_files = metadata_files.get(split_name, [])
+        sample_empty_metadata = (
+            {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
+        )
+        last_checked_dir = None
+        metadata_dir = None
+        metadata_dict = None
+        downloaded_metadata_file = None
+
+        metadata_ext = ""
+        if split_metadata_files:
+            metadata_ext = {
+                os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
+            }
+            metadata_ext = metadata_ext.pop()
+
+        file_idx = 0
+        for original_file, downloaded_file_or_dir in files:
+            if original_file is not None:
+                _, original_file_ext = os.path.splitext(original_file)
+                if original_file_ext.lower() in self.EXTENSIONS:
+                    if add_metadata:
+                        # If the file is of a supported type and we've just entered a new directory,
+                        # find the nearest metadata file (by counting path segments) for the directory
+                        current_dir = os.path.dirname(original_file)
+                        if last_checked_dir is None or last_checked_dir != current_dir:
+                            last_checked_dir = current_dir
+                            metadata_file_candidates = [
+                                (
+                                    os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
+                                    metadata_file_candidate,
+                                    downloaded_metadata_file,
+                                )
+                                for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
+                                if metadata_file_candidate
+                                is not None  # ignore metadata_files that are inside archives
+                                and not os.path.relpath(
+                                    original_file, os.path.dirname(metadata_file_candidate)
+                                ).startswith("..")
+                            ]
+                            if metadata_file_candidates:
+                                _, metadata_file, downloaded_metadata_file =
min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." + ) + if metadata_dir is not None and downloaded_metadata_file is not None: + file_relpath = os.path.relpath(original_file, metadata_dir) + file_relpath = file_relpath.replace("\\", "/") + if file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." + ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(original_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_file_or_dir, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 + else: + for downloaded_dir_file in downloaded_file_or_dir: + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + current_dir = os.path.dirname(downloaded_dir_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is None # ignore metadata_files that are not inside archives + and not os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(downloaded_metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." 
+ ) + if metadata_dir is not None and downloaded_metadata_file is not None: + downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) + downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") + if downloaded_dir_file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[downloaded_dir_file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." + ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_dir_file, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/generator/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/generator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/generator/generator.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/generator/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..1efa721b159668a72d29f5afa38c36bcaff084ea --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/generator/generator.py @@ -0,0 +1,31 @@ +from dataclasses import dataclass +from typing import Callable, Optional + +import datasets + + +@dataclass +class GeneratorConfig(datasets.BuilderConfig): + generator: Optional[Callable] = None + gen_kwargs: Optional[dict] = None + features: Optional[datasets.Features] = None + + def __post_init__(self): + assert self.generator is not None, "generator must be specified" + + if self.gen_kwargs is None: + self.gen_kwargs = {} + + +class Generator(datasets.GeneratorBasedBuilder): + BUILDER_CONFIG_CLASS = GeneratorConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)] + + def _generate_examples(self, **gen_kwargs): + for idx, ex in enumerate(self.config.generator(**gen_kwargs)): + yield idx, ex diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/imagefolder/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/imagefolder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2dd0d419a626dbb5149cb56abf69c82d35deb4 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py @@ -0,0 +1,104 @@ +from typing import List + +import datasets +from datasets.tasks import ImageClassification + +from ..folder_based_builder import folder_based_builder + + +logger = datasets.utils.logging.get_logger(__name__) + + +class 
ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): + """BuilderConfig for ImageFolder.""" + + drop_labels: bool = None + drop_metadata: bool = None + + +class ImageFolder(folder_based_builder.FolderBasedBuilder): + BASE_FEATURE = datasets.Image + BASE_COLUMN_NAME = "image" + BUILDER_CONFIG_CLASS = ImageFolderConfig + EXTENSIONS: List[str] # definition at the bottom of the script + CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label") + + +# Obtained with: +# ``` +# import PIL.Image +# IMAGE_EXTENSIONS = [] +# PIL.Image.init() +# for ext, format in PIL.Image.EXTENSION.items(): +# if format in PIL.Image.OPEN: +# IMAGE_EXTENSIONS.append(ext[1:]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +IMAGE_EXTENSIONS = [ + ".blp", + ".bmp", + ".dib", + ".bufr", + ".cur", + ".pcx", + ".dcx", + ".dds", + ".ps", + ".eps", + ".fit", + ".fits", + ".fli", + ".flc", + ".ftc", + ".ftu", + ".gbr", + ".gif", + ".grib", + ".h5", + ".hdf", + ".png", + ".apng", + ".jp2", + ".j2k", + ".jpc", + ".jpf", + ".jpx", + ".j2c", + ".icns", + ".ico", + ".im", + ".iim", + ".tif", + ".tiff", + ".jfif", + ".jpe", + ".jpg", + ".jpeg", + ".mpg", + ".mpeg", + ".msp", + ".pcd", + ".pxr", + ".pbm", + ".pgm", + ".ppm", + ".pnm", + ".psd", + ".bw", + ".rgb", + ".rgba", + ".sgi", + ".ras", + ".tga", + ".icb", + ".vda", + ".vst", + ".webp", + ".wmf", + ".emf", + ".xbm", + ".xpm", +] +ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/json/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/json/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/pandas/pandas.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/pandas/pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad9a6f49931bcd0cc2e395becb4017d3f4a18a7 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/pandas/pandas.py @@ -0,0 +1,57 @@ +import itertools +from dataclasses import dataclass +from typing import Optional + +import pandas as pd +import pyarrow as pa + +import datasets +from datasets.table import table_cast + + +@dataclass +class PandasConfig(datasets.BuilderConfig): + """BuilderConfig for Pandas.""" + + features: Optional[datasets.Features] = None + + +class Pandas(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = PandasConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files 
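For reference, a typical way the `ImageFolder` builder above is exercised (directory layout hypothetical): labels are inferred from parent directory names unless `drop_labels=True`, and metadata files are merged in unless `drop_metadata=True`.

```python
from datasets import load_dataset

# data/train/cat/0001.jpg, data/train/dog/0002.jpg, ... -> "label" from the folder name
ds = load_dataset("imagefolder", data_dir="data", split="train")
print(ds.features)  # image (Image) plus label (ClassLabel), and any metadata columns
```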
= [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + # more expensive cast to support nested features with keys in a different order + # allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, self.config.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + for i, file in enumerate(itertools.chain.from_iterable(files)): + with open(file, "rb") as f: + pa_table = pa.Table.from_pandas(pd.read_pickle(f)) + yield i, self._cast_table(pa_table) diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/parquet/parquet.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/parquet/parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..73f814a99a50ee60bdbfd976cc160e0224655d7f --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/parquet/parquet.py @@ -0,0 +1,97 @@ +import itertools +from dataclasses import dataclass +from typing import List, Optional + +import pyarrow as pa +import pyarrow.parquet as pq + +import datasets +from datasets.table import table_cast + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class ParquetConfig(datasets.BuilderConfig): + """BuilderConfig for Parquet.""" + + batch_size: int = 10_000 + columns: Optional[List[str]] = None + features: Optional[datasets.Features] = None + + +class Parquet(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = ParquetConfig + + def _info(self): + if ( + self.config.columns is not None + and self.config.features is not None + and set(self.config.columns) != set(self.config.features) + ): + raise ValueError( + "The columns and features argument must contain the same columns, but got ", + f"{self.config.columns} and {self.config.features}", + ) + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + # Use `dl_manager.iter_files` to skip hidden files in an extracted archive + files = [dl_manager.iter_files(file) for file in files] + # Infer features if they are stored in the arrow schema + if self.info.features is None: + for file in itertools.chain.from_iterable(files): + with open(file, "rb") as f: + self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f)) + break + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + if self.config.columns is not None and set(self.config.columns) != set(self.info.features): + self.info.features = datasets.Features( + {col: feat for col, feat in 
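The Pandas builder above loads pickled DataFrames; a small round-trip sketch (file name hypothetical):

```python
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("train.pkl")
# Each file is read back with pd.read_pickle and converted via pa.Table.from_pandas
ds = load_dataset("pandas", data_files="train.pkl", split="train")
```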
self.info.features.items() if col in self.config.columns} + ) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.info.features is not None: + # more expensive cast to support nested features with keys in a different order + # allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, self.info.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + if self.config.features is not None and self.config.columns is not None: + if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns): + raise ValueError( + f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" + ) + for file_idx, file in enumerate(itertools.chain.from_iterable(files)): + with open(file, "rb") as f: + parquet_file = pq.ParquetFile(f) + try: + for batch_idx, record_batch in enumerate( + parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns) + ): + pa_table = pa.Table.from_batches([record_batch]) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/spark/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/spark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/spark/spark.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/spark/spark.py new file mode 100644 index 0000000000000000000000000000000000000000..fee5f7c4c6123985beb2026ba4a01f80d7625205 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/spark/spark.py @@ -0,0 +1,349 @@ +import os +import posixpath +import uuid +from dataclasses import dataclass +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union + +import numpy as np +import pyarrow as pa + +import datasets +from datasets.arrow_writer import ArrowWriter, ParquetWriter +from datasets.config import MAX_SHARD_SIZE +from datasets.filesystems import ( + is_remote_filesystem, + rename, +) +from datasets.iterable_dataset import _BaseExamplesIterable +from datasets.utils.py_utils import convert_file_size_to_int + + +logger = datasets.utils.logging.get_logger(__name__) + +if TYPE_CHECKING: + import pyspark + + +@dataclass +class SparkConfig(datasets.BuilderConfig): + """BuilderConfig for Spark.""" + + features: Optional[datasets.Features] = None + + +def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]): + df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}") + for partition_id in new_partition_order[1:]: + partition_df = df.select("*").where(f"part_id = {partition_id}") + df_combined = df_combined.union(partition_df) + return df_combined + + +def _generate_iterable_examples( + df: "pyspark.sql.DataFrame", + partition_order: List[int], +): + import pyspark + + def generate_fn(): + df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id")) + 
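The Parquet builder above supports reading a column subset; assuming extra keyword arguments to `load_dataset` are forwarded to `ParquetConfig` (as with the other packaged builder configs), usage looks like:

```python
from datasets import load_dataset

# `columns` reaches ParquetFile.iter_batches, so only these columns are decoded
ds = load_dataset("parquet", data_files="data/train.parquet", columns=["id", "text"], split="train")
```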
partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order) + row_id = 0 + # pipeline next partition in parallel to hide latency + rows = partition_df.toLocalIterator(prefetchPartitions=True) + curr_partition = -1 + for row in rows: + row_as_dict = row.asDict() + part_id = row_as_dict["part_id"] + row_as_dict.pop("part_id") + if curr_partition != part_id: + curr_partition = part_id + row_id = 0 + yield f"{part_id}_{row_id}", row_as_dict + row_id += 1 + + return generate_fn + + +class SparkExamplesIterable(_BaseExamplesIterable): + def __init__( + self, + df: "pyspark.sql.DataFrame", + partition_order=None, + ): + self.df = df + self.partition_order = partition_order or range(self.df.rdd.getNumPartitions()) + self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order) + + def __iter__(self): + yield from self.generate_examples_fn() + + def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable": + partition_order = list(range(self.df.rdd.getNumPartitions())) + generator.shuffle(partition_order) + return SparkExamplesIterable(self.df, partition_order=partition_order) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable": + partition_order = self.split_shard_indices_by_worker(worker_id, num_workers) + return SparkExamplesIterable(self.df, partition_order=partition_order) + + @property + def n_shards(self) -> int: + return len(self.partition_order) + + +class Spark(datasets.DatasetBuilder): + BUILDER_CONFIG_CLASS = SparkConfig + + def __init__( + self, + df: "pyspark.sql.DataFrame", + cache_dir: str = None, + working_dir: str = None, + **config_kwargs, + ): + import pyspark + + self._spark = pyspark.sql.SparkSession.builder.getOrCreate() + self.df = df + self._working_dir = working_dir + + super().__init__( + cache_dir=cache_dir, + config_name=str(self.df.semanticHash()), + **config_kwargs, + ) + + def _validate_cache_dir(self): + # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling + # error due to pickling the SparkContext. + cache_dir = self._cache_dir + + # Returns the path of the created file. + def create_cache_and_write_probe(context): + # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories + # already exist. + os.makedirs(cache_dir, exist_ok=True) + probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) + # Opening the file in append mode will create a new file unless it already exists, in which case it will not + # change the file contents. + open(probe_file, "a") + return [probe_file] + + if self._spark.conf.get("spark.master", "").startswith("local"): + return + + # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS + # accessible to the driver. + # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
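The `Spark` builder above is what presumably backs `Dataset.from_spark`; a minimal local sketch (schema made up). Note that on a `local` master the cache-directory probe above returns early, so no shared filesystem is needed:

```python
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label int")
ds = Dataset.from_spark(df)
```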
+ if self._cache_dir: + probe = ( + self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() + ) + if os.path.isfile(probe[0]): + return + + raise ValueError( + "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" + ) + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] + + def _repartition_df_if_needed(self, max_shard_size): + import pyspark + + def get_arrow_batch_size(it): + for batch in it: + yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) + + df_num_rows = self.df.count() + sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 + # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. + approx_bytes_per_row = ( + self.df.limit(sample_num_rows) + .repartition(1) + .mapInArrow(get_arrow_batch_size, "batch_bytes: long") + .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) + .collect()[0] + .sample_bytes + / sample_num_rows + ) + approx_total_size = approx_bytes_per_row * df_num_rows + if approx_total_size > max_shard_size: + # Make sure there is at least one row per partition. + new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) + self.df = self.df.repartition(new_num_partitions) + + def _prepare_split_single( + self, + fpath: str, + file_format: str, + max_shard_size: int, + ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: + import pyspark + + writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter + working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath + embed_local_files = file_format == "parquet" + + # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to + # pickling the SparkContext. + features = self.config.features + writer_batch_size = self._writer_batch_size + storage_options = self._fs.storage_options + + def write_arrow(it): + # Within the same SparkContext, no two task attempts will share the same attempt ID. + task_id = pyspark.TaskContext().taskAttemptId() + first_batch = next(it, None) + if first_batch is None: + # Some partitions might not receive any data. 
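The repartitioning rule above is easier to see with concrete (made-up) numbers:

```python
# Hypothetical numbers: ~2 KB per sampled row in Arrow format, 1M rows, 500 MiB cap.
approx_bytes_per_row = 2_000
df_num_rows = 1_000_000
max_shard_size = 500 << 20  # 500 MiB

approx_total_size = approx_bytes_per_row * df_num_rows  # 2 GB
if approx_total_size > max_shard_size:
    # int(2_000_000_000 / 524_288_000) == 3, capped at one row per partition
    new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
    print(new_num_partitions)  # 3
```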
+ # `write_arrow` is a generator, so a bare `return <value>` would be silently discarded; yield the zero-stats batch instead + yield pa.RecordBatch.from_arrays( + [[task_id], [0], [0]], + names=["task_id", "num_examples", "num_bytes"], + ) + return + shard_id = 0 + writer = writer_class( + features=features, + path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + writer_batch_size=writer_batch_size, + storage_options=storage_options, + embed_local_files=embed_local_files, + ) + table = pa.Table.from_batches([first_batch]) + writer.write_table(table) + for batch in it: + if max_shard_size is not None and writer._num_bytes >= max_shard_size: + num_examples, num_bytes = writer.finalize() + writer.close() + yield pa.RecordBatch.from_arrays( + [[task_id], [num_examples], [num_bytes]], + names=["task_id", "num_examples", "num_bytes"], + ) + shard_id += 1 + writer = writer_class( + features=writer._features, + path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + writer_batch_size=writer_batch_size, + storage_options=storage_options, + embed_local_files=embed_local_files, + ) + table = pa.Table.from_batches([batch]) + writer.write_table(table) + + if writer._num_bytes > 0: + num_examples, num_bytes = writer.finalize() + writer.close() + yield pa.RecordBatch.from_arrays( + [[task_id], [num_examples], [num_bytes]], + names=["task_id", "num_examples", "num_bytes"], + ) + + if working_fpath != fpath: + import shutil # missing from the module-level imports; needed for the cross-filesystem move below + + for file in os.listdir(os.path.dirname(working_fpath)): + dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) + # os.listdir yields bare names, so join with the source directory before moving + shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest) + + stats = ( + self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long") + .groupBy("task_id") + .agg( + pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), + pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), + pyspark.sql.functions.count("num_bytes").alias("num_shards"), + pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), + ) + .collect() + ) + for row in stats: + yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) + + def _prepare_split( + self, + split_generator: "datasets.SplitGenerator", + file_format: str = "arrow", + max_shard_size: Optional[Union[str, int]] = None, + num_proc: Optional[int] = None, + **kwargs, + ): + self._validate_cache_dir() + + max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) + self._repartition_df_if_needed(max_shard_size) + is_local = not is_remote_filesystem(self._fs) + path_join = os.path.join if is_local else posixpath.join + + SUFFIX = "-TTTTT-SSSSS-of-NNNNN" + fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" + fpath = path_join(self._output_dir, fname) + + total_num_examples = 0 + total_num_bytes = 0 + total_shards = 0 + task_id_and_num_shards = [] + all_shard_lengths = [] + + for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size): + ( + num_examples, + num_bytes, + num_shards, + shard_lengths, + ) = content + if num_bytes > 0: + total_num_examples += num_examples + total_num_bytes += num_bytes + total_shards += num_shards + task_id_and_num_shards.append((task_id, num_shards)) + all_shard_lengths.extend(shard_lengths) + + split_generator.split_info.num_examples = total_num_examples + split_generator.split_info.num_bytes = total_num_bytes + + # should rename everything at the end + logger.debug(f"Renaming {total_shards} shards.") + if total_shards > 1: + split_generator.split_info.shard_lengths = all_shard_lengths + + # Define fs outside of _rename_shard so that 
we don't reference self in the function, which will result in a + # pickling error due to pickling the SparkContext. + fs = self._fs + + # use the -SSSSS-of-NNNNN pattern + def _rename_shard( + task_id: int, + shard_id: int, + global_shard_id: int, + ): + rename( + fs, + fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), + ) + + args = [] + global_shard_id = 0 + for i in range(len(task_id_and_num_shards)): + task_id, num_shards = task_id_and_num_shards[i] + for shard_id in range(num_shards): + args.append([task_id, shard_id, global_shard_id]) + global_shard_id += 1 + self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() + else: + # don't use any pattern + shard_id = 0 + task_id = task_id_and_num_shards[0][0] + self._rename( + fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), + fpath.replace(SUFFIX, ""), + ) + + def _get_examples_iterable_for_split( + self, + split_generator: "datasets.SplitGenerator", + ) -> SparkExamplesIterable: + return SparkExamplesIterable(self.df) diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/sql/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/sql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/sql/sql.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/sql/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..b0791ba88594fb8e76c957a11cca9936cf321bb4 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/sql/sql.py @@ -0,0 +1,118 @@ +import sys +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union + +import pandas as pd +import pyarrow as pa + +import datasets +import datasets.config +from datasets.features.features import require_storage_cast +from datasets.table import table_cast + + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class SqlConfig(datasets.BuilderConfig): + """BuilderConfig for SQL.""" + + sql: Union[str, "sqlalchemy.sql.Selectable"] = None + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None + index_col: Optional[Union[str, List[str]]] = None + coerce_float: bool = True + params: Optional[Union[List, Tuple, Dict]] = None + parse_dates: Optional[Union[List, Dict]] = None + columns: Optional[List[str]] = None + chunksize: Optional[int] = 10_000 + features: Optional[datasets.Features] = None + + def __post_init__(self): + if self.sql is None: + raise ValueError("sql must be specified") + if self.con is None: + raise ValueError("con must be specified") + + def create_config_id( + self, + config_kwargs: dict, + custom_features: Optional[datasets.Features] = None, + ) -> str: + config_kwargs = config_kwargs.copy() + # We need to stringify the Selectable object to make its hash deterministic + + # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html + sql = config_kwargs["sql"] + if not isinstance(sql, str): + if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules: + import sqlalchemy + + if isinstance(sql, sqlalchemy.sql.Selectable): + engine = 
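The rename step above flattens the per-task `TTTTT-SSSSS` numbering into a single global `SSSSS-of-NNNNN` sequence; a sketch with made-up task and shard counts:

```python
# Hypothetical: Spark task 7 wrote two shards, task 9 wrote one.
task_id_and_num_shards = [(7, 2), (9, 1)]
total_shards = sum(n for _, n in task_id_and_num_shards)  # 3

global_shard_id = 0
for task_id, num_shards in task_id_and_num_shards:
    for shard_id in range(num_shards):
        src = f"ds-train-{task_id:05d}-{shard_id:05d}-of-NNNNN.arrow"
        dst = f"ds-train-{global_shard_id:05d}-of-{total_shards:05d}.arrow"
        print(src, "->", dst)  # e.g. ds-train-00007-00000-of-NNNNN.arrow -> ds-train-00000-of-00003.arrow
        global_shard_id += 1
```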
sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") + sql_str = str(sql.compile(dialect=engine.dialect)) + config_kwargs["sql"] = sql_str + else: + raise TypeError( + f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" + ) + else: + raise TypeError( + f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" + ) + con = config_kwargs["con"] + if not isinstance(con, str): + config_kwargs["con"] = id(con) + logger.info( + f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." + ) + + return super().create_config_id(config_kwargs, custom_features=custom_features) + + @property + def pd_read_sql_kwargs(self): + pd_read_sql_kwargs = { + "index_col": self.index_col, + "columns": self.columns, + "params": self.params, + "coerce_float": self.coerce_float, + "parse_dates": self.parse_dates, + } + return pd_read_sql_kwargs + + +class Sql(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = SqlConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all(not require_storage_cast(feature) for feature in self.config.features.values()): + # cheaper cast + pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) + else: + # more expensive cast; allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, schema) + return pa_table + + def _generate_tables(self): + chunksize = self.config.chunksize + sql_reader = pd.read_sql( + self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs + ) + sql_reader = [sql_reader] if chunksize is None else sql_reader + for chunk_idx, df in enumerate(sql_reader): + pa_table = pa.Table.from_pandas(df) + yield chunk_idx, self._cast_table(pa_table) diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/text/__init__.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/packaged_modules/text/text.py b/testbed/huggingface__datasets/src/datasets/packaged_modules/text/text.py new file mode 100644 index 0000000000000000000000000000000000000000..0f88475203cd018ab3b36eb3407e11bde1d26673 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/packaged_modules/text/text.py @@ -0,0 +1,128 @@ +import itertools +import warnings +from dataclasses import InitVar, dataclass +from io import StringIO +from typing import Optional + +import pyarrow as pa + +import datasets +from datasets.features.features import require_storage_cast +from datasets.table import table_cast + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class TextConfig(datasets.BuilderConfig): + """BuilderConfig for text files.""" + + features: Optional[datasets.Features] = None + encoding: str = "utf-8" + errors: InitVar[Optional[str]] = "deprecated" + encoding_errors: Optional[str] = None + chunksize: int = 10 << 20 # 10MB + keep_linebreaks: bool = False + sample_by: str = "line" + + def __post_init__(self, errors): + if 
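The `Sql` builder above presumably backs `Dataset.from_sql`; a self-contained SQLite sketch (database and table names made up). Passing the connection as a URI string keeps the config hashable, as `create_config_id` above explains; reading from a URI string requires `sqlalchemy`:

```python
import sqlite3
import pandas as pd
from datasets import Dataset

# Build a throwaway SQLite database to query.
with sqlite3.connect("demo.db") as con:
    pd.DataFrame({"id": [1, 2], "text": ["a", "b"]}).to_sql("docs", con, index=False)

ds = Dataset.from_sql("SELECT id, text FROM docs", "sqlite:///demo.db")
```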
errors != "deprecated": + warnings.warn( + "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'encoding_errors={errors}' instead.", + FutureWarning, + ) + self.encoding_errors = errors + + +class Text(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = TextConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. + + If str or List[str], then the dataset returns only the 'train' split. + If dict, then keys should be from the `datasets.Split` enum. + """ + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all(not require_storage_cast(feature) for feature in self.config.features.values()): + # cheaper cast + pa_table = pa_table.cast(schema) + else: + # more expensive cast; allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, schema) + return pa_table + else: + return pa_table.cast(pa.schema({"text": pa.string()})) + + def _generate_tables(self, files): + pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] + for file_idx, file in enumerate(itertools.chain.from_iterable(files)): + # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" + with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: + if self.config.sample_by == "line": + batch_idx = 0 + while True: + batch = f.read(self.config.chunksize) + if not batch: + break + batch += f.readline() # finish current line + # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) + batch = StringIO(batch).readlines() + if not self.config.keep_linebreaks: + batch = [line.rstrip("\n") for line in batch] + pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) + # Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield (file_idx, batch_idx), self._cast_table(pa_table) + batch_idx += 1 + elif self.config.sample_by == "paragraph": + batch_idx = 0 + batch = "" + while True: + new_batch = f.read(self.config.chunksize) + if not new_batch: + break + batch += new_batch + batch += f.readline() # finish current line + batch = batch.split("\n\n") + pa_table = pa.Table.from_arrays( + [pa.array([example for example in batch[:-1] if example])], names=pa_table_names + ) + # 
Uncomment for debugging (will print the Arrow table size and elements) + # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") + # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) + yield (file_idx, batch_idx), self._cast_table(pa_table) + batch_idx += 1 + batch = batch[-1] + if batch: + pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) + yield (file_idx, batch_idx), self._cast_table(pa_table) + elif self.config.sample_by == "document": + text = f.read() + pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) + yield file_idx, self._cast_table(pa_table) diff --git a/testbed/huggingface__datasets/src/datasets/parallel/__init__.py b/testbed/huggingface__datasets/src/datasets/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d83093588514bec18b3536f4287a699939af499e --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/parallel/__init__.py @@ -0,0 +1 @@ +from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401 diff --git a/testbed/huggingface__datasets/src/datasets/parallel/parallel.py b/testbed/huggingface__datasets/src/datasets/parallel/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..4e1a8546c586b94094f915e64268c58155c99fba --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/parallel/parallel.py @@ -0,0 +1,113 @@ +import contextlib +from multiprocessing import Pool, RLock + +from tqdm.auto import tqdm + +from ..utils import experimental, logging + + +logger = logging.get_logger(__name__) + + +class ParallelBackendConfig: + backend_name = None + + +@experimental +def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): + """ + **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either + multiprocessing.Pool or joblib for parallelization. + + Args: + function (`Callable[[Any], Any]`): Function to be applied to `iterable`. + iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to. + num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib). + types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements. + disable_tqdm (`bool`): Whether to disable the tqdm progressbar. + desc (`str`): Prefix for the tqdm progressbar. + single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`. + Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an + element of `iterable`, and `rank` is used for progress bar. 
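The three `sample_by` modes of the text builder above differ only in how a file is cut into examples; a usage sketch (file name hypothetical):

```python
from datasets import load_dataset

lines = load_dataset("text", data_files="corpus.txt", split="train")                         # one example per line
paras = load_dataset("text", data_files="corpus.txt", sample_by="paragraph", split="train")  # split on blank lines
docs = load_dataset("text", data_files="corpus.txt", sample_by="document", split="train")    # one example per file
```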
+ """ + if ParallelBackendConfig.backend_name is None: + return _map_with_multiprocessing_pool( + function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func + ) + + return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func) + + +def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): + num_proc = num_proc if num_proc <= len(iterable) else len(iterable) + split_kwds = [] # We organize the splits ourselve (contiguous splits) + for index in range(num_proc): + div = len(iterable) // num_proc + mod = len(iterable) % num_proc + start = div * index + min(index, mod) + end = start + div + (1 if index < mod else 0) + split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc)) + + if len(iterable) != sum(len(i[1]) for i in split_kwds): + raise ValueError( + f"Error dividing inputs iterable among processes. " + f"Total number of objects {len(iterable)}, " + f"length: {sum(len(i[1]) for i in split_kwds)}" + ) + + logger.info( + f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}" + ) + initargs, initializer = None, None + if not disable_tqdm: + initargs, initializer = (RLock(),), tqdm.set_lock + with Pool(num_proc, initargs=initargs, initializer=initializer) as pool: + mapped = pool.map(single_map_nested_func, split_kwds) + logger.info(f"Finished {num_proc} processes") + mapped = [obj for proc_res in mapped for obj in proc_res] + logger.info(f"Unpacked {len(mapped)} objects") + + return mapped + + +def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func): + # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, + # and it requires monkey-patching joblib internal classes which is subject to change + import joblib + + with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc): + return joblib.Parallel()( + joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable + ) + + +@experimental +@contextlib.contextmanager +def parallel_backend(backend_name: str): + """ + **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization + implemented by joblib. + + Args: + backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib. 
+ + Example usage: + ```py + with parallel_backend('spark'): + dataset = load_dataset(..., num_proc=2) + ``` + """ + ParallelBackendConfig.backend_name = backend_name + + if backend_name == "spark": + from joblibspark import register_spark + + register_spark() + + # TODO: call create_cache_and_write_probe if "download" in steps + # TODO: raise NotImplementedError when Dataset.map etc is called + + try: + yield + finally: + ParallelBackendConfig.backend_name = None diff --git a/testbed/huggingface__datasets/src/datasets/search.py b/testbed/huggingface__datasets/src/datasets/search.py new file mode 100644 index 0000000000000000000000000000000000000000..5ec41bbc3e00c34d6d10e75ea05264caabc3256e --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/search.py @@ -0,0 +1,779 @@ +import importlib.util +import os +import tempfile +from pathlib import PurePath +from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union + +import fsspec +import numpy as np + +from .utils import logging +from .utils import tqdm as hf_tqdm + + +if TYPE_CHECKING: + from .arrow_dataset import Dataset # noqa: F401 + + try: + from elasticsearch import Elasticsearch # noqa: F401 + + except ImportError: + pass + try: + import faiss # noqa: F401 + + except ImportError: + pass + +_has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None +_has_faiss = importlib.util.find_spec("faiss") is not None + + +logger = logging.get_logger(__name__) + + +class MissingIndex(Exception): + pass + + +class SearchResults(NamedTuple): + scores: List[float] + indices: List[int] + + +class BatchedSearchResults(NamedTuple): + total_scores: List[List[float]] + total_indices: List[List[int]] + + +class NearestExamplesResults(NamedTuple): + scores: List[float] + examples: dict + + +class BatchedNearestExamplesResults(NamedTuple): + total_scores: List[List[float]] + total_examples: List[dict] + + +class BaseIndex: + """Base class for indexing""" + + def search(self, query, k: int = 10, **kwargs) -> SearchResults: + """ + To implement. + This method has to return the scores and the indices of the retrieved examples given a certain query. + """ + raise NotImplementedError + + def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults: + """Find the nearest example indices to the queries. + + Args: + queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index. + k (`int`): The number of examples to retrieve per query. + + Output: + total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query. + total_indices (`List[List[int]]`): The indices of the retrieved examples per query. + """ + total_scores, total_indices = [], [] + for query in queries: + scores, indices = self.search(query, k) + total_scores.append(scores) + total_indices.append(indices) + return BatchedSearchResults(total_scores, total_indices) + + def save(self, file: Union[str, PurePath]): + """Serialize the index on disk""" + raise NotImplementedError + + @classmethod + def load(cls, file: Union[str, PurePath]) -> "BaseIndex": + """Deserialize the index from disk""" + raise NotImplementedError + + +class ElasticSearchIndex(BaseIndex): + """ + Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity. 
+ An Elasticsearch server needs to be accessible, and a Python client is declared with + ``` + es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}]) + ``` + for example. + """ + + def __init__( + self, + host: Optional[str] = None, + port: Optional[int] = None, + es_client: Optional["Elasticsearch"] = None, + es_index_name: Optional[str] = None, + es_index_config: Optional[dict] = None, + ): + if not _has_elasticsearch: + raise ImportError( + "You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1`, for example" + ) + if es_client is not None and (host is not None or port is not None): + raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.") + host = host or "localhost" + port = port or 9200 + + import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features + from elasticsearch import Elasticsearch # noqa: F811 + + self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}]) + self.es_index_name = ( + es_index_name + if es_index_name is not None + else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name) + ) + self.es_index_config = ( + es_index_config + if es_index_config is not None + else { + "settings": { + "number_of_shards": 1, + "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}}, + }, + "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}}, + } + ) + + def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None): + """ + Add documents to the index. + If the documents are inside a certain column, you can specify it using the `column` argument. + """ + index_name = self.es_index_name + index_config = self.es_index_config + self.es_client.indices.create(index=index_name, body=index_config) + number_of_docs = len(documents) + progress = hf_tqdm(unit="docs", total=number_of_docs) + successes = 0 + + def passage_generator(): + if column is not None: + for i, example in enumerate(documents): + yield {"text": example[column], "_id": i} + else: + for i, example in enumerate(documents): + yield {"text": example, "_id": i} + + # create the ES index + import elasticsearch as es + + for ok, action in es.helpers.streaming_bulk( + client=self.es_client, + index=index_name, + actions=passage_generator(), + ): + progress.update(1) + successes += ok + if successes != len(documents): + logger.warning( + f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}" + ) + logger.info(f"Indexed {successes:d} documents") + + def search(self, query: str, k=10, **kwargs) -> SearchResults: + """Find the nearest example indices to the query. + + Args: + query (`str`): The query as a string. + k (`int`): The number of examples to retrieve. + + Output: + scores (`List[float]`): The retrieval scores of the retrieved examples. + indices (`List[int]`): The indices of the retrieved examples. 
+ """ + response = self.es_client.search( + index=self.es_index_name, + body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k}, + **kwargs, + ) + hits = response["hits"]["hits"] + return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits]) + + def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults: + import concurrent.futures + + total_scores, total_indices = [None] * len(queries), [None] * len(queries) + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)} + for future in concurrent.futures.as_completed(future_to_index): + index = future_to_index[future] + results: SearchResults = future.result() + total_scores[index] = results.scores + total_indices[index] = results.indices + return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores) + + +class FaissIndex(BaseIndex): + """ + Dense index using Faiss. It is used to index vectors. + Faiss is a library for efficient similarity search and clustering of dense vectors. + It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM. + You can find more information about Faiss here: + - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory + - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU + """ + + def __init__( + self, + device: Optional[Union[int, List[int]]] = None, + string_factory: Optional[str] = None, + metric_type: Optional[int] = None, + custom_index: Optional["faiss.Index"] = None, + ): + """ + Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index). + You can find more information about Faiss here: + - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory + """ + if string_factory is not None and custom_index is not None: + raise ValueError("Please specify either `string_factory` or `custom_index` but not both.") + if device is not None and custom_index is not None: + raise ValueError( + "Cannot pass both 'custom_index' and 'device'. " + "Pass 'custom_index' already transferred to the target device instead." + ) + self.device = device + self.string_factory = string_factory + self.metric_type = metric_type + self.faiss_index = custom_index + if not _has_faiss: + raise ImportError( + "You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. " + "A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. " + "Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available." + ) + + def add_vectors( + self, + vectors: Union[np.array, "Dataset"], + column: Optional[str] = None, + batch_size: int = 1000, + train_size: Optional[int] = None, + faiss_verbose: Optional[bool] = None, + ): + """ + Add vectors to the index. + If the arrays are inside a certain column, you can specify it using the `column` argument. 
+ """ + import faiss # noqa: F811 + + # Create index + if self.faiss_index is None: + size = len(vectors[0]) if column is None else len(vectors[0][column]) + if self.string_factory is not None: + if self.metric_type is None: + index = faiss.index_factory(size, self.string_factory) + else: + index = faiss.index_factory(size, self.string_factory, self.metric_type) + else: + if self.metric_type is None: + index = faiss.IndexFlat(size) + else: + index = faiss.IndexFlat(size, self.metric_type) + + self.faiss_index = self._faiss_index_to_device(index, self.device) + logger.info(f"Created faiss index of type {type(self.faiss_index)}") + + # Set verbosity level + if faiss_verbose is not None: + self.faiss_index.verbose = faiss_verbose + if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None: + self.faiss_index.index.verbose = faiss_verbose + if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None: + self.faiss_index.quantizer.verbose = faiss_verbose + if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None: + self.faiss_index.clustering_index.verbose = faiss_verbose + + # Train + if train_size is not None: + train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column] + logger.info(f"Training the index with the first {len(train_vecs)} vectors") + self.faiss_index.train(train_vecs) + else: + logger.info("Ignored the training step of the faiss index as `train_size` is None.") + + # Add vectors + logger.info(f"Adding {len(vectors)} vectors to the faiss index") + for i in hf_tqdm(range(0, len(vectors), batch_size)): + vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column] + self.faiss_index.add(vecs) + + @staticmethod + def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index": + """ + Sends a faiss index to a device. + A device can either be a positive integer (GPU id), a negative integer (all GPUs), + or a list of positive integers (select GPUs to use), or `None` for CPU. + """ + + # If device is not specified, then it runs on CPU. + if device is None: + return index + + import faiss # noqa: F811 + + # If the device id is given as an integer + if isinstance(device, int): + # Positive integers are directly mapped to GPU ids + if device > -1: + faiss_res = faiss.StandardGpuResources() + index = faiss.index_cpu_to_gpu(faiss_res, device, index) + # And negative integers mean using all GPUs + else: + index = faiss.index_cpu_to_all_gpus(index) + # Device ids given as a list mean mapping to those devices specified. + elif isinstance(device, (list, tuple)): + index = faiss.index_cpu_to_gpus_list(index, gpus=list(device)) + else: + raise TypeError( + f"The argument type: {type(device)} is not expected. " + + "Please pass in either nothing, a positive int, a negative int, or a list of positive ints." + ) + + return index + + def search(self, query: np.array, k=10, **kwargs) -> SearchResults: + """Find the nearest examples indices to the query. + + Args: + query (`np.array`): The query as a numpy array. + k (`int`): The number of examples to retrieve. + + Ouput: + scores (`List[List[float]`): The retrieval scores of the retrieved examples. + indices (`List[List[int]]`): The indices of the retrieved examples. 
+ """ + if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1): + raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)") + + queries = query.reshape(1, -1) + if not queries.flags.c_contiguous: + queries = np.asarray(queries, order="C") + scores, indices = self.faiss_index.search(queries, k, **kwargs) + return SearchResults(scores[0], indices[0].astype(int)) + + def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults: + """Find the nearest examples indices to the queries. + + Args: + queries (`np.array`): The queries as a numpy array. + k (`int`): The number of examples to retrieve. + + Ouput: + total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query. + total_indices (`List[List[int]]`): The indices of the retrieved examples per query. + """ + if len(queries.shape) != 2: + raise ValueError("Shape of query must be 2D") + if not queries.flags.c_contiguous: + queries = np.asarray(queries, order="C") + scores, indices = self.faiss_index.search(queries, k, **kwargs) + return BatchedSearchResults(scores, indices.astype(int)) + + def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None): + """Serialize the FaissIndex on disk""" + import faiss # noqa: F811 + + if self.device is not None and isinstance(self.device, (int, list, tuple)): + index = faiss.index_gpu_to_cpu(self.faiss_index) + else: + index = self.faiss_index + + with fsspec.open(str(file), "wb", **(storage_options or {})) as f: + faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write))) + + @classmethod + def load( + cls, + file: Union[str, PurePath], + device: Optional[Union[int, List[int]]] = None, + storage_options: Optional[Dict] = None, + ) -> "FaissIndex": + """Deserialize the FaissIndex from disk""" + import faiss # noqa: F811 + + # Instances of FaissIndex is essentially just a wrapper for faiss indices. + faiss_index = cls(device=device) + with fsspec.open(str(file), "rb", **(storage_options or {})) as f: + index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read))) + faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device) + return faiss_index + + +class IndexableMixin: + """Add indexing features to `datasets.Dataset`""" + + def __init__(self): + self._indexes: Dict[str, BaseIndex] = {} + + def __len__(self): + raise NotImplementedError + + def __getitem__(self, key): + raise NotImplementedError + + def is_index_initialized(self, index_name: str) -> bool: + return index_name in self._indexes + + def _check_index_is_initialized(self, index_name: str): + if not self.is_index_initialized(index_name): + raise MissingIndex( + f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first." + ) + + def list_indexes(self) -> List[str]: + """List the `colindex_nameumns`/identifiers of all the attached indexes.""" + return list(self._indexes) + + def get_index(self, index_name: str) -> BaseIndex: + """List the `index_name`/identifiers of all the attached indexes. + + Args: + index_name (`str`): Index name. 
+ + Returns: + [`BaseIndex`] + """ + self._check_index_is_initialized(index_name) + return self._indexes[index_name] + + def add_faiss_index( + self, + column: str, + index_name: Optional[str] = None, + device: Optional[Union[int, List[int]]] = None, + string_factory: Optional[str] = None, + metric_type: Optional[int] = None, + custom_index: Optional["faiss.Index"] = None, + batch_size: int = 1000, + train_size: Optional[int] = None, + faiss_verbose: bool = False, + ): + """Add a dense index using Faiss for fast retrieval. + The index is created using the vectors of the specified column. + You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below). + You can find more information about Faiss here: + - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory + + Args: + column (`str`): The column of the vectors to add to the index. + index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. + By default it corresponds to `column`. + device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. + If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. + string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP. + metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. + custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. + batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. + + train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index. + faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. + """ + index_name = index_name if index_name is not None else column + faiss_index = FaissIndex( + device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index + ) + faiss_index.add_vectors( + self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose + ) + self._indexes[index_name] = faiss_index + + def add_faiss_index_from_external_arrays( + self, + external_arrays: np.array, + index_name: str, + device: Optional[Union[int, List[int]]] = None, + string_factory: Optional[str] = None, + metric_type: Optional[int] = None, + custom_index: Optional["faiss.Index"] = None, + batch_size: int = 1000, + train_size: Optional[int] = None, + faiss_verbose: bool = False, + ): + """Add a dense index using Faiss for fast retrieval. + The index is created using the vectors of `external_arrays`. + You can specify `device` if you want to run it on GPU (`device` must be the GPU index). + You can find more information about Faiss here: + - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory + + Args: + external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`. + It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. + index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. 
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. + If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. + string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP. + metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. + custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs. + batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000. + + train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index. + faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index. + """ + faiss_index = FaissIndex( + device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index + ) + faiss_index.add_vectors( + external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose + ) + self._indexes[index_name] = faiss_index + + def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None): + """Save a FaissIndex on disk. + + Args: + index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`. + file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`). + storage_options (`dict`, *optional*): + Key/value pairs to be passed on to the file-system backend, if any. + + + + """ + index = self.get_index(index_name) + if not isinstance(index, FaissIndex): + raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'") + index.save(file, storage_options=storage_options) + logger.info(f"Saved FaissIndex {index_name} at {file}") + + def load_faiss_index( + self, + index_name: str, + file: Union[str, PurePath], + device: Optional[Union[int, List[int]]] = None, + storage_options: Optional[Dict] = None, + ): + """Load a FaissIndex from disk. + + If you want to do additional configurations, you can have access to the faiss index object by doing + `.get_index(index_name).faiss_index` to make it fit your needs. + + Args: + index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to + call `.get_nearest` or `.search`. + file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`). + device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs. + If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU. + storage_options (`dict`, *optional*): + Key/value pairs to be passed on to the file-system backend, if any. + + + + """ + index = FaissIndex.load(file, device=device, storage_options=storage_options) + if index.faiss_index.ntotal != len(self): + raise ValueError( + f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples." 
+            )
+        self._indexes[index_name] = index
+        logger.info(f"Loaded FaissIndex {index_name} from {file}")
+
+    def add_elasticsearch_index(
+        self,
+        column: str,
+        index_name: Optional[str] = None,
+        host: Optional[str] = None,
+        port: Optional[int] = None,
+        es_client: Optional["Elasticsearch"] = None,
+        es_index_name: Optional[str] = None,
+        es_index_config: Optional[dict] = None,
+    ):
+        """Add a text index using ElasticSearch for fast retrieval.
+
+        Args:
+            column (`str`): The column of the documents to add to the index.
+            index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
+                By default it corresponds to `column`.
+            host (Optional `str`, defaults to localhost):
+                Host where ElasticSearch is running.
+            port (Optional `int`, defaults to 9200):
+                Port where ElasticSearch is running.
+            es_client (Optional `elasticsearch.Elasticsearch`):
+                The elasticsearch client used to create the index if host and port are None.
+            es_index_name (Optional `str`): The elasticsearch index name used to create the index.
+            es_index_config (Optional `dict`):
+                The configuration of the elasticsearch index.
+                Default config is:
+                ```
+                {
+                    "settings": {
+                        "number_of_shards": 1,
+                        "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}},
+                    },
+                    "mappings": {
+                        "properties": {
+                            "text": {
+                                "type": "text",
+                                "analyzer": "standard",
+                                "similarity": "BM25"
+                            },
+                        }
+                    },
+                }
+                ```
+        """
+        index_name = index_name if index_name is not None else column
+        es_index = ElasticSearchIndex(
+            host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+        )
+        es_index.add_documents(self, column=column)
+        self._indexes[index_name] = es_index
+
+    def load_elasticsearch_index(
+        self,
+        index_name: str,
+        es_index_name: str,
+        host: Optional[str] = None,
+        port: Optional[int] = None,
+        es_client: Optional["Elasticsearch"] = None,
+        es_index_config: Optional[dict] = None,
+    ):
+        """Load an existing text index using ElasticSearch for fast retrieval.
+
+        Args:
+            index_name (`str`):
+                The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
+            es_index_name (`str`):
+                The name of the elasticsearch index to load.
+            host (`str`, *optional*, defaults to `localhost`):
+                Host where ElasticSearch is running.
+            port (`int`, *optional*, defaults to `9200`):
+                Port where ElasticSearch is running.
+            es_client (`elasticsearch.Elasticsearch`, *optional*):
+                The elasticsearch client used to create the index if host and port are `None`.
+            es_index_config (`dict`, *optional*):
+                The configuration of the elasticsearch index.
+                Default config is:
+                ```
+                {
+                    "settings": {
+                        "number_of_shards": 1,
+                        "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}},
+                    },
+                    "mappings": {
+                        "properties": {
+                            "text": {
+                                "type": "text",
+                                "analyzer": "standard",
+                                "similarity": "BM25"
+                            },
+                        }
+                    },
+                }
+                ```
+        """
+        self._indexes[index_name] = ElasticSearchIndex(
+            host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+        )
+
+    def drop_index(self, index_name: str):
+        """Drop the index with the specified `index_name`.
+
+        Args:
+            index_name (`str`):
+                The `index_name`/identifier of the index.
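+
+        Example (an illustrative sketch; assumes a Faiss index over a hypothetical `embeddings` column was added earlier):
+
+        ```py
+        >>> ds.add_faiss_index(column="embeddings")  # index_name defaults to the column name
+        >>> ds.drop_index("embeddings")
+        ```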
+        """
+        del self._indexes[index_name]
+
+    def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
+        """Find the indices of the nearest examples in the dataset to the query.
+
+        Args:
+            index_name (`str`):
+                The name/identifier of the index.
+            query (`Union[str, np.ndarray]`):
+                The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+            k (`int`):
+                The number of examples to retrieve.
+
+        Returns:
+            `(scores, indices)`:
+                A tuple of `(scores, indices)` where:
+                - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+                - **indices** (`List[int]`): the indices of the retrieved examples
+        """
+        self._check_index_is_initialized(index_name)
+        return self._indexes[index_name].search(query, k, **kwargs)
+
+    def search_batch(
+        self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+    ) -> BatchedSearchResults:
+        """Find the indices of the nearest examples in the dataset to the queries.
+
+        Args:
+            index_name (`str`):
+                The `index_name`/identifier of the index.
+            queries (`Union[List[str], np.ndarray]`):
+                The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+            k (`int`):
+                The number of examples to retrieve per query.
+
+        Returns:
+            `(total_scores, total_indices)`:
+                A tuple of `(total_scores, total_indices)` where:
+                - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+                - **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
+        """
+        self._check_index_is_initialized(index_name)
+        return self._indexes[index_name].search_batch(queries, k, **kwargs)
+
+    def get_nearest_examples(
+        self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
+    ) -> NearestExamplesResults:
+        """Find the nearest examples in the dataset to the query.
+
+        Args:
+            index_name (`str`):
+                The index_name/identifier of the index.
+            query (`Union[str, np.ndarray]`):
+                The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+            k (`int`):
+                The number of examples to retrieve.
+
+        Returns:
+            `(scores, examples)`:
+                A tuple of `(scores, examples)` where:
+                - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+                - **examples** (`dict`): the retrieved examples
+        """
+        self._check_index_is_initialized(index_name)
+        scores, indices = self.search(index_name, query, k, **kwargs)
+        top_indices = [i for i in indices if i >= 0]
+        return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
+
+    def get_nearest_examples_batch(
+        self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+    ) -> BatchedNearestExamplesResults:
+        """Find the nearest examples in the dataset to the queries.
+
+        Args:
+            index_name (`str`):
+                The `index_name`/identifier of the index.
+            queries (`Union[List[str], np.ndarray]`):
+                The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+            k (`int`):
+                The number of examples to retrieve per query.
+
+        Returns:
+            `(total_scores, total_examples)`:
+                A tuple of `(total_scores, total_examples)` where:
+                - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+                - **total_examples** (`List[dict]`): the retrieved examples per query
+        """
+        self._check_index_is_initialized(index_name)
+        total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
+        total_scores = [
+            scores_i[: len([i for i in indices_i if i >= 0])]
+            for scores_i, indices_i in zip(total_scores, total_indices)
+        ]
+        total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
+        return BatchedNearestExamplesResults(total_scores, total_samples)
diff --git a/testbed/huggingface__datasets/src/datasets/splits.py b/testbed/huggingface__datasets/src/datasets/splits.py
new file mode 100644
index 0000000000000000000000000000000000000000..817995ad217b0d6e056e86c805b2d880feb812eb
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/splits.py
@@ -0,0 +1,638 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Splits related API."""
+
+
+import abc
+import collections
+import copy
+import dataclasses
+import re
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Union
+
+from .arrow_reader import FileInstructions, make_file_instructions
+from .naming import _split_re
+from .utils.py_utils import NonMutableDict, asdict
+
+
+@dataclass
+class SplitInfo:
+    name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
+    num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+    num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+    shard_lengths: Optional[List[int]] = None
+
+    # Deprecated
+    # For backward compatibility, this field needs to always be included in files like
+    # dataset_infos.json and dataset_info.json files
+    # To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
+    dataset_name: Optional[str] = dataclasses.field(
+        default=None, metadata={"include_in_asdict_even_if_is_default": True}
+    )
+
+    @property
+    def file_instructions(self):
+        """Returns the list of dict(filename, take, skip)."""
+        # `self.dataset_name` is assigned in `SplitDict.add()`.
+        instructions = make_file_instructions(
+            name=self.dataset_name,
+            split_infos=[self],
+            instruction=str(self.name),
+        )
+        return instructions.file_instructions
+
+
+@dataclass
+class SubSplitInfo:
+    """Wrapper around a sub split info.
+    This class exposes info on the subsplit:
+    ```
+    ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
+    info.splits['train[75%:]'].num_examples
+    ```
+    """
+
+    instructions: FileInstructions
+
+    @property
+    def num_examples(self):
+        """Returns the number of examples in the subsplit."""
+        return self.instructions.num_examples
+
+    @property
+    def file_instructions(self):
+        """Returns the list of dict(filename, take, skip)."""
+        return self.instructions.file_instructions
+
+
+class SplitBase(metaclass=abc.ABCMeta):
+    # pylint: disable=line-too-long
+    """Abstract base class for Split compositionality.
+
+    See the
+    [guide on splits](../loading#slice-splits)
+    for more information.
+
+    There are three parts to the composition:
+    1) The splits are composed (defined, merged, split,...) together before
+       calling the `.as_dataset()` function. This is done with `__add__` and
+       `__getitem__`, which return a tree of `SplitBase` (whose leaves
+       are the `NamedSplit` objects)
+
+        ```
+        split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
+        ```
+
+    2) The `SplitBase` is forwarded to the `.as_dataset()` function
+       to be resolved into actual read instructions. This is done by the
+       `.get_read_instruction()` method, which takes the real dataset splits
+       (name, number of shards,...) and parses the tree to return a
+       `SplitReadInstruction()` object
+
+        ```
+        read_instruction = split.get_read_instruction(self.info.splits)
+        ```
+
+    3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
+       to define which files to read and how to skip examples within file.
+
+    """
+
+    # pylint: enable=line-too-long
+
+    @abc.abstractmethod
+    def get_read_instruction(self, split_dict):
+        """Parse the descriptor tree and compile all read instructions together.
+
+        Args:
+            split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
+
+        Returns:
+            split_read_instruction: `SplitReadInstruction`
+        """
+        raise NotImplementedError("Abstract method")
+
+    def __eq__(self, other):
+        """Equality: datasets.Split.TRAIN == 'train'."""
+        if isinstance(other, (NamedSplit, str)):
+            return False
+        raise NotImplementedError("Equality is not implemented between merged/sub splits.")
+
+    def __ne__(self, other):
+        """Inequality: datasets.Split.TRAIN != 'test'."""
+        return not self.__eq__(other)
+
+    def __add__(self, other):
+        """Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
+        return _SplitMerged(self, other)
+
+    def subsplit(self, arg=None, k=None, percent=None, weighted=None):  # pylint: disable=redefined-outer-name
+        """Divides this split into subsplits.
+
+        There are 3 ways to define subsplits, which correspond to the 3
+        arguments `k` (get `k` even subsplits), `percent` (get a slice of the
+        dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
+        specified by `weighted`).
+
+        Example:
+
+        ```
+        # 50% train, 50% test
+        train, test = split.subsplit(k=2)
+        # 50% train, 25% test, 25% validation
+        train, test, validation = split.subsplit(weighted=[2, 1, 1])
+        # Extract last 20%
+        subsplit = split.subsplit(datasets.percent[-20:])
+        ```
+
+        Warning: `k` and `weighted` will be converted into percents, which means that
+        the resulting values will be rounded up or down. The final split may be
+        bigger to account for remainders.
For instance: + + ``` + train, test, valid = split.subsplit(k=3) # 33%, 33%, 34% + s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18% + ``` + + Args: + arg: If no kwargs are given, `arg` will be interpreted as one of + `k`, `percent`, or `weighted` depending on the type. + For example: + ``` + split.subsplit(10) # Equivalent to split.subsplit(k=10) + split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20] + split.subsplit([1, 1, 2]) # weighted=[1, 1, 2] + ``` + k: `int` If set, subdivide the split into `k` equal parts. + percent: `datasets.percent slice`, return a single subsplit corresponding to + a slice of the original split. For example: + `split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`. + weighted: `list[int]`, return a list of subsplits whose proportions match + the normalized sum of the list. For example: + `split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`. + + Returns: + A subsplit or list of subsplits extracted from this split object. + """ + # Note that the percent kwargs redefine the outer name datasets.percent. This + # is done for consistency (.subsplit(percent=datasets.percent[:40])) + if sum(bool(x) for x in (arg, k, percent, weighted)) != 1: + raise ValueError("Only one argument of subsplit should be set.") + + # Auto deduce k + if isinstance(arg, int): + k = arg + elif isinstance(arg, slice): + percent = arg + elif isinstance(arg, list): + weighted = arg + + if not (k or percent or weighted): + raise ValueError( + f"Invalid split argument {arg}. Only list, slice and int supported. " + "One of k, weighted or percent should be set to a non empty value." + ) + + def assert_slices_coverage(slices): + # Ensure that the expended slices cover all percents. + assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100)) + + if k: + if not 0 < k <= 100: + raise ValueError(f"Subsplit k should be between 0 and 100, got {k}") + shift = 100 // k + slices = [slice(i * shift, (i + 1) * shift) for i in range(k)] + # Round up last element to ensure all elements are taken + slices[-1] = slice(slices[-1].start, 100) + # Internal check to ensure full coverage + assert_slices_coverage(slices) + return tuple(_SubSplit(self, s) for s in slices) + elif percent: + return _SubSplit(self, percent) + elif weighted: + # Normalize the weighted sum + total = sum(weighted) + weighted = [100 * x // total for x in weighted] + # Create the slice for each of the elements + start = 0 + stop = 0 + slices = [] + for v in weighted: + stop += v + slices.append(slice(start, stop)) + start = stop + # Round up last element to ensure all elements are taken + slices[-1] = slice(slices[-1].start, 100) + # Internal check to ensure full coverage + assert_slices_coverage(slices) + return tuple(_SubSplit(self, s) for s in slices) + else: + # Should not be possible + raise ValueError("Could not determine the split") + + +# 2 requirements: +# 1. datasets.percent be sliceable +# 2. datasets.percent be documented +# +# Instances are not documented, so we want datasets.percent to be a class, but to +# have it be sliceable, we need this metaclass. +class PercentSliceMeta(type): + def __getitem__(cls, slice_value): + if not isinstance(slice_value, slice): + raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}") + return slice_value + + +class PercentSlice(metaclass=PercentSliceMeta): + # pylint: disable=line-too-long + """Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`. 
+ + See the + [guide on splits](../loading#slice-splits) + for more information. + """ + + # pylint: enable=line-too-long + pass + + +percent = PercentSlice # pylint: disable=invalid-name + + +class _SplitMerged(SplitBase): + """Represent two split descriptors merged together.""" + + def __init__(self, split1, split2): + self._split1 = split1 + self._split2 = split2 + + def get_read_instruction(self, split_dict): + read_instruction1 = self._split1.get_read_instruction(split_dict) + read_instruction2 = self._split2.get_read_instruction(split_dict) + return read_instruction1 + read_instruction2 + + def __repr__(self): + return f"({repr(self._split1)} + {repr(self._split2)})" + + +class _SubSplit(SplitBase): + """Represent a sub split of a split descriptor.""" + + def __init__(self, split, slice_value): + self._split = split + self._slice_value = slice_value + + def get_read_instruction(self, split_dict): + return self._split.get_read_instruction(split_dict)[self._slice_value] + + def __repr__(self): + slice_str = "{start}:{stop}" + if self._slice_value.step is not None: + slice_str += ":{step}" + slice_str = slice_str.format( + start="" if self._slice_value.start is None else self._slice_value.start, + stop="" if self._slice_value.stop is None else self._slice_value.stop, + step=self._slice_value.step, + ) + return f"{repr(self._split)}(datasets.percent[{slice_str}])" + + +class NamedSplit(SplitBase): + """Descriptor corresponding to a named split (train, test, ...). + + Example: + Each descriptor can be composed with other using addition or slice: + + ```py + split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST + ``` + + The resulting split will correspond to 25% of the train split merged with + 100% of the test split. + + A split cannot be added twice, so the following will fail: + + ```py + split = ( + datasets.Split.TRAIN.subsplit(datasets.percent[:25]) + + datasets.Split.TRAIN.subsplit(datasets.percent[75:]) + ) # Error + split = datasets.Split.TEST + datasets.Split.ALL # Error + ``` + + The slices can be applied only one time. 
So the following are valid:
+
+    ```py
+    split = (
+        datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+        datasets.Split.TEST.subsplit(datasets.percent[:50])
+    )
+    split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
+    ```
+
+    But this is not valid:
+
+    ```py
+    train = datasets.Split.TRAIN
+    test = datasets.Split.TEST
+    split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
+    split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
+    ```
+    """
+
+    def __init__(self, name):
+        self._name = name
+        split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
+        for split_name in split_names_from_instruction:
+            if not re.match(_split_re, split_name):
+                raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
+
+    def __str__(self):
+        return self._name
+
+    def __repr__(self):
+        return f"NamedSplit({self._name!r})"
+
+    def __eq__(self, other):
+        """Equality: datasets.Split.TRAIN == 'train'."""
+        if isinstance(other, NamedSplit):
+            return self._name == other._name  # pylint: disable=protected-access
+        elif isinstance(other, SplitBase):
+            return False
+        elif isinstance(other, str):  # Other should be string
+            return self._name == other
+        else:
+            raise ValueError(f"Equality not supported between split {self} and {other}")
+
+    def __lt__(self, other):
+        return self._name < other._name  # pylint: disable=protected-access
+
+    def __hash__(self):
+        return hash(self._name)
+
+    def get_read_instruction(self, split_dict):
+        return SplitReadInstruction(split_dict[self._name])
+
+
+class NamedSplitAll(NamedSplit):
+    """Split corresponding to the union of all defined dataset splits."""
+
+    def __init__(self):
+        super().__init__("all")
+
+    def __repr__(self):
+        return "NamedSplitAll()"
+
+    def get_read_instruction(self, split_dict):
+        # Merge all dataset splits together
+        read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
+        return sum(read_instructions, SplitReadInstruction())
+
+
+class Split:
+    # pylint: disable=line-too-long
+    """`Enum` for dataset splits.
+
+    Datasets are typically split into different subsets to be used at various
+    stages of training and evaluation.
+
+    - `TRAIN`: the training data.
+    - `VALIDATION`: the validation data. If present, this is typically used as
+      evaluation data while iterating on a model (e.g. changing hyperparameters,
+      model architecture, etc.).
+    - `TEST`: the testing data. This is the data to report metrics on. Typically
+      you do not want to use this during model iteration as you may overfit to it.
+    - `ALL`: the union of all defined dataset splits.
+
+    All splits, including compositions, inherit from `datasets.SplitBase`.
+
+    See the [guide](../load_hub#splits) on splits for more information.
+
+    Example:
+
+    ```py
+    >>> datasets.SplitGenerator(
+    ...     name=datasets.Split.TRAIN,
+    ...     gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
+    ... ),
+    ... datasets.SplitGenerator(
+    ...     name=datasets.Split.VALIDATION,
+    ...     gen_kwargs={"split_key": "validation", "files": dl_manager.download_and_extract(url)},
+    ... ),
+    ... datasets.SplitGenerator(
+    ...     name=datasets.Split.TEST,
+    ...     gen_kwargs={"split_key": "test", "files": dl_manager.download_and_extract(url)},
+    ...
)
+    ```
+    """
+
+    # pylint: enable=line-too-long
+    TRAIN = NamedSplit("train")
+    TEST = NamedSplit("test")
+    VALIDATION = NamedSplit("validation")
+    ALL = NamedSplitAll()
+
+    def __new__(cls, name):
+        """Create a custom split with datasets.Split('custom_name')."""
+        return NamedSplitAll() if name == "all" else NamedSplit(name)
+
+
+# Similar to SplitInfo, but contains an additional slice info
+SlicedSplitInfo = collections.namedtuple(
+    "SlicedSplitInfo",
+    [
+        "split_info",
+        "slice_value",
+    ],
+)  # noqa: E231
+
+
+class SplitReadInstruction:
+    """Object containing the reading instructions for the dataset.
+
+    Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
+    but the resolution happens instantaneously, instead of keeping track of the
+    tree, such that all instructions are compiled and flattened into a single
+    SplitReadInstruction object containing the list of files and slices to use.
+
+    Once resolved, the instructions can be accessed with:
+
+    ```
+    read_instructions.get_list_sliced_split_info()  # List of splits to use
+    ```
+
+    """
+
+    def __init__(self, split_info=None):
+        self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with itself.")
+
+        if split_info:
+            self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
+
+    def add(self, sliced_split):
+        """Add a SlicedSplitInfo to the read instructions."""
+        # TODO(epot): Check that the number of examples per shard % 100 == 0
+        # Otherwise the slices value may be unbalanced and not exactly reflect the
+        # requested slice.
+        self._splits[sliced_split.split_info.name] = sliced_split
+
+    def __add__(self, other):
+        """Merge splits together."""
+        # Will raise an error if a split has already been added (NonMutableDict)
+        # TODO(epot): If a split is already added but there is no overlap between
+        # the slices, should merge the slices (ex: [:10] + [80:])
+        split_instruction = SplitReadInstruction()
+        split_instruction._splits.update(self._splits)  # pylint: disable=protected-access
+        split_instruction._splits.update(other._splits)  # pylint: disable=protected-access
+        return split_instruction
+
+    def __getitem__(self, slice_value):
+        """Sub-splits."""
+        # Will raise an error if a split has already been sliced
+        split_instruction = SplitReadInstruction()
+        for v in self._splits.values():
+            if v.slice_value is not None:
+                raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
+            v = v._asdict()
+            v["slice_value"] = slice_value
+            split_instruction.add(SlicedSplitInfo(**v))
+        return split_instruction
+
+    def get_list_sliced_split_info(self):
+        return list(self._splits.values())
+
+
+class SplitDict(dict):
+    """Split info object."""
+
+    def __init__(self, *args, dataset_name=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.dataset_name = dataset_name
+
+    def __getitem__(self, key: Union[SplitBase, str]):
+        # 1st case: The key exists: `info.splits['train']`
+        if str(key) in self:
+            return super().__getitem__(str(key))
+        # 2nd case: Uses instructions: `info.splits['train[50%]']`
+        else:
+            instructions = make_file_instructions(
+                name=self.dataset_name,
+                split_infos=self.values(),
+                instruction=key,
+            )
+            return SubSplitInfo(instructions)
+
+    def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
+        if key != value.name:
+            raise ValueError(f"Cannot add elem.
(key mismatch: '{key}' != '{value.name}')") + if key in self: + raise ValueError(f"Split {key} already present") + super().__setitem__(key, value) + + def add(self, split_info: SplitInfo): + """Add the split info.""" + if split_info.name in self: + raise ValueError(f"Split {split_info.name} already present") + split_info.dataset_name = self.dataset_name + super().__setitem__(split_info.name, split_info) + + @property + def total_num_examples(self): + """Return the total number of examples.""" + return sum(s.num_examples for s in self.values()) + + @classmethod + def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None): + """Returns a new SplitDict initialized from a Dict or List of `split_infos`.""" + if isinstance(split_infos, dict): + split_infos = list(split_infos.values()) + + if dataset_name is None: + dataset_name = split_infos[0].get("dataset_name") if split_infos else None + + split_dict = cls(dataset_name=dataset_name) + + for split_info in split_infos: + if isinstance(split_info, dict): + split_info = SplitInfo(**split_info) + split_dict.add(split_info) + + return split_dict + + def to_split_dict(self): + """Returns a list of SplitInfo protos that we have.""" + out = [] + for split_name, split_info in self.items(): + split_info = copy.deepcopy(split_info) + split_info.name = split_name + out.append(split_info) + return out + + def copy(self): + return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name) + + def _to_yaml_list(self) -> list: + out = [asdict(s) for s in self.to_split_dict()] + # we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc + for split_info_dict in out: + split_info_dict.pop("shard_lengths", None) + # we don't need the dataset_name attribute that is deprecated + for split_info_dict in out: + split_info_dict.pop("dataset_name", None) + return out + + @classmethod + def _from_yaml_list(cls, yaml_data: list) -> "SplitDict": + return cls.from_split_dict(yaml_data) + + +@dataclass +class SplitGenerator: + """Defines the split information for the generator. + + This should be used as returned value of + `GeneratorBasedBuilder._split_generators`. + See `GeneratorBasedBuilder._split_generators` for more info and example + of usage. + + Args: + name (`str`): + Name of the `Split` for which the generator will + create the examples. + **gen_kwargs (additional keyword arguments): + Keyword arguments to forward to the `DatasetBuilder._generate_examples` method + of the builder. + + Example: + + ```py + >>> datasets.SplitGenerator( + ... name=datasets.Split.TRAIN, + ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)}, + ... 
) + ``` + """ + + name: str + gen_kwargs: Dict = dataclasses.field(default_factory=dict) + split_info: SplitInfo = dataclasses.field(init=False) + + def __post_init__(self): + self.name = str(self.name) # Make sure we convert NamedSplits in strings + NamedSplit(self.name) # check that it's a valid split name + self.split_info = SplitInfo(name=self.name) diff --git a/testbed/huggingface__datasets/src/datasets/streaming.py b/testbed/huggingface__datasets/src/datasets/streaming.py new file mode 100644 index 0000000000000000000000000000000000000000..d9e7e185a95bd4a4343e231f1ce150f0d4d8372c --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/streaming.py @@ -0,0 +1,140 @@ +import importlib +import inspect +from functools import wraps +from typing import TYPE_CHECKING, Optional + +from .download.download_config import DownloadConfig +from .download.streaming_download_manager import ( + xbasename, + xdirname, + xet_parse, + xexists, + xgetsize, + xglob, + xgzip_open, + xisdir, + xisfile, + xjoin, + xlistdir, + xnumpy_load, + xopen, + xpandas_read_csv, + xpandas_read_excel, + xPath, + xpyarrow_parquet_read_table, + xrelpath, + xsio_loadmat, + xsplit, + xsplitext, + xwalk, + xxml_dom_minidom_parse, +) +from .utils.logging import get_logger +from .utils.patching import patch_submodule +from .utils.py_utils import get_imports + + +logger = get_logger(__name__) + + +if TYPE_CHECKING: + from .builder import DatasetBuilder + + +def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None): + """Extend the module to support streaming. + + We patch some functions in the module to use `fsspec` to support data streaming: + - We use `fsspec.open` to open and read remote files. We patch the module function: + - `open` + - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module + functions: + - `os.path.join` + - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator) + + The patched functions are replaced with custom functions defined to work with the + :class:`~download.streaming_download_manager.StreamingDownloadManager`. + + Args: + module_path: Path to the module to be extended. + download_config : mainly use use_auth_token or storage_options to support different platforms and auth types. 
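+
+    Example (an illustrative sketch; `my_dataset_module` is a hypothetical importable module):
+
+    ```py
+    >>> from datasets.streaming import extend_module_for_streaming
+    >>> from datasets.download.download_config import DownloadConfig
+    >>> extend_module_for_streaming("my_dataset_module", download_config=DownloadConfig())
+    >>> # inside `my_dataset_module`, `open()` now goes through fsspec and can read remote files
+    ```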
+ """ + + module = importlib.import_module(module_path) + + # TODO(QL): always update the module to add subsequent new authentication without removing old ones + if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming: + if isinstance(module._patched_for_streaming, DownloadConfig): + module._patched_for_streaming.token = download_config.token + module._patched_for_streaming.storage_options = download_config.storage_options + return + + def wrap_auth(function): + @wraps(function) + def wrapper(*args, **kwargs): + return function(*args, download_config=download_config, **kwargs) + + wrapper._decorator_name_ = "wrap_auth" + return wrapper + + # open files in a streaming fashion + patch_submodule(module, "open", wrap_auth(xopen)).start() + patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start() + patch_submodule(module, "os.walk", wrap_auth(xwalk)).start() + patch_submodule(module, "glob.glob", wrap_auth(xglob)).start() + # allow to navigate in remote zip files + patch_submodule(module, "os.path.join", xjoin).start() + patch_submodule(module, "os.path.dirname", xdirname).start() + patch_submodule(module, "os.path.basename", xbasename).start() + patch_submodule(module, "os.path.relpath", xrelpath).start() + patch_submodule(module, "os.path.split", xsplit).start() + patch_submodule(module, "os.path.splitext", xsplitext).start() + # allow checks on paths + patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start() + patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start() + patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start() + patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start() + patch_submodule(module, "pathlib.Path", xPath).start() + # file readers + patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start() + patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start() + patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start() + patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start() + patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start() + patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start() + patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start() + # pyarrow: do not patch pyarrow attribute in packaged modules + if not module.__name__.startswith("datasets.packaged_modules."): + patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start() + module._patched_for_streaming = download_config + + +def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"): + """Extend the dataset builder module and the modules imported by it to support streaming. + + Args: + builder (:class:`DatasetBuilder`): Dataset builder instance. 
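+
+    Example (an illustrative sketch; assumes the `squad` builder is available locally or on the Hub):
+
+    ```py
+    >>> from datasets import load_dataset_builder
+    >>> from datasets.streaming import extend_dataset_builder_for_streaming
+    >>> builder = load_dataset_builder("squad")
+    >>> extend_dataset_builder_for_streaming(builder)  # patches the builder's module for streaming
+    ```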
+    """
+    # this extends the open and os.path.join functions for data streaming
+    download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
+    extend_module_for_streaming(builder.__module__, download_config=download_config)
+    # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
+    if not builder.__module__.startswith("datasets."):  # check that it's not a packaged builder like csv
+        for imports in get_imports(inspect.getfile(builder.__class__)):
+            if imports[0] == "internal":
+                internal_import_name = imports[1]
+                internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
+                extend_module_for_streaming(internal_module_name, download_config=download_config)
+
+    # builders can inherit from other builders that might use streaming functionality
+    # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
+    # but these parent builders are not patched automatically as they are not instantiated, so we patch them here
+    from .builder import DatasetBuilder
+
+    parent_builder_modules = [
+        cls.__module__
+        for cls in type(builder).__mro__[1:]  # make sure it's not the same module we've already patched
+        if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
+    ]  # check it's not a standard builder from datasets.builder
+    for module in parent_builder_modules:
+        extend_module_for_streaming(module, download_config=download_config)
diff --git a/testbed/huggingface__datasets/src/datasets/table.py b/testbed/huggingface__datasets/src/datasets/table.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a2df53d456396c8fb6497423c3bd59b19c52480
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/table.py
@@ -0,0 +1,2415 @@
+import copy
+import os
+import warnings
+from functools import partial
+from itertools import groupby
+from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
+
+import numpy as np
+import pyarrow as pa
+import pyarrow.compute as pc
+
+from . import config
+from .utils.logging import get_logger
+
+
+if TYPE_CHECKING:
+    from .features.features import Features, FeatureType
+
+
+logger = get_logger(__name__)
+
+
+def inject_arrow_table_documentation(arrow_table_method):
+    def wrapper(fn):
+        fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
+        fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
+        if hasattr(arrow_table_method, "__annotations__"):
+            fn.__annotations__ = arrow_table_method.__annotations__
+        return fn
+
+    return wrapper
+
+
+def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
+    in_memory_stream = pa.input_stream(filename)
+    opened_stream = pa.ipc.open_stream(in_memory_stream)
+    pa_table = opened_stream.read_all()
+    return pa_table
+
+
+def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
+    stream = pa.BufferReader(buffer)
+    opened_stream = pa.ipc.open_stream(stream)
+    table = opened_stream.read_all()
+    return table
+
+
+def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
+    memory_mapped_stream = pa.memory_map(filename)
+    return pa.ipc.open_stream(memory_mapped_stream)
+
+
+def read_schema_from_file(filename: str) -> pa.Schema:
+    """
+    Infer the arrow table schema from a file without loading the whole file into memory.
+    Useful especially for very big files.
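+
+    Example (an illustrative sketch; `"data/train.arrow"` is a hypothetical path to an Arrow IPC stream file):
+
+    ```py
+    >>> from datasets.table import read_schema_from_file
+    >>> schema = read_schema_from_file("data/train.arrow")
+    >>> schema.names  # column names, read from the stream header only
+    ```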
+    """
+    with pa.memory_map(filename) as memory_mapped_stream:
+        schema = pa.ipc.open_stream(memory_mapped_stream).schema
+    return schema
+
+
+def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
+    opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
+    pa_table = opened_stream.read_all()
+    return pa_table
+
+
+def _deepcopy(x, memo: dict):
+    """deepcopy a regular class instance"""
+    cls = x.__class__
+    result = cls.__new__(cls)
+    memo[id(x)] = result
+    for k, v in x.__dict__.items():
+        setattr(result, k, copy.deepcopy(v, memo))
+    return result
+
+
+def _interpolation_search(arr: List[int], x: int) -> int:
+    """
+    Return the position i in a sorted array so that arr[i] <= x < arr[i+1]
+
+    Args:
+        arr (`List[int]`): non-empty sorted list of integers
+        x (`int`): query
+
+    Returns:
+        `int`: the position i so that arr[i] <= x < arr[i+1]
+
+    Raises:
+        `IndexError`: if the array is empty or if the query is outside the array values
+    """
+    i, j = 0, len(arr) - 1
+    while i < j and arr[i] <= x < arr[j]:
+        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
+        if arr[k] <= x < arr[k + 1]:
+            return k
+        elif arr[k] < x:
+            i, j = k + 1, j
+        else:
+            i, j = i, k
+    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
+
+
+class IndexedTableMixin:
+    def __init__(self, table: pa.Table):
+        self._schema: pa.Schema = table.schema
+        self._batches: List[pa.RecordBatch] = [
+            recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
+        ]
+        self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)
+
+    def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
+        """
+        Create a pa.Table by gathering the records at the specified indices. Should be faster
+        than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
+        the binary searches in parallel using highly optimized C code.
+        """
+        if not len(indices):
+            raise ValueError("Indices must be non-empty")
+        batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
+        return pa.Table.from_batches(
+            [
+                self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
+                for batch_idx, i in zip(batch_indices, indices)
+            ],
+            schema=self._schema,
+        )
+
+    def fast_slice(self, offset=0, length=None) -> pa.Table:
+        """
+        Slice the Table using interpolation search.
+        The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.
+
+        Interpolation search is used to find the start and end indexes of the batches we want to keep.
+        The batches to keep are then concatenated to form the sliced Table.
+        """
+        if offset < 0:
+            raise IndexError("Offset must be non-negative")
+        elif offset >= self._offsets[-1] or (length is not None and length <= 0):
+            return pa.Table.from_batches([], schema=self._schema)
+        i = _interpolation_search(self._offsets, offset)
+        if length is None or length + offset >= self._offsets[-1]:
+            batches = self._batches[i:]
+            batches[0] = batches[0].slice(offset - self._offsets[i])
+        else:
+            j = _interpolation_search(self._offsets, offset + length - 1)
+            batches = self._batches[i : j + 1]
+            batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
+            batches[0] = batches[0].slice(offset - self._offsets[i])
+        return pa.Table.from_batches(batches, schema=self._schema)
+
+
+class Table(IndexedTableMixin):
+    """
+    Wraps a pyarrow Table by using composition.
+ This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. + + It implements all the basic attributes/methods of the pyarrow Table class except + the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, + append_column, remove_column, set_column, rename_columns` and `drop`. + + The implementation of these methods differs for the subclasses. + """ + + def __init__(self, table: pa.Table): + super().__init__(table) + self.table = table + + def __deepcopy__(self, memo: dict): + # arrow tables are immutable, so there's no need to copy self.table + # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason + # by adding it to the memo, self.table won't be copied + memo[id(self.table)] = self.table + # same for the recordbatches used by the index + memo[id(self._batches)] = list(self._batches) + return _deepcopy(self, memo) + + def validate(self, *args, **kwargs): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially `O(n)`). + + Args: + full (`bool`, defaults to `False`): + If `True`, run expensive checks, otherwise cheap checks only. + + Raises: + `pa.lib.ArrowInvalid`: if validation fails + """ + return self.table.validate(*args, **kwargs) + + def equals(self, *args, **kwargs): + """ + Check if contents of two tables are equal. + + Args: + other ([`~datasets.table.Table`]): + Table to compare against. + check_metadata `bool`, defaults to `False`): + Whether schema metadata equality should be checked as well. + + Returns: + `bool` + """ + args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) + kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} + return self.table.equals(*args, **kwargs) + + def to_batches(self, *args, **kwargs): + """ + Convert Table to list of (contiguous) `RecordBatch` objects. + + Args: + max_chunksize (`int`, defaults to `None`): + Maximum size for `RecordBatch` chunks. Individual chunks may be + smaller depending on the chunk layout of individual columns. + + Returns: + `List[pyarrow.RecordBatch]` + """ + return self.table.to_batches(*args, **kwargs) + + def to_pydict(self, *args, **kwargs): + """ + Convert the Table to a `dict` or `OrderedDict`. + + Returns: + `dict` + """ + return self.table.to_pydict(*args, **kwargs) + + def to_pylist(self, *args, **kwargs): + """ + Convert the Table to a list + + Returns: + `list` + """ + try: + return self.table.to_pylist(*args, **kwargs) + except AttributeError: # pyarrow <7 does not have to_pylist, so we use to_pydict + pydict = self.table.to_pydict(*args, **kwargs) + return [{k: pydict[k][i] for k in pydict} for i in range(len(self.table))] + + def to_pandas(self, *args, **kwargs): + """ + Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + Arrow MemoryPool to use for allocations. Uses the default memory + pool is not passed. + strings_to_categorical (`bool`, defaults to `False`): + Encode string (UTF8) and binary types to `pandas.Categorical`. + categories (`list`, defaults to `empty`): + List of fields that should be returned as `pandas.Categorical`. Only + applies to table-like data structures. + zero_copy_only (`bool`, defaults to `False`): + Raise an `ArrowException` if this function call would require copying + the underlying data. 
+ integer_object_nulls (`bool`, defaults to `False`): + Cast integers with nulls to objects. + date_as_object (`bool`, defaults to `True`): + Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. + timestamp_as_object (`bool`, defaults to `False`): + Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is + useful if you have timestamps that don't fit in the normal date + range of nanosecond timestamps (1678 CE-2262 CE). + If `False`, all timestamps are converted to `datetime64[ns]` dtype. + use_threads (`bool`, defaults to `True`): + Whether to parallelize the conversion using multiple threads. + deduplicate_objects (`bool`, defaults to `False`): + Do not create multiple copies Python objects when created, to save + on memory use. Conversion will be slower. + ignore_metadata (`bool`, defaults to `False`): + If `True`, do not use the 'pandas' metadata to reconstruct the + DataFrame index, if present. + safe (`bool`, defaults to `True`): + For certain data types, a cast is needed in order to store the + data in a pandas DataFrame or Series (e.g. timestamps are always + stored as nanoseconds in pandas). This option controls whether it + is a safe cast or not. + split_blocks (`bool`, defaults to `False`): + If `True`, generate one internal "block" for each column when + creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this + can temporarily reduce memory note that various pandas operations + can trigger "consolidation" which may balloon memory use. + self_destruct (`bool`, defaults to `False`): + EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow + memory while converting the Arrow object to pandas. If you use the + object after calling `to_pandas` with this option it will crash your + program. + types_mapper (`function`, defaults to `None`): + A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. + This can be used to override the default pandas type for conversion + of built-in pyarrow types or in absence of `pandas_metadata` in the + Table schema. The function receives a pyarrow DataType and is + expected to return a pandas `ExtensionDtype` or `None` if the + default conversion should be used for that type. If you have + a dictionary mapping, you can pass `dict.get` as function. + + Returns: + `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object + """ + return self.table.to_pandas(*args, **kwargs) + + def to_string(self, *args, **kwargs): + return self.table.to_string(*args, **kwargs) + + def to_reader(self, max_chunksize: Optional[int] = None): + """ + Convert the Table to a RecordBatchReader. + + Note that this method is zero-copy, it merely exposes the same data under a different API. + + Args: + max_chunksize (`int`, defaults to `None`) + Maximum size for RecordBatch chunks. Individual chunks may be smaller depending + on the chunk layout of individual columns. + + Returns: + `pyarrow.RecordBatchReader` + """ + return self.table.to_reader(max_chunksize=max_chunksize) + + def field(self, *args, **kwargs): + """ + Select a schema field by its column name or numeric index. + + Args: + i (`Union[int, str]`): + The index or name of the field to retrieve. + + Returns: + `pyarrow.Field` + """ + return self.table.field(*args, **kwargs) + + def column(self, *args, **kwargs): + """ + Select a column by its column name, or numeric index. + + Args: + i (`Union[int, str]`): + The index or name of the column to retrieve. 
+ + Returns: + `pyarrow.ChunkedArray` + """ + return self.table.column(*args, **kwargs) + + def itercolumns(self, *args, **kwargs): + """ + Iterator over all columns in their numerical order. + + Yields: + `pyarrow.ChunkedArray` + """ + return self.table.itercolumns(*args, **kwargs) + + @property + def schema(self): + """ + Schema of the table and its columns. + + Returns: + `pyarrow.Schema` + """ + return self.table.schema + + @property + def columns(self): + """ + List of all columns in numerical order. + + Returns: + `List[pa.ChunkedArray]` + """ + return self.table.columns + + @property + def num_columns(self): + """ + Number of columns in this table. + + Returns: + int + """ + return self.table.num_columns + + @property + def num_rows(self): + """ + Number of rows in this table. + + Due to the definition of a table, all columns have the same number of + rows. + + Returns: + int + """ + return self.table.num_rows + + @property + def shape(self): + """ + Dimensions of the table: (#rows, #columns). + + Returns: + `(int, int)`: Number of rows and number of columns. + """ + return self.table.shape + + @property + def nbytes(self): + """ + Total number of bytes consumed by the elements of the table. + """ + return self.table.nbytes + + @property + def column_names(self): + """ + Names of the table's columns. + """ + return self.table.column_names + + def __eq__(self, other): + return self.equals(other) + + def __getitem__(self, i): + return self.table[i] + + def __len__(self): + return len(self.table) + + def __repr__(self): + return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) + + def __str__(self): + return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) + + def slice(self, *args, **kwargs): + """ + Compute zero-copy slice of this Table. + + Args: + offset (`int`, defaults to `0`): + Offset from start of table to slice. + length (`int`, defaults to `None`): + Length of slice (default is until end of table starting from + offset). + + Returns: + `datasets.table.Table` + """ + raise NotImplementedError() + + def filter(self, *args, **kwargs): + """ + Select records from a Table. See `pyarrow.compute.filter` for full usage. + """ + raise NotImplementedError() + + def flatten(self, *args, **kwargs): + """ + Flatten this Table. Each column with a struct type is flattened + into one column per struct field. Other columns are left unchanged. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + raise NotImplementedError() + + def combine_chunks(self, *args, **kwargs): + """ + Make a new table by combining the chunks this table has. + + All the underlying chunks in the `ChunkedArray` of each column are + concatenated into zero or one chunk. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + raise NotImplementedError() + + def cast(self, *args, **kwargs): + """ + Cast table values to another schema. + + Args: + target_schema (`Schema`): + Schema to cast to, the names and order of fields must match. + safe (`bool`, defaults to `True`): + Check for overflows or other unsafe conversions. 
+
+        Returns:
+            `datasets.table.Table`
+        """
+        raise NotImplementedError()
+
+    def replace_schema_metadata(self, *args, **kwargs):
+        """
+        EXPERIMENTAL: Create shallow copy of table by replacing schema
+        key-value metadata with the indicated new metadata (which may be `None`,
+        which deletes any existing metadata).
+
+        Args:
+            metadata (`dict`, defaults to `None`):
+
+        Returns:
+            `datasets.table.Table`: shallow_copy
+        """
+        raise NotImplementedError()
+
+    def add_column(self, *args, **kwargs):
+        """
+        Add column to Table at position.
+
+        A new table is returned with the column added, the original table
+        object is left unchanged.
+
+        Args:
+            i (`int`):
+                Index to place the column at.
+            field_ (`Union[str, pyarrow.Field]`):
+                If a string is passed then the type is deduced from the column
+                data.
+            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+                Column data.
+
+        Returns:
+            `datasets.table.Table`: New table with the passed column added.
+        """
+        raise NotImplementedError()
+
+    def append_column(self, *args, **kwargs):
+        """
+        Append column at end of columns.
+
+        Args:
+            field_ (`Union[str, pyarrow.Field]`):
+                If a string is passed then the type is deduced from the column
+                data.
+            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+                Column data.
+
+        Returns:
+            `datasets.table.Table`: New table with the passed column added.
+        """
+        raise NotImplementedError()
+
+    def remove_column(self, *args, **kwargs):
+        """
+        Create new Table with the indicated column removed.
+
+        Args:
+            i (`int`):
+                Index of column to remove.
+
+        Returns:
+            `datasets.table.Table`: New table without the column.
+        """
+        raise NotImplementedError()
+
+    def set_column(self, *args, **kwargs):
+        """
+        Replace column in Table at position.
+
+        Args:
+            i (`int`):
+                Index to place the column at.
+            field_ (`Union[str, pyarrow.Field]`):
+                If a string is passed then the type is deduced from the column
+                data.
+            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+                Column data.
+
+        Returns:
+            `datasets.table.Table`: New table with the passed column set.
+        """
+        raise NotImplementedError()
+
+    def rename_columns(self, *args, **kwargs):
+        """
+        Create new table with columns renamed to provided names.
+        """
+        raise NotImplementedError()
+
+    def drop(self, *args, **kwargs):
+        """
+        Drop one or more columns and return a new table.
+
+        Args:
+            columns (`List[str]`):
+                List of field names referencing existing columns.
+
+        Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+        Returns:
+            `datasets.table.Table`: New table without the columns.
+        """
+        raise NotImplementedError()
+
+    def select(self, *args, **kwargs):
+        """
+        Select columns of the table.
+
+        Returns a new table with the specified columns, and metadata preserved.
+
+        Args:
+            columns (:obj:`Union[List[str], List[int]]`):
+                The column names or integer indices to select.
+
+        Returns:
+            `datasets.table.Table`: table with only a subset of the columns
+        """
+        raise NotImplementedError()
+
+
+class TableBlock(Table):
+    """
+    `TableBlock` is the allowed class inside a `ConcatenationTable`.
+    Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`.
+    This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTables`.
+    """
+
+    pass
+
+
+class InMemoryTable(TableBlock):
+    """
+    A table is said to be in-memory when it is loaded into the user's RAM.
+
+    Pickling it copies all the data using memory.
+    Its implementation is simple and uses the underlying pyarrow Table methods directly.
+ + This is different from the `MemoryMapped` table, for which pickling doesn't copy all the + data in memory. For a `MemoryMapped`, unpickling instead reloads the table from the disk. + + `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for + data bigger than memory or when you want the memory footprint of your application to + stay low. + """ + + @classmethod + def from_file(cls, filename: str): + table = _in_memory_arrow_table_from_file(filename) + return cls(table) + + @classmethod + def from_buffer(cls, buffer: pa.Buffer): + table = _in_memory_arrow_table_from_buffer(buffer) + return cls(table) + + @classmethod + def from_pandas(cls, *args, **kwargs): + """ + Convert pandas.DataFrame to an Arrow Table. + + The column types in the resulting Arrow Table are inferred from the + dtypes of the pandas.Series in the DataFrame. In the case of non-object + Series, the NumPy dtype is translated to its Arrow equivalent. In the + case of `object`, we need to guess the datatype by looking at the + Python objects in this Series. + + Be aware that Series of the `object` dtype don't carry enough + information to always lead to a meaningful Arrow type. In the case that + we cannot infer a type, e.g. because the DataFrame is of length 0 or + the Series only contains `None/nan` objects, the type is set to + null. This behavior can be avoided by constructing an explicit schema + and passing it to this function. + + Args: + df (`pandas.DataFrame`): + schema (`pyarrow.Schema`, *optional*): + The expected schema of the Arrow Table. This can be used to + indicate the type of columns if we cannot infer it automatically. + If passed, the output will have exactly this schema. Columns + specified in the schema that are not found in the DataFrame columns + or its index will raise an error. Additional columns or index + levels in the DataFrame which are not specified in the schema will + be ignored. + preserve_index (`bool`, *optional*): + Whether to store the index as an additional column in the resulting + `Table`. The default of None will store the index as a column, + except for RangeIndex which is stored as metadata only. Use + `preserve_index=True` to force it to be stored as a column. + nthreads (`int`, defaults to `None` (may use up to system CPU count threads)) + If greater than 1, convert columns to Arrow in parallel using + indicated number of threads. + columns (`List[str]`, *optional*): + List of column to be converted. If `None`, use all columns. + safe (`bool`, defaults to `True`): + Check for overflows or other unsafe conversions, + + Returns: + `datasets.table.Table`: + + Examples: + ```python + >>> import pandas as pd + >>> import pyarrow as pa + >>> df = pd.DataFrame({ + ... 'int': [1, 2], + ... 'str': ['a', 'b'] + ... }) + >>> pa.Table.from_pandas(df) + + ``` + """ + return cls(pa.Table.from_pandas(*args, **kwargs)) + + @classmethod + def from_arrays(cls, *args, **kwargs): + """ + Construct a Table from Arrow arrays. + + Args: + arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`): + Equal-length arrays that should form the table. + names (`List[str]`, *optional*): + Names for the table columns. If not passed, schema must be passed. + schema (`Schema`, defaults to `None`): + Schema for the created table. If not passed, names must be passed. + metadata (`Union[dict, Mapping]`, defaults to `None`): + Optional metadata for the schema (if inferred). 
+ + Returns: + `datasets.table.Table` + """ + return cls(pa.Table.from_arrays(*args, **kwargs)) + + @classmethod + def from_pydict(cls, *args, **kwargs): + """ + Construct a Table from Arrow arrays or columns. + + Args: + mapping (`Union[dict, Mapping]`): + A mapping of strings to Arrays or Python lists. + schema (`Schema`, defaults to `None`): + If not passed, will be inferred from the Mapping values + metadata (`Union[dict, Mapping]`, defaults to `None`): + Optional metadata for the schema (if inferred). + + Returns: + `datasets.table.Table` + """ + return cls(pa.Table.from_pydict(*args, **kwargs)) + + @classmethod + def from_pylist(cls, mapping, *args, **kwargs): + """ + Construct a Table from list of rows / dictionaries. + + Args: + mapping (`List[dict]`): + A mapping of strings to row values. + schema (`Schema`, defaults to `None`): + If not passed, will be inferred from the Mapping values + metadata (`Union[dict, Mapping]`, defaults to `None`): + Optional metadata for the schema (if inferred). + + Returns: + `datasets.table.Table` + """ + return cls(pa.Table.from_pylist(mapping, *args, **kwargs)) + + @classmethod + def from_batches(cls, *args, **kwargs): + """ + Construct a Table from a sequence or iterator of Arrow `RecordBatches`. + + Args: + batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`): + Sequence of `RecordBatch` to be converted, all schemas must be equal. + schema (`Schema`, defaults to `None`): + If not passed, will be inferred from the first `RecordBatch`. + + Returns: + `datasets.table.Table`: + """ + return cls(pa.Table.from_batches(*args, **kwargs)) + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this Table. + + Args: + offset (`int`, defaults to `0`): + Offset from start of table to slice. + length (`int`, defaults to `None`): + Length of slice (default is until end of table starting from + offset). + + Returns: + `datasets.table.Table` + """ + # Use fast slicing here + return InMemoryTable(self.fast_slice(offset=offset, length=length)) + + def filter(self, *args, **kwargs): + """ + Select records from a Table. See `pyarrow.compute.filter` for full usage. + """ + return InMemoryTable(self.table.filter(*args, **kwargs)) + + def flatten(self, *args, **kwargs): + """ + Flatten this Table. Each column with a struct type is flattened + into one column per struct field. Other columns are left unchanged. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + return InMemoryTable(table_flatten(self.table, *args, **kwargs)) + + def combine_chunks(self, *args, **kwargs): + """ + Make a new table by combining the chunks this table has. + + All the underlying chunks in the `ChunkedArray` of each column are + concatenated into zero or one chunk. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) + + def cast(self, *args, **kwargs): + """ + Cast table values to another schema. + + Args: + target_schema (`Schema`): + Schema to cast to, the names and order of fields must match. + safe (`bool`, defaults to `True`): + Check for overflows or other unsafe conversions. 
+ + Returns: + `datasets.table.Table` + """ + return InMemoryTable(table_cast(self.table, *args, **kwargs)) + + def replace_schema_metadata(self, *args, **kwargs): + """ + EXPERIMENTAL: Create shallow copy of table by replacing schema + key-value metadata with the indicated new metadata (which may be `None`, + which deletes any existing metadata). + + Args: + metadata (`dict`, defaults to `None`): + + Returns: + `datasets.table.Table`: shallow_copy + """ + return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) + + def add_column(self, *args, **kwargs): + """ + Add column to Table at position. + + A new table is returned with the column added, the original table + object is left unchanged. + + Args: + i (`int`): + Index to place the column at. + field_ (`Union[str, pyarrow.Field]`): + If a string is passed then the type is deduced from the column + data. + column (`Union[pyarrow.Array, List[pyarrow.Array]]`): + Column data. + + Returns: + `datasets.table.Table`: New table with the passed column added. + """ + return InMemoryTable(self.table.add_column(*args, **kwargs)) + + def append_column(self, *args, **kwargs): + """ + Append column at end of columns. + + Args: + field_ (`Union[str, pyarrow.Field]`): + If a string is passed then the type is deduced from the column + data. + column (`Union[pyarrow.Array, List[pyarrow.Array]]`): + Column data. + + Returns: + `datasets.table.Table`: + New table with the passed column added. + """ + return InMemoryTable(self.table.append_column(*args, **kwargs)) + + def remove_column(self, *args, **kwargs): + """ + Create new Table with the indicated column removed. + + Args: + i (`int`): + Index of column to remove. + + Returns: + `datasets.table.Table`: + New table without the column. + """ + return InMemoryTable(self.table.remove_column(*args, **kwargs)) + + def set_column(self, *args, **kwargs): + """ + Replace column in Table at position. + + Args: + i (`int`): + Index to place the column at. + field_ (`Union[str, pyarrow.Field]`): + If a string is passed then the type is deduced from the column + data. + column (`Union[pyarrow.Array, List[pyarrow.Array]]`): + Column data. + + Returns: + `datasets.table.Table`: + New table with the passed column set. + """ + return InMemoryTable(self.table.set_column(*args, **kwargs)) + + def rename_columns(self, *args, **kwargs): + """ + Create new table with columns renamed to provided names. + """ + return InMemoryTable(self.table.rename_columns(*args, **kwargs)) + + def drop(self, *args, **kwargs): + """ + Drop one or more columns and return a new table. + + Args: + columns (`List[str]`): + List of field names referencing existing columns. + + Raises: + `KeyError` : if any of the passed columns name are not existing. + + Returns: + `datasets.table.Table`: + New table without the columns. + """ + return InMemoryTable(self.table.drop(*args, **kwargs)) + + def select(self, *args, **kwargs): + """ + Select columns of the table. + + Returns a new table with the specified columns, and metadata preserved. + + Args: + columns (:obj:`Union[List[str], List[int]]`): + The column names or integer indices to select. + + Returns: + :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. 
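+
+        Example (an illustrative sketch over a small in-memory table):
+
+        ```python
+        >>> t = InMemoryTable.from_pydict({"a": [1, 2], "b": [3, 4]})
+        >>> t.select(["a"]).column_names
+        ['a']
+        ```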
+        """
+        return InMemoryTable(self.table.select(*args, **kwargs))
+
+
+# The MemoryMappedTable needs replays to properly reload tables from the disk
+Replay = Tuple[str, tuple, dict]
+
+
+class MemoryMappedTable(TableBlock):
+    """
+    A table is said to be memory-mapped when it doesn't use the user's RAM but loads the data
+    from the disk instead.
+
+    Pickling it doesn't copy the data into memory.
+    Instead, only the path to the memory-mapped arrow file is pickled, as well as the list
+    of transforms to "replay" when reloading the table from the disk.
+
+    Its implementation requires storing a history of all the transforms that were applied
+    to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
+    from the disk.
+
+    This is different from `InMemoryTable`, for which pickling does copy all the
+    data into memory.
+
+    `InMemoryTable` must be used when data fits in memory, while `MemoryMappedTable` is reserved
+    for data bigger than memory or for when you want the memory footprint of your application
+    to stay low.
+    """
+
+    def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
+        super().__init__(table)
+        self.path = os.path.abspath(path)
+        self.replays: List[Replay] = replays if replays is not None else []
+
+    @classmethod
+    def from_file(cls, filename: str, replays=None):
+        table = _memory_mapped_arrow_table_from_file(filename)
+        table = cls._apply_replays(table, replays)
+        return cls(table, filename, replays)
+
+    def __getstate__(self):
+        return {"path": self.path, "replays": self.replays}
+
+    def __setstate__(self, state):
+        path = state["path"]
+        replays = state["replays"]
+        table = _memory_mapped_arrow_table_from_file(path)
+        table = self._apply_replays(table, replays)
+        MemoryMappedTable.__init__(self, table, path=path, replays=replays)
+
+    @staticmethod
+    def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
+        if replays is not None:
+            for name, args, kwargs in replays:
+                if name == "cast":
+                    table = table_cast(table, *args, **kwargs)
+                elif name == "flatten":
+                    table = table_flatten(table, *args, **kwargs)
+                else:
+                    table = getattr(table, name)(*args, **kwargs)
+        return table
+
+    def _append_replay(self, replay: Replay) -> List[Replay]:
+        replays = copy.deepcopy(self.replays)
+        replays.append(replay)
+        return replays
+
+    def slice(self, offset=0, length=None):
+        """
+        Compute zero-copy slice of this Table.
+
+        Args:
+            offset (`int`, defaults to `0`):
+                Offset from start of table to slice.
+            length (`int`, defaults to `None`):
+                Length of slice (default is until end of table starting from
+                offset).
+
+        Returns:
+            `datasets.table.Table`
+        """
+        replay = ("slice", (offset, length), {})
+        replays = self._append_replay(replay)
+        # Use fast slicing here
+        return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)
+
+    def filter(self, *args, **kwargs):
+        """
+        Select records from a Table. See `pyarrow.compute.filter` for full usage.
+        """
+        replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
+        replays = self._append_replay(replay)
+        return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)
+
+    def flatten(self, *args, **kwargs):
+        """
+        Flatten this Table. Each column with a struct type is flattened
+        into one column per struct field. Other columns are left unchanged.
+
+        Args:
+            memory_pool (`MemoryPool`, defaults to `None`):
+                For memory allocations, if required, otherwise use default pool.
+ + Returns: + `datasets.table.Table` + """ + replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays) + + def combine_chunks(self, *args, **kwargs): + """ + Make a new table by combining the chunks this table has. + + All the underlying chunks in the ChunkedArray of each column are + concatenated into zero or one chunk. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) + + def cast(self, *args, **kwargs): + """ + Cast table values to another schema + + Args: + target_schema (`Schema`): + Schema to cast to, the names and order of fields must match. + safe (`bool`, defaults to `True`): + Check for overflows or other unsafe conversions. + + Returns: + `datasets.table.Table` + """ + replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays) + + def replace_schema_metadata(self, *args, **kwargs): + """ + EXPERIMENTAL: Create shallow copy of table by replacing schema + key-value metadata with the indicated new metadata (which may be None, + which deletes any existing metadata. + + Args: + metadata (`dict`, defaults to `None`): + + Returns: + `datasets.table.Table`: shallow_copy + """ + replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) + + def add_column(self, *args, **kwargs): + """ + Add column to Table at position. + + A new table is returned with the column added, the original table + object is left unchanged. + + Args: + i (`int`): + Index to place the column at. + field_ (`Union[str, pyarrow.Field]`): + If a string is passed then the type is deduced from the column + data. + column (`Union[pyarrow.Array, List[pyarrow.Array]]`): + Column data. + + Returns: + `datasets.table.Table`: New table with the passed column added. + """ + replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) + + def append_column(self, *args, **kwargs): + """ + Append column at end of columns. + + Args: + field_ (`Union[str, pyarrow.Field]`): + If a string is passed then the type is deduced from the column + data. + column (`Union[pyarrow.Array, List[pyarrow.Array]]`): + Column data. + + Returns: + `datasets.table.Table`: + New table with the passed column added. + """ + replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) + + def remove_column(self, *args, **kwargs): + """ + Create new Table with the indicated column removed. + + Args: + i (`int`): + Index of column to remove. + + Returns: + `datasets.table.Table`: + New table without the column. 
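+
+        Example (an illustrative sketch; `data.arrow` stands for a hypothetical
+        Arrow file on disk):
+
+        ```python
+        >>> t = MemoryMappedTable.from_file("data.arrow")  # hypothetical path
+        >>> t2 = t.remove_column(0)
+        >>> t2.replays  # the transform is recorded so it can be replayed on reload
+        [('remove_column', (0,), {})]
+        ```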
+ """ + replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) + + def set_column(self, *args, **kwargs): + """ + Replace column in Table at position. + + Args: + i (`int`): + Index to place the column at. + field_ (`Union[str, pyarrow.Field]`): + If a string is passed then the type is deduced from the column + data. + column (`Union[pyarrow.Array, List[pyarrow.Array]]`): + Column data. + + Returns: + `datasets.table.Table`: + New table with the passed column set. + """ + replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) + + def rename_columns(self, *args, **kwargs): + """ + Create new table with columns renamed to provided names. + """ + replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) + + def drop(self, *args, **kwargs): + """ + Drop one or more columns and return a new table. + + Args: + columns (`List[str]`): + List of field names referencing existing columns. + + Raises: + `KeyError` : if any of the passed columns name are not existing. + + Returns: + `datasets.table.Table`: + New table without the columns. + """ + replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) + + def select(self, *args, **kwargs): + """ + Select columns of the table. + + Returns a new table with the specified columns, and metadata preserved. + + Args: + columns (:obj:`Union[List[str], List[int]]`): + The column names or integer indices to select. + + Returns: + :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. + """ + replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays) + + +# A ConcatenationTable is the concatenation of several tables. +# The ``blocks`` attributes stores a list of list of blocks. +# The first axis concatenates the tables along the axis 0 (it appends rows), +# while the second axis concatenates tables along the axis 1 (it appends columns). +TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]]) + + +class ConcatenationTable(Table): + """ + The table comes from the concatenation of several tables called blocks. + It enables concatenation on both axis 0 (append rows) and axis 1 (append columns). + + The underlying tables are called "blocks" and can be either `InMemoryTable` + or `MemoryMappedTable` objects. + This allows to combine tables that come from memory or that are memory mapped. + When a `ConcatenationTable` is pickled, then each block is pickled: + - the `InMemoryTable` objects are pickled by copying all the data in memory. + - the MemoryMappedTable objects are pickled without copying the data into memory. + Instead, only the path to the memory mapped arrow file is pickled, as well as the list + of transforms to "replays" when reloading the table from the disk. + + Its implementation requires to store each block separately. + The `blocks` attributes stores a list of list of blocks. 
+ The first axis concatenates the tables along the axis 0 (it appends rows), + while the second axis concatenates tables along the axis 1 (it appends columns). + + If some columns are missing when concatenating on axis 0, they are filled with null values. + This is done using `pyarrow.concat_tables(tables, promote=True)`. + + You can access the fully combined table by accessing the `ConcatenationTable.table` attribute, + and the blocks by accessing the `ConcatenationTable.blocks` attribute. + """ + + def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]): + super().__init__(table) + self.blocks = blocks + # Check that all the blocks have the right type. + # Only InMemoryTable and MemoryMappedTable are allowed. + for subtables in blocks: + for subtable in subtables: + if not isinstance(subtable, TableBlock): + raise TypeError( + "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects" + f", but got {subtable}." + ) + + def __getstate__(self): + return {"blocks": self.blocks} + + def __setstate__(self, state): + blocks = state["blocks"] + table = self._concat_blocks_horizontally_and_vertically(blocks) + ConcatenationTable.__init__(self, table, blocks=blocks) + + @staticmethod + def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table: + pa_tables = [table.table if hasattr(table, "table") else table for table in blocks] + if axis == 0: + # we set promote=True to fill missing columns with null values + if config.PYARROW_VERSION.major < 14: + return pa.concat_tables(pa_tables, promote=True) + else: + return pa.concat_tables(pa_tables, promote_options="default") + elif axis == 1: + for i, table in enumerate(pa_tables): + if i == 0: + pa_table = table + else: + for name, col in zip(table.column_names, table.columns): + pa_table = pa_table.append_column(name, col) + return pa_table + else: + raise ValueError("'axis' must be either 0 or 1") + + @classmethod + def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table: + pa_tables_to_concat_vertically = [] + for i, tables in enumerate(blocks): + if not tables: + continue + pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) + pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) + return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) + + @classmethod + def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer: + if axis is not None: + merged_blocks = [] + for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): + if is_in_memory: + block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] + merged_blocks += list(block_group) + else: # both + merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] + if all(len(row_block) == 1 for row_block in merged_blocks): + merged_blocks = cls._merge_blocks( + [block for row_block in merged_blocks for block in row_block], axis=0 + ) + return merged_blocks + + @classmethod + def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: + if isinstance(blocks, TableBlock): + return blocks + elif isinstance(blocks[0], TableBlock): + return cls._merge_blocks(blocks, axis=0) + else: + return cls._merge_blocks(blocks) + + @classmethod + def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable": + blocks = cls._consolidate_blocks(blocks) + if isinstance(blocks, TableBlock): + table = blocks + return 
cls(table.table, [[table]]) + elif isinstance(blocks[0], TableBlock): + table = cls._concat_blocks(blocks, axis=0) + blocks = [[t] for t in blocks] + return cls(table, blocks) + else: + table = cls._concat_blocks_horizontally_and_vertically(blocks) + return cls(table, blocks) + + @classmethod + def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable": + """Create `ConcatenationTable` from list of tables. + + Args: + tables (list of `Table` or list of `pyarrow.Table`): + List of tables. + axis (`{0, 1}`, defaults to `0`, meaning over rows): + Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns + (horizontally). + + + """ + + def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]: + if isinstance(table, pa.Table): + return [[InMemoryTable(table)]] + elif isinstance(table, ConcatenationTable): + return copy.deepcopy(table.blocks) + else: + return [[table]] + + def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]: + sliced = [table.slice(0, length) for table in row_block] + remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block] + return sliced, remainder + + def _split_both_like( + result: List[List[TableBlock]], blocks: List[List[TableBlock]] + ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]: + """ + Make sure each row_block contain the same num_rows to be able to concatenate them on axis=1. + + To do so, we modify both blocks sets to have the same row_blocks boundaries. + For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows, + we modify both to have 4 row_blocks of size 2, 1, 1 and 2: + + [ x x x | x x x ] + + [ y y | y y | y y ] + ----------------------------- + = [ x x | x | x | x x ] + [ y y | y | y | y y ] + + """ + result, blocks = list(result), list(blocks) + new_result, new_blocks = [], [] + while result and blocks: + # we slice the longest row block to save two row blocks of same length + # and we replace the long row block by its remainder if necessary + if len(result[0][0]) > len(blocks[0][0]): + new_blocks.append(blocks[0]) + sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0])) + new_result.append(sliced) + elif len(result[0][0]) < len(blocks[0][0]): + new_result.append(result[0]) + sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0])) + new_blocks.append(sliced) + else: + new_result.append(result.pop(0)) + new_blocks.append(blocks.pop(0)) + if result or blocks: + raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows") + return new_result, new_blocks + + def _extend_blocks( + result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0 + ) -> List[List[TableBlock]]: + if axis == 0: + result.extend(blocks) + elif axis == 1: + # We make sure each row_block have the same num_rows + result, blocks = _split_both_like(result, blocks) + for i, row_block in enumerate(blocks): + result[i].extend(row_block) + return result + + blocks = to_blocks(tables[0]) + for table in tables[1:]: + table_blocks = to_blocks(table) + blocks = _extend_blocks(blocks, table_blocks, axis=axis) + return cls.from_blocks(blocks) + + @property + def _slices(self): + offset = 0 + for tables in self.blocks: + length = len(tables[0]) + yield (offset, length) + offset += length + + def slice(self, offset=0, length=None): + """ + Compute zero-copy slice of this Table. 
+ + Args: + offset (`int`, defaults to `0`): + Offset from start of table to slice. + length (`int`, defaults to `None`): + Length of slice (default is until end of table starting from + offset). + + Returns: + `datasets.table.Table` + """ + table = self.table.slice(offset, length=length) + length = length if length is not None else self.num_rows - offset + blocks = [] + for tables in self.blocks: + n_rows = len(tables[0]) + if length == 0: + break + elif n_rows <= offset: + offset = offset - n_rows + elif n_rows <= offset + length: + blocks.append([t.slice(offset) for t in tables]) + length, offset = length + offset - n_rows, 0 + else: + blocks.append([t.slice(offset, length) for t in tables]) + length, offset = 0, 0 + return ConcatenationTable(table, blocks) + + def filter(self, mask, *args, **kwargs): + """ + Select records from a Table. See `pyarrow.compute.filter` for full usage. + """ + table = self.table.filter(mask, *args, **kwargs) + blocks = [] + for (offset, length), tables in zip(self._slices, self.blocks): + submask = mask.slice(offset, length) + blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def flatten(self, *args, **kwargs): + """ + Flatten this Table. Each column with a struct type is flattened + into one column per struct field. Other columns are left unchanged. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + table = table_flatten(self.table, *args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.flatten(*args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def combine_chunks(self, *args, **kwargs): + """ + Make a new table by combining the chunks this table has. + + All the underlying chunks in the `ChunkedArray` of each column are + concatenated into zero or one chunk. + + Args: + memory_pool (`MemoryPool`, defaults to `None`): + For memory allocations, if required, otherwise use default pool. + + Returns: + `datasets.table.Table` + """ + table = self.table.combine_chunks(*args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def cast(self, target_schema, *args, **kwargs): + """ + Cast table values to another schema. + + Args: + target_schema (`Schema`): + Schema to cast to, the names and order of fields must match. + safe (`bool`, defaults to `True`): + Check for overflows or other unsafe conversions. 
+
+        Returns:
+            `datasets.table.Table`
+        """
+        from .features import Features
+
+        table = table_cast(self.table, target_schema, *args, **kwargs)
+        target_features = Features.from_arrow_schema(target_schema)
+        blocks = []
+        for subtables in self.blocks:
+            new_tables = []
+            fields = list(target_schema)
+            for subtable in subtables:
+                subfields = []
+                for name in subtable.column_names:
+                    subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
+                subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
+                subschema = subfeatures.arrow_schema
+                new_tables.append(subtable.cast(subschema, *args, **kwargs))
+            blocks.append(new_tables)
+        return ConcatenationTable(table, blocks)
+
+    def replace_schema_metadata(self, *args, **kwargs):
+        """
+        EXPERIMENTAL: Create shallow copy of table by replacing schema
+        key-value metadata with the indicated new metadata (which may be `None`,
+        which deletes any existing metadata).
+
+        Args:
+            metadata (`dict`, defaults to `None`):
+
+        Returns:
+            `datasets.table.Table`: shallow_copy
+        """
+        table = self.table.replace_schema_metadata(*args, **kwargs)
+        blocks = []
+        for tables in self.blocks:
+            blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
+        # Return the rebuilt blocks (previously the freshly built list was discarded)
+        return ConcatenationTable(table, blocks)
+
+    def add_column(self, *args, **kwargs):
+        """
+        Add column to Table at position.
+
+        A new table is returned with the column added, the original table
+        object is left unchanged.
+
+        Args:
+            i (`int`):
+                Index to place the column at.
+            field_ (`Union[str, pyarrow.Field]`):
+                If a string is passed then the type is deduced from the column
+                data.
+            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+                Column data.
+
+        Returns:
+            `datasets.table.Table`: New table with the passed column added.
+        """
+        raise NotImplementedError()
+
+    def append_column(self, *args, **kwargs):
+        """
+        Append column at end of columns.
+
+        Args:
+            field_ (`Union[str, pyarrow.Field]`):
+                If a string is passed then the type is deduced from the column
+                data.
+            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+                Column data.
+
+        Returns:
+            `datasets.table.Table`:
+                New table with the passed column added.
+        """
+        raise NotImplementedError()
+
+    def remove_column(self, i, *args, **kwargs):
+        """
+        Create new Table with the indicated column removed.
+
+        Args:
+            i (`int`):
+                Index of column to remove.
+
+        Returns:
+            `datasets.table.Table`:
+                New table without the column.
+        """
+        table = self.table.remove_column(i, *args, **kwargs)
+        name = self.table.column_names[i]
+        blocks = []
+        for tables in self.blocks:
+            blocks.append(
+                [
+                    t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
+                    for t in tables
+                ]
+            )
+        return ConcatenationTable(table, blocks)
+
+    def set_column(self, *args, **kwargs):
+        """
+        Replace column in Table at position.
+
+        Args:
+            i (`int`):
+                Index to place the column at.
+            field_ (`Union[str, pyarrow.Field]`):
+                If a string is passed then the type is deduced from the column
+                data.
+            column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+                Column data.
+
+        Returns:
+            `datasets.table.Table`:
+                New table with the passed column set.
+        """
+        raise NotImplementedError()
+
+    def rename_columns(self, names, *args, **kwargs):
+        """
+        Create new table with columns renamed to provided names.
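+
+        Example (an illustrative sketch; the table is built from two in-memory blocks):
+
+        ```python
+        >>> t1 = InMemoryTable.from_pydict({"a": [1, 2]})
+        >>> t2 = InMemoryTable.from_pydict({"a": [3, 4]})
+        >>> ConcatenationTable.from_tables([t1, t2]).rename_columns(["b"]).column_names
+        ['b']
+        ```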
+        """
+        table = self.table.rename_columns(names, *args, **kwargs)
+        names = dict(zip(self.table.column_names, names))
+        blocks = []
+        for tables in self.blocks:
+            blocks.append(
+                [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
+            )
+        return ConcatenationTable(table, blocks)
+
+    def drop(self, columns, *args, **kwargs):
+        """
+        Drop one or more columns and return a new table.
+
+        Args:
+            columns (`List[str]`):
+                List of field names referencing existing columns.
+
+        Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+        Returns:
+            `datasets.table.Table`:
+                New table without the columns.
+        """
+        table = self.table.drop(columns, *args, **kwargs)
+        blocks = []
+        for tables in self.blocks:
+            blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+        return ConcatenationTable(table, blocks)
+
+    def select(self, columns, *args, **kwargs):
+        """
+        Select columns of the table.
+
+        Returns a new table with the specified columns, and metadata preserved.
+
+        Args:
+            columns (:obj:`Union[List[str], List[int]]`):
+                The column names or integer indices to select.
+
+        Returns:
+            :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+        """
+        table = self.table.select(columns, *args, **kwargs)
+        blocks = []
+        for tables in self.blocks:
+            blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+        return ConcatenationTable(table, blocks)
+
+
+def concat_tables(tables: List[Table], axis: int = 0) -> Table:
+    """
+    Concatenate tables.
+
+    Args:
+        tables (list of `Table`):
+            List of tables to be concatenated.
+        axis (`{0, 1}`, defaults to `0`, meaning over rows):
+            Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+            (horizontally).
+
+    Returns:
+        `datasets.table.Table`:
+            If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
+            Otherwise if there's only one table, it is returned as is.
+    """
+    tables = list(tables)
+    if len(tables) == 1:
+        return tables[0]
+    return ConcatenationTable.from_tables(tables, axis=axis)
+
+
+def list_table_cache_files(table: Table) -> List[str]:
+    """
+    Get the cache files that are loaded by the table.
+    Cache files are used when parts of the table come from the disk via memory mapping.
+
+    Returns:
+        `List[str]`:
+            A list of paths to the cache files loaded by the table.
+    """
+    if isinstance(table, ConcatenationTable):
+        cache_files = []
+        for subtables in table.blocks:
+            for subtable in subtables:
+                cache_files += list_table_cache_files(subtable)
+        return cache_files
+    elif isinstance(table, MemoryMappedTable):
+        return [table.path]
+    else:
+        return []
+
+
+def _wrap_for_chunked_arrays(func):
+    """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly"""
+
+    def wrapper(array, *args, **kwargs):
+        if isinstance(array, pa.ChunkedArray):
+            return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
+        else:
+            return func(array, *args, **kwargs)
+
+    return wrapper
+
+
+def _is_extension_type(pa_type: pa.DataType) -> bool:
+    """
+    Check (recursively) if a pyarrow type is an extension type.
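+
+    Example (an illustrative sketch; `Array2DExtensionType` is one of the
+    extension types defined in `datasets`):
+
+    ```python
+    >>> import pyarrow as pa
+    >>> from datasets.features.features import Array2DExtensionType
+    >>> _is_extension_type(pa.list_(Array2DExtensionType((1, 3), "int64")))
+    True
+    >>> _is_extension_type(pa.list_(pa.int64()))
+    False
+    ```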
+    """
+    if isinstance(pa_type, pa.StructType):
+        return any(_is_extension_type(field.type) for field in pa_type)
+    elif isinstance(pa_type, (pa.ListType, pa.FixedSizeListType, pa.LargeListType)):
+        return _is_extension_type(pa_type.value_type)
+    elif isinstance(pa_type, pa.ExtensionType):
+        return True
+    else:
+        return False
+
+
+def array_concat(arrays: List[pa.Array]):
+    """Improved version of pa.concat_arrays
+
+    It supports concatenating pa.ExtensionArray objects by concatenating the underlying storages.
+
+    Args:
+        arrays (List[pa.Array]): List of arrays to concatenate
+
+    Raises:
+        pa.ArrowInvalid: if the arrow array concatenation fails
+        ValueError: if the list of arrays is empty
+        TypeError: if the arrays to be concatenated have different types
+
+    Returns:
+        array (:obj:`pyarrow.Array`): the concatenated array
+    """
+    arrays = list(arrays)
+    array_types = {array.type for array in arrays}
+
+    if not array_types:
+        raise ValueError("Couldn't concatenate empty list of arrays")
+    if len(array_types) > 1:
+        array_types = list(array_types)
+        raise TypeError(f"Couldn't concatenate arrays with different types {array_types[0]} and {array_types[1]}")
+
+    array_type = arrays[0].type
+    arrays = [chunk for arr in arrays for chunk in (arr.chunks if isinstance(arr, pa.ChunkedArray) else (arr,))]
+
+    if not _is_extension_type(array_type):
+        return pa.concat_arrays(arrays)
+
+    def _offsets_concat(offsets):
+        offset = offsets[0]
+        concatenated_offsets = offset
+        for offset in offsets[1:]:
+            offset = pc.subtract(offset, offset[0])
+            offset = pc.add(offset[1:], concatenated_offsets[-1])
+            concatenated_offsets = pa.concat_arrays([concatenated_offsets, offset])
+        return concatenated_offsets
+
+    def _concat_arrays(arrays):
+        array_type = arrays[0].type
+        if isinstance(array_type, pa.ExtensionType):
+            return array_type.wrap_array(_concat_arrays([array.storage for array in arrays]))
+        elif pa.types.is_struct(array_type):
+            return pa.StructArray.from_arrays(
+                [_concat_arrays([array.field(field.name) for array in arrays]) for field in array_type],
+                fields=list(array_type),
+                mask=pa.concat_arrays([array.is_null() for array in arrays]),
+            )
+        elif pa.types.is_list(array_type):
+            if any(array.null_count > 0 for array in arrays):
+                if config.PYARROW_VERSION.major < 10:
+                    warnings.warn(
+                        "None values are converted to empty lists in `pyarrow<10.0.0` when concatenating list arrays with None values. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
+                    )
+                else:
+                    return pa.ListArray.from_arrays(
+                        _offsets_concat([array.offsets for array in arrays]),
+                        _concat_arrays([array.values for array in arrays]),
+                        mask=pa.concat_arrays([array.is_null() for array in arrays]),
+                    )
+            return pa.ListArray.from_arrays(
+                _offsets_concat([array.offsets for array in arrays]),
+                _concat_arrays([array.values for array in arrays]),
+            )
+        elif pa.types.is_fixed_size_list(array_type):
+            if config.PYARROW_VERSION.major < 15:
+                # PyArrow bug: https://github.com/apache/arrow/issues/35360
+                return pa.FixedSizeListArray.from_arrays(
+                    _concat_arrays([array.values[array.offset * array.type.list_size :] for array in arrays]),
+                    array_type.list_size,
+                )
+            else:
+                return pa.FixedSizeListArray.from_arrays(
+                    _concat_arrays([array.values for array in arrays]),
+                    array_type.value_type,
+                    array_type.list_size,
+                )
+        return pa.concat_arrays(arrays)
+
+    return _concat_arrays(arrays)
+
+
+@_wrap_for_chunked_arrays
+def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
+    """Improved version of `pa.Array.cast`
+
+    It supports casting `pa.StructArray` objects to re-order the fields.
+    It also lets you control certain aspects of the casting, e.g. whether
+    to disallow casting numbers (`floats` or `ints`) to strings.
+
+    Args:
+        array (`pa.Array`):
+            PyArrow array to cast
+        pa_type (`pa.DataType`):
+            Target PyArrow type
+        allow_number_to_str (`bool`, defaults to `True`):
+            Whether to allow casting numbers to strings.
+
+    Raises:
+        `pa.ArrowInvalid`: if the arrow data casting fails
+        `TypeError`: if the target type is not supported, e.g.
+
+            - if a field is missing
+            - if casting from numbers to strings and `allow_number_to_str` is `False`
+
+    Returns:
+        `pa.Array`: the casted array
+    """
+    _c = partial(array_cast, allow_number_to_str=allow_number_to_str)
+    if isinstance(array, pa.ExtensionArray):
+        array = array.storage
+    if isinstance(pa_type, pa.ExtensionType):
+        return pa_type.wrap_array(_c(array, pa_type.storage_type))
+    elif array.type == pa_type:
+        return array
+    elif pa.types.is_struct(array.type):
+        if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}):
+            if array.type.num_fields == 0:
+                return array
+            arrays = [_c(array.field(field.name), field.type) for field in pa_type]
+            return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
+    elif pa.types.is_list(array.type):
+        if pa.types.is_fixed_size_list(pa_type):
+            if pa_type.list_size * len(array) == len(array.values):
+                return pa.FixedSizeListArray.from_arrays(
+                    _c(array.values, pa_type.value_type),
+                    pa_type.list_size,
+                )
+        elif pa.types.is_list(pa_type):
+            if array.null_count > 0:
+                if config.PYARROW_VERSION.major < 10:
+                    warnings.warn(
+                        f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
+                    )
+                else:
+                    return pa.ListArray.from_arrays(
+                        array.offsets, _c(array.values, pa_type.value_type), mask=array.is_null()
+                    )
+            return pa.ListArray.from_arrays(array.offsets, _c(array.values, pa_type.value_type))
+    elif pa.types.is_fixed_size_list(array.type):
+        array_values = array.values
+        if config.PYARROW_VERSION.major < 15:
+            # PyArrow bug: https://github.com/apache/arrow/issues/35360
+            array_values = array.values[array.offset * array.type.list_size :]
+        if pa.types.is_fixed_size_list(pa_type):
+            return pa.FixedSizeListArray.from_arrays(
+                _c(array_values, pa_type.value_type),
+                pa_type.list_size,
+            )
+        elif pa.types.is_list(pa_type):
+            offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
+            if array.null_count > 0:
+                if config.PYARROW_VERSION.major < 10:
+                    warnings.warn(
+                        f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676."
+                    )
+                else:
+                    return pa.ListArray.from_arrays(
+                        offsets_arr, _c(array_values, pa_type.value_type), mask=array.is_null()
+                    )
+            return pa.ListArray.from_arrays(offsets_arr, _c(array_values, pa_type.value_type))
+    else:
+        if (
+            not allow_number_to_str
+            and pa.types.is_string(pa_type)
+            and (pa.types.is_floating(array.type) or pa.types.is_integer(array.type))
+        ):
+            raise TypeError(
+                f"Couldn't cast array of type {array.type} to {pa_type} since allow_number_to_str is set to {allow_number_to_str}"
+            )
+        if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):
+            raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}")
+        return array.cast(pa_type)
+    raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}")
+
+
+@_wrap_for_chunked_arrays
+def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True):
+    """Cast an array to the arrow type that corresponds to the requested feature type.
+    For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods
+    they define to enable casting from other arrow types.
+
+    Args:
+        array (`pa.Array`):
+            The PyArrow array to cast.
+        feature (`datasets.features.FeatureType`):
+            The target feature type.
+        allow_number_to_str (`bool`, defaults to `True`):
+            Whether to allow casting numbers to strings.
+
+    Raises:
+        `pa.ArrowInvalid`: if the arrow data casting fails
+        `TypeError`: if the target type is not supported, e.g.
+ + - if a field is missing + - if casting from numbers to strings and `allow_number_to_str` is `False` + + Returns: + array (`pyarrow.Array`): the casted array + """ + from .features.features import Sequence, get_nested_type + + _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str) + + if isinstance(array, pa.ExtensionArray): + array = array.storage + if hasattr(feature, "cast_storage"): + return feature.cast_storage(array) + + elif pa.types.is_struct(array.type): + # feature must be a dict or Sequence(subfeatures_dict) + if isinstance(feature, Sequence) and isinstance(feature.feature, dict): + feature = { + name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() + } + if isinstance(feature, dict) and {field.name for field in array.type} == set(feature): + if array.type.num_fields == 0: + return array + arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()] + return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) + elif pa.types.is_list(array.type): + # feature must be either [subfeature] or Sequence(subfeature) + if isinstance(feature, list): + casted_values = _c(array.values, feature[0]) + if casted_values.type == array.values.type: + return array + else: + if array.null_count > 0: + if config.PYARROW_VERSION.major < 10: + warnings.warn( + f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." + ) + else: + return pa.ListArray.from_arrays(array.offsets, casted_values, mask=array.is_null()) + return pa.ListArray.from_arrays(array.offsets, casted_values) + elif isinstance(feature, Sequence): + if feature.length > -1: + if feature.length * len(array) == len(array.values): + return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length) + else: + casted_values = _c(array.values, feature.feature) + if casted_values.type == array.values.type: + return array + else: + if array.null_count > 0: + if config.PYARROW_VERSION.major < 10: + warnings.warn( + f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." + ) + else: + return pa.ListArray.from_arrays( + array.offsets, _c(array.values, feature.feature), mask=array.is_null() + ) + return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature)) + elif pa.types.is_fixed_size_list(array.type): + # feature must be either [subfeature] or Sequence(subfeature) + array_values = array.values + if config.PYARROW_VERSION.major < 15: + # PyArrow bug: https://github.com/apache/arrow/issues/35360 + array_values = array.values[array.offset * array.type.list_size :] + if isinstance(feature, list): + if array.null_count > 0: + if config.PYARROW_VERSION.major < 10: + warnings.warn( + f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. 
This will raise an error in a future major version of `datasets`"
+                    )
+                else:
+                    # FixedSizeListArray has no offsets buffer; compute offsets from the fixed list_size
+                    return pa.ListArray.from_arrays(
+                        pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()),
+                        _c(array_values, feature[0]),
+                        mask=array.is_null(),
+                    )
+            return pa.ListArray.from_arrays(
+                pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()),
+                _c(array_values, feature[0]),
+            )
+        elif isinstance(feature, Sequence):
+            if feature.length > -1:
+                if feature.length * len(array) == len(array_values):
+                    return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
+            else:
+                offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
+                if array.null_count > 0:
+                    if config.PYARROW_VERSION.major < 10:
+                        warnings.warn(
+                            f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
+                        )
+                    else:
+                        return pa.ListArray.from_arrays(
+                            offsets_arr, _c(array_values, feature.feature), mask=array.is_null()
+                        )
+                return pa.ListArray.from_arrays(offsets_arr, _c(array_values, feature.feature))
+    if pa.types.is_null(array.type):
+        return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
+    elif not isinstance(feature, (Sequence, dict, list, tuple)):
+        return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
+    raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
+
+
+@_wrap_for_chunked_arrays
+def embed_array_storage(array: pa.Array, feature: "FeatureType"):
+    """Embed data into an array's storage.
+    For custom features like Audio or Image, it takes into account the "embed_storage" methods
+    they define to enable embedding external data (e.g. an image file) into other arrow types.
+
+    Args:
+        array (`pa.Array`):
+            The PyArrow array in which to embed data.
+        feature (`datasets.features.FeatureType`):
+            Array features.
+
+    Raises:
+        `TypeError`: if the target type is not supported, e.g.
+
+            - if a field is missing
+
+    Returns:
+        array (`pyarrow.Array`): the casted array
+    """
+    from .features import Sequence
+
+    _e = embed_array_storage
+
+    if isinstance(array, pa.ExtensionArray):
+        array = array.storage
+    if hasattr(feature, "embed_storage"):
+        return feature.embed_storage(array)
+    elif pa.types.is_struct(array.type):
+        # feature must be a dict or Sequence(subfeatures_dict)
+        if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+            feature = {
+                name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+            }
+        if isinstance(feature, dict):
+            arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()]
+            return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+    elif pa.types.is_list(array.type):
+        # feature must be either [subfeature] or Sequence(subfeature)
+        if isinstance(feature, list):
+            if array.null_count > 0:
+                if config.PYARROW_VERSION.major < 10:
+                    warnings.warn(
+                        f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676.
This will raise an error in a future major version of `datasets`"
+                    )
+                else:
+                    return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]), mask=array.is_null())
+            return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]))
+        elif isinstance(feature, Sequence):
+            if feature.length > -1:
+                if feature.length * len(array) == len(array.values):
+                    return pa.FixedSizeListArray.from_arrays(_e(array.values, feature.feature), feature.length)
+            else:
+                casted_values = _e(array.values, feature.feature)
+                if casted_values.type == array.values.type:
+                    return array
+                else:
+                    if array.null_count > 0:
+                        if config.PYARROW_VERSION.major < 10:
+                            warnings.warn(
+                                f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
+                            )
+                        else:
+                            return pa.ListArray.from_arrays(
+                                array.offsets, _e(array.values, feature.feature), mask=array.is_null()
+                            )
+                    return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature.feature))
+    elif pa.types.is_fixed_size_list(array.type):
+        # feature must be either [subfeature] or Sequence(subfeature)
+        array_values = array.values
+        if config.PYARROW_VERSION.major < 15:
+            # PyArrow bug: https://github.com/apache/arrow/issues/35360
+            array_values = array.values[array.offset * array.type.list_size :]
+        if isinstance(feature, list):
+            if array.null_count > 0:
+                if config.PYARROW_VERSION.major < 10:
+                    warnings.warn(
+                        f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
+                    )
+                else:
+                    # FixedSizeListArray has no offsets buffer; compute offsets from the fixed list_size
+                    return pa.ListArray.from_arrays(
+                        pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()),
+                        _e(array_values, feature[0]),
+                        mask=array.is_null(),
+                    )
+            return pa.ListArray.from_arrays(
+                pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()),
+                _e(array_values, feature[0]),
+            )
+        elif isinstance(feature, Sequence):
+            if feature.length > -1:
+                if feature.length * len(array) == len(array_values):
+                    return pa.FixedSizeListArray.from_arrays(_e(array_values, feature.feature), feature.length)
+            else:
+                offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32())
+                if array.null_count > 0:
+                    if config.PYARROW_VERSION.major < 10:
+                        warnings.warn(
+                            f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`"
+                        )
+                    else:
+                        return pa.ListArray.from_arrays(
+                            offsets_arr, _e(array_values, feature.feature), mask=array.is_null()
+                        )
+                return pa.ListArray.from_arrays(offsets_arr, _e(array_values, feature.feature))
+    if not isinstance(feature, (Sequence, dict, list, tuple)):
+        return array
+    raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")
+
+
+def cast_table_to_features(table: pa.Table, features: "Features"):
+    """Cast a table to the arrow schema that corresponds to the requested features.
+
+    Args:
+        table (`pyarrow.Table`):
+            PyArrow table to cast.
+        features ([`Features`]):
+            Target features.
+
+    Returns:
+        table (`pyarrow.Table`): the casted table
+    """
+    if sorted(table.column_names) != sorted(features):
+        raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match")
+    arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+    return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
+
+def cast_table_to_schema(table: pa.Table, schema: pa.Schema):
+    """Cast a table to the arrow schema. Different from `cast_table_to_features`, this method can preserve nullability.
+
+    Args:
+        table (`pa.Table`):
+            PyArrow table to cast.
+        schema (`pa.Schema`):
+            Target PyArrow schema.
+
+    Returns:
+        `pa.Table`: the casted table
+    """
+    from .features import Features
+
+    features = Features.from_arrow_schema(schema)
+    if sorted(table.column_names) != sorted(features):
+        raise ValueError(f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match")
+    arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+    return pa.Table.from_arrays(arrays, schema=schema)
+
+
+def embed_table_storage(table: pa.Table):
+    """Embed external data into a table's storage.
+
+    Args:
+        table (`pyarrow.Table`):
+            PyArrow table in which to embed data.
+
+    Returns:
+        table (`pyarrow.Table`): the table with embedded data
+    """
+    from .features.features import Features, require_storage_embed
+
+    features = Features.from_arrow_schema(table.schema)
+    arrays = [
+        embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
+        for name, feature in features.items()
+    ]
+    return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
+
+def table_cast(table: pa.Table, schema: pa.Schema):
+    """Improved version of `pa.Table.cast`.
+
+    It supports casting to feature types stored in the schema metadata.
+
+    Args:
+        table (`pyarrow.Table`):
+            PyArrow table to cast.
+        schema (`pyarrow.Schema`):
+            Target PyArrow schema.
+
+    Returns:
+        table (`pyarrow.Table`): the casted table
+    """
+    if table.schema != schema:
+        return cast_table_to_schema(table, schema)
+    elif table.schema.metadata != schema.metadata:
+        return table.replace_schema_metadata(schema.metadata)
+    else:
+        return table
+
+
+def table_flatten(table: pa.Table):
+    """Improved version of `pa.Table.flatten`.
+
+    It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of columns
+    with a struct type into one column per struct field; in addition it updates the metadata
+    and skips decodable features unless their `decode` attribute is set to `False`.
+
+    Args:
+        table (`pa.Table`):
+            PyArrow table to flatten.
+ + Returns: + `Table`: the flattened table + """ + from .features import Features + + features = Features.from_arrow_schema(table.schema) + if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()): + flat_arrays = [] + flat_column_names = [] + for field in table.schema: + array = table.column(field.name) + subfeature = features[field.name] + if pa.types.is_struct(field.type) and ( + not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature + ): + flat_arrays.extend(array.flatten()) + flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type]) + else: + flat_arrays.append(array) + flat_column_names.append(field.name) + flat_table = pa.Table.from_arrays( + flat_arrays, + names=flat_column_names, + ) + else: + flat_table = table.flatten() + # Preserve complex types in the metadata + flat_features = features.flatten(max_depth=2) + flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names}) + return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata) + + +def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]): + """Visit all arrays in a table and apply a function to them. + + Args: + table (`pyarrow.Table`): + PyArrow table to visit. + function (`Callable[[pa.Array], None]`): + Function to apply to each array. + """ + from .features import Features, Sequence + + features = Features.from_arrow_schema(table.schema) + + def _visit(array, feature): + if isinstance(array, pa.ChunkedArray): + for chunk in array.chunks: + _visit(chunk, feature) + else: + if isinstance(array, pa.ExtensionArray): + array = array.storage + function(array, feature) + if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"): + if isinstance(feature, Sequence) and isinstance(feature.feature, dict): + feature = { + name: Sequence(subfeature, length=feature.length) + for name, subfeature in feature.feature.items() + } + for name, subfeature in feature.items(): + _visit(array.field(name), subfeature) + elif pa.types.is_list(array.type): + if isinstance(feature, list): + _visit(array.values, feature[0]) + elif isinstance(feature, Sequence): + _visit(array.values, feature.feature) + + for name, feature in features.items(): + _visit(table[name], feature) + + +def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]: + """Iterate over sub-tables of size `batch_size`. + + Args: + table (`pyarrow.Table`): + PyArrow table to iterate over. + batch_size (`int`): + Size of each sub-table to yield. + drop_last_batch (`bool`, defaults to `False`): + Drop the last batch if it is smaller than `batch_size`. 
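+
+    Example (an illustrative sketch over a small in-memory table):
+
+    ```python
+    >>> table = pa.table({"a": list(range(10))})
+    >>> [subtable.num_rows for subtable in table_iter(table, batch_size=4)]
+    [4, 4, 2]
+    ```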
+ """ + chunks_buffer = [] + chunks_buffer_size = 0 + for chunk in table.to_reader(max_chunksize=batch_size): + if len(chunk) == 0: + continue + elif chunks_buffer_size + len(chunk) < batch_size: + chunks_buffer.append(chunk) + chunks_buffer_size += len(chunk) + continue + elif chunks_buffer_size + len(chunk) == batch_size: + chunks_buffer.append(chunk) + yield pa.Table.from_batches(chunks_buffer) + chunks_buffer = [] + chunks_buffer_size = 0 + else: + cropped_chunk_length = batch_size - chunks_buffer_size + chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) + yield pa.Table.from_batches(chunks_buffer) + chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] + chunks_buffer_size = len(chunk) - cropped_chunk_length + if not drop_last_batch and chunks_buffer: + yield pa.Table.from_batches(chunks_buffer) diff --git a/testbed/huggingface__datasets/src/datasets/tasks/__init__.py b/testbed/huggingface__datasets/src/datasets/tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a18a1e79349cfb32a743aeca4c3e9a809645a75 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/__init__.py @@ -0,0 +1,46 @@ +from typing import Optional + +from ..utils.logging import get_logger +from .audio_classification import AudioClassification +from .automatic_speech_recognition import AutomaticSpeechRecognition +from .base import TaskTemplate +from .image_classification import ImageClassification +from .language_modeling import LanguageModeling +from .question_answering import QuestionAnsweringExtractive +from .summarization import Summarization +from .text_classification import TextClassification + + +__all__ = [ + "AutomaticSpeechRecognition", + "AudioClassification", + "ImageClassification", + "LanguageModeling", + "QuestionAnsweringExtractive", + "Summarization", + "TaskTemplate", + "TextClassification", +] + +logger = get_logger(__name__) + + +NAME2TEMPLATE = { + AutomaticSpeechRecognition.task: AutomaticSpeechRecognition, + AudioClassification.task: AudioClassification, + ImageClassification.task: ImageClassification, + LanguageModeling.task: LanguageModeling, + QuestionAnsweringExtractive.task: QuestionAnsweringExtractive, + Summarization.task: Summarization, + TextClassification.task: TextClassification, +} + + +def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]: + """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary.""" + task_name = task_template_dict.get("task") + if task_name is None: + logger.warning(f"Couldn't find template for task '{task_name}'. 
Available templates: {list(NAME2TEMPLATE)}")
+        return None
+    template = NAME2TEMPLATE.get(task_name)
+    return template.from_dict(task_template_dict) if template is not None else None
diff --git a/testbed/huggingface__datasets/src/datasets/tasks/audio_classification.py b/testbed/huggingface__datasets/src/datasets/tasks/audio_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f9fe402f3814b4db0eb1832405adcfaef77503e
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/tasks/audio_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, ClassLabel, Features
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AudioClassification(TaskTemplate):
+    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
+    input_schema: ClassVar[Features] = Features({"audio": Audio()})
+    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+    audio_column: str = "audio"
+    label_column: str = "labels"
+
+    def align_with_features(self, features):
+        if self.label_column not in features:
+            raise ValueError(f"Column {self.label_column} is not present in features.")
+        if not isinstance(features[self.label_column], ClassLabel):
+            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+        task_template = copy.deepcopy(self)
+        label_schema = self.label_schema.copy()
+        label_schema["labels"] = features[self.label_column]
+        task_template.__dict__["label_schema"] = label_schema
+        return task_template
+
+    @property
+    def column_mapping(self) -> Dict[str, str]:
+        return {
+            self.audio_column: "audio",
+            self.label_column: "labels",
+        }
diff --git a/testbed/huggingface__datasets/src/datasets/tasks/automatic_speech_recognition.py b/testbed/huggingface__datasets/src/datasets/tasks/automatic_speech_recognition.py
new file mode 100644
index 0000000000000000000000000000000000000000..103a98a1bc9774de6b652bbc69b41501a419f0f8
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/tasks/automatic_speech_recognition.py
@@ -0,0 +1,30 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AutomaticSpeechRecognition(TaskTemplate):
+    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
+    input_schema: ClassVar[Features] = Features({"audio": Audio()})
+    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
+    audio_column: str = "audio"
+    transcription_column: str = "transcription"
+
+    def align_with_features(self, features):
+        if self.audio_column not in features:
+            raise ValueError(f"Column {self.audio_column} is not present in features.")
+        if not isinstance(features[self.audio_column], Audio):
+            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
+        task_template = copy.deepcopy(self)
+        input_schema = self.input_schema.copy()
+        input_schema["audio"] = features[self.audio_column]
+        task_template.__dict__["input_schema"] = input_schema
+        return task_template
+
+    @property
+    def column_mapping(self) -> Dict[str, str]:
+        return {self.audio_column: "audio", self.transcription_column: "transcription"}
diff --git a/testbed/huggingface__datasets/src/datasets/tasks/base.py b/testbed/huggingface__datasets/src/datasets/tasks/base.py
new file mode 100644
index
0000000000000000000000000000000000000000..21a5337ffc0784a1ed12f4617a9a0ef6ba7253e5 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/base.py @@ -0,0 +1,39 @@ +import abc +import copy +import dataclasses +from dataclasses import dataclass +from typing import ClassVar, Dict, Type, TypeVar + +from ..features import Features + + +T = TypeVar("T", bound="TaskTemplate") + + +@dataclass(frozen=True) +class TaskTemplate(abc.ABC): + # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization + task: str + input_schema: ClassVar[Features] + label_schema: ClassVar[Features] + + def align_with_features(self: T, features: Features) -> T: + """ + Align features with the task template. + """ + # No-op + return copy.deepcopy(self) + + @property + def features(self) -> Features: + return Features(**self.input_schema, **self.label_schema) + + @property + @abc.abstractmethod + def column_mapping(self) -> Dict[str, str]: + raise NotImplementedError + + @classmethod + def from_dict(cls: Type[T], template_dict: dict) -> T: + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for k, v in template_dict.items() if k in field_names}) diff --git a/testbed/huggingface__datasets/src/datasets/tasks/image_classification.py b/testbed/huggingface__datasets/src/datasets/tasks/image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..20a19e0408a7ec8061ac4fac700d83e6dcbadcdf --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/image_classification.py @@ -0,0 +1,33 @@ +import copy +from dataclasses import dataclass, field +from typing import ClassVar, Dict + +from ..features import ClassLabel, Features, Image +from .base import TaskTemplate + + +@dataclass(frozen=True) +class ImageClassification(TaskTemplate): + task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True}) + input_schema: ClassVar[Features] = Features({"image": Image()}) + label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) + image_column: str = "image" + label_column: str = "labels" + + def align_with_features(self, features): + if self.label_column not in features: + raise ValueError(f"Column {self.label_column} is not present in features.") + if not isinstance(features[self.label_column], ClassLabel): + raise ValueError(f"Column {self.label_column} is not a ClassLabel.") + task_template = copy.deepcopy(self) + label_schema = self.label_schema.copy() + label_schema["labels"] = features[self.label_column] + task_template.__dict__["label_schema"] = label_schema + return task_template + + @property + def column_mapping(self) -> Dict[str, str]: + return { + self.image_column: "image", + self.label_column: "labels", + } diff --git a/testbed/huggingface__datasets/src/datasets/tasks/language_modeling.py b/testbed/huggingface__datasets/src/datasets/tasks/language_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..b2837744fa1718e57ffbeeca1a6e9a60c9468d8f --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/language_modeling.py @@ -0,0 +1,18 @@ +from dataclasses import dataclass, field +from typing import ClassVar, Dict + +from ..features import Features, Value +from .base import TaskTemplate + + +@dataclass(frozen=True) +class LanguageModeling(TaskTemplate): + task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True}) + + input_schema: ClassVar[Features] = Features({"text": 
Value("string")}) + label_schema: ClassVar[Features] = Features({}) + text_column: str = "text" + + @property + def column_mapping(self) -> Dict[str, str]: + return {self.text_column: "text"} diff --git a/testbed/huggingface__datasets/src/datasets/tasks/question_answering.py b/testbed/huggingface__datasets/src/datasets/tasks/question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..349fd54141762631eec025681015cedd97c23b63 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/question_answering.py @@ -0,0 +1,29 @@ +from dataclasses import dataclass, field +from typing import ClassVar, Dict + +from ..features import Features, Sequence, Value +from .base import TaskTemplate + + +@dataclass(frozen=True) +class QuestionAnsweringExtractive(TaskTemplate): + # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization + task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True}) + input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")}) + label_schema: ClassVar[Features] = Features( + { + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ) + } + ) + question_column: str = "question" + context_column: str = "context" + answers_column: str = "answers" + + @property + def column_mapping(self) -> Dict[str, str]: + return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"} diff --git a/testbed/huggingface__datasets/src/datasets/tasks/summarization.py b/testbed/huggingface__datasets/src/datasets/tasks/summarization.py new file mode 100644 index 0000000000000000000000000000000000000000..a0057b07b4f62947c1bfde1962bf06be1427c363 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/summarization.py @@ -0,0 +1,19 @@ +from dataclasses import dataclass, field +from typing import ClassVar, Dict + +from ..features import Features, Value +from .base import TaskTemplate + + +@dataclass(frozen=True) +class Summarization(TaskTemplate): + # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization + task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True}) + input_schema: ClassVar[Features] = Features({"text": Value("string")}) + label_schema: ClassVar[Features] = Features({"summary": Value("string")}) + text_column: str = "text" + summary_column: str = "summary" + + @property + def column_mapping(self) -> Dict[str, str]: + return {self.text_column: "text", self.summary_column: "summary"} diff --git a/testbed/huggingface__datasets/src/datasets/tasks/text_classification.py b/testbed/huggingface__datasets/src/datasets/tasks/text_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..13584b73e8ae668bd6c145b60598cd6859be5146 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/tasks/text_classification.py @@ -0,0 +1,34 @@ +import copy +from dataclasses import dataclass, field +from typing import ClassVar, Dict + +from ..features import ClassLabel, Features, Value +from .base import TaskTemplate + + +@dataclass(frozen=True) +class TextClassification(TaskTemplate): + # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization + task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True}) + input_schema: ClassVar[Features] = 
Features({"text": Value("string")}) + label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) + text_column: str = "text" + label_column: str = "labels" + + def align_with_features(self, features): + if self.label_column not in features: + raise ValueError(f"Column {self.label_column} is not present in features.") + if not isinstance(features[self.label_column], ClassLabel): + raise ValueError(f"Column {self.label_column} is not a ClassLabel.") + task_template = copy.deepcopy(self) + label_schema = self.label_schema.copy() + label_schema["labels"] = features[self.label_column] + task_template.__dict__["label_schema"] = label_schema + return task_template + + @property + def column_mapping(self) -> Dict[str, str]: + return { + self.text_column: "text", + self.label_column: "labels", + } diff --git a/testbed/huggingface__datasets/src/datasets/utils/__init__.py b/testbed/huggingface__datasets/src/datasets/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..001fca727b3ce4b71a65e6c2ad0eaa3d1c6a46b9 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +# Lint as: python3 + +from . import tqdm as _tqdm # _tqdm is the module +from .info_utils import VerificationMode +from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled +from .version import Version +from .experimental import experimental +from .tqdm import ( + disable_progress_bars, + enable_progress_bars, + are_progress_bars_disabled, + tqdm, +) diff --git a/testbed/huggingface__datasets/src/datasets/utils/_filelock.py b/testbed/huggingface__datasets/src/datasets/utils/_filelock.py new file mode 100644 index 0000000000000000000000000000000000000000..e574a5b7e8977d79ed3024bbdb335eaab0d916a8 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/_filelock.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License +"""Utilities to handle file locking in `datasets`.""" + +import os + +from filelock import FileLock as FileLock_ + + +class FileLock(FileLock_): + """ + A `filelock.FileLock` initializer that handles long paths. 
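+
+    Example (an illustrative sketch; the exact hashed name depends on the filename hash):
+
+    ```python
+    >>> lock = FileLock("/tmp/" + "a" * 300 + ".lock")
+    >>> len(os.path.basename(lock.lock_file)) <= FileLock.MAX_FILENAME_LENGTH
+    True
+    ```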
+    """
+
+    MAX_FILENAME_LENGTH = 255
+
+    def __init__(self, lock_file, *args, **kwargs):
+        lock_file = self.hash_filename_if_too_long(lock_file)
+        super().__init__(lock_file, *args, **kwargs)
+
+    @classmethod
+    def hash_filename_if_too_long(cls, path: str) -> str:
+        filename = os.path.basename(path)
+        if len(filename) > cls.MAX_FILENAME_LENGTH:
+            dirname = os.path.dirname(path)
+            hashed_filename = str(hash(filename))
+            new_filename = (
+                filename[: cls.MAX_FILENAME_LENGTH - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
+            )
+            return os.path.join(dirname, new_filename)
+        else:
+            return path
diff --git a/testbed/huggingface__datasets/src/datasets/utils/beam_utils.py b/testbed/huggingface__datasets/src/datasets/utils/beam_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..831354397cf2bb1c0ee464093484d53c037aa95c
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/beam_utils.py
@@ -0,0 +1,52 @@
+import os
+
+from apache_beam.io.filesystems import FileSystems
+from apache_beam.pipeline import Pipeline
+
+from .logging import get_logger
+
+
+CHUNK_SIZE = 2 << 20  # 2 MiB
+logger = get_logger(__name__)
+
+
+class BeamPipeline(Pipeline):
+    """Wrapper over `apache_beam.pipeline.Pipeline` for convenience"""
+
+    def is_local(self):
+        runner = self._options.get_all_options().get("runner")
+        return runner in [None, "DirectRunner", "PortableRunner"]
+
+
+def upload_local_to_remote(local_file_path, remote_file_path, force_upload=False):
+    """Use the Beam Filesystems to upload to a remote directory on gcs/s3/hdfs..."""
+    fs = FileSystems
+    if fs.exists(remote_file_path):
+        if force_upload:
+            logger.info(f"Remote path already exists: {remote_file_path}. Overwriting it as force_upload=True.")
+        else:
+            logger.info(f"Remote path already exists: {remote_file_path}. Skipping it as force_upload=False.")
+            return
+    with fs.create(remote_file_path) as remote_file:
+        with open(local_file_path, "rb") as local_file:
+            chunk = local_file.read(CHUNK_SIZE)
+            while chunk:
+                remote_file.write(chunk)
+                chunk = local_file.read(CHUNK_SIZE)
+
+
+def download_remote_to_local(remote_file_path, local_file_path, force_download=False):
+    """Use the Beam Filesystems to download from a remote directory on gcs/s3/hdfs..."""
+    fs = FileSystems
+    if os.path.exists(local_file_path):
+        if force_download:
+            logger.info(f"Local path already exists: {local_file_path}. Overwriting it as force_download=True.")
+        else:
+            logger.info(f"Local path already exists: {local_file_path}. Skipping it as force_download=False.")
+            return
+    with fs.open(remote_file_path) as remote_file:
+        with open(local_file_path, "wb") as local_file:
+            chunk = remote_file.read(CHUNK_SIZE)
+            while chunk:
+                local_file.write(chunk)
+                chunk = remote_file.read(CHUNK_SIZE)
diff --git a/testbed/huggingface__datasets/src/datasets/utils/deprecation_utils.py b/testbed/huggingface__datasets/src/datasets/utils/deprecation_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f05ecbeaa3eae5476e99c461dbede9ebfa111eb0
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/deprecation_utils.py
@@ -0,0 +1,105 @@
+import enum
+import inspect
+import warnings
+from functools import wraps
+from typing import Callable, Optional
+
+from .logging import get_logger
+
+
+_emitted_deprecation_warnings = set()
+logger = get_logger(__name__)
+
+
+def deprecated(help_message: Optional[str] = None):
+    """Decorator to mark a class or a function as deprecated.
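+
+    Example (a minimal sketch with a hypothetical function; the first call emits a `FutureWarning`):
+
+    ```python
+    >>> @deprecated("Use 'new_function' instead.")
+    ... def old_function():
+    ...     pass
+    >>> old_function()
+    ```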
+
+    Args:
+        help_message (:obj:`str`, optional): An optional message to guide the user on how to
+            switch to non-deprecated usage of the library.
+    """
+
+    def decorator(deprecated_class_or_function: Callable):
+        global _emitted_deprecation_warnings
+
+        if inspect.isclass(deprecated_class_or_function):
+            deprecated_function = deprecated_class_or_function.__init__
+            name = deprecated_class_or_function.__name__
+        else:
+            deprecated_function = deprecated_class_or_function
+            name = deprecated_function.__name__
+            # Support deprecating __init__ class method: class name instead
+            name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
+
+        warning_msg = (
+            f"{name} is deprecated and will be removed in the next major version of datasets."
+            + (f" {help_message}" if help_message else "")
+        )
+
+        @wraps(deprecated_function)
+        def wrapper(*args, **kwargs):
+            func_hash = hash(deprecated_function)
+            if func_hash not in _emitted_deprecation_warnings:
+                warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
+                _emitted_deprecation_warnings.add(func_hash)
+            return deprecated_function(*args, **kwargs)
+
+        wrapper._decorator_name_ = "deprecated"
+
+        if inspect.isclass(deprecated_class_or_function):
+            deprecated_class_or_function.__init__ = wrapper
+            return deprecated_class_or_function
+        else:
+            return wrapper
+
+    return decorator
+
+
+class OnAccess(enum.EnumMeta):
+    """
+    Enum metaclass that calls a user-specified function whenever a member is accessed.
+    """
+
+    def __getattribute__(cls, name):
+        obj = super().__getattribute__(name)
+        if isinstance(obj, enum.Enum) and obj._on_access:
+            obj._on_access()
+        return obj
+
+    def __getitem__(cls, name):
+        member = super().__getitem__(name)
+        if member._on_access:
+            member._on_access()
+        return member
+
+    def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
+        obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
+        if isinstance(obj, enum.Enum) and obj._on_access:
+            obj._on_access()
+        return obj
+
+
+class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
+    """
+    Enum class that calls `deprecate` method whenever a member is accessed.
+    """
+
+    def __new__(cls, value):
+        member = object.__new__(cls)
+        member._value_ = value
+        member._on_access = member.deprecate
+        return member
+
+    @property
+    def help_message(self):
+        return ""
+
+    def deprecate(self):
+        help_message = f" {self.help_message}" if self.help_message else ""
+        warnings.warn(
+            f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+            + help_message,
+            FutureWarning,
+            stacklevel=3,
+        )
diff --git a/testbed/huggingface__datasets/src/datasets/utils/doc_utils.py b/testbed/huggingface__datasets/src/datasets/utils/doc_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ef8bcb4e70725ad086cb817e0ec4551d1c0966e
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/doc_utils.py
@@ -0,0 +1,15 @@
+from typing import Callable
+
+
+def is_documented_by(function_with_docstring: Callable):
+    """Decorator to share docstrings across common functions.
+
+    Args:
+        function_with_docstring (`Callable`): Name of the function with the docstring.
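+
+    Example (a minimal sketch with hypothetical functions):
+
+    ```python
+    >>> def documented():
+    ...     '''Shared docstring.'''
+    >>> @is_documented_by(documented)
+    ... def undocumented():
+    ...     pass
+    >>> undocumented.__doc__
+    'Shared docstring.'
+    ```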
+    """
+
+    def wrapper(target_function):
+        target_function.__doc__ = function_with_docstring.__doc__
+        return target_function
+
+    return wrapper
diff --git a/testbed/huggingface__datasets/src/datasets/utils/download_manager.py b/testbed/huggingface__datasets/src/datasets/utils/download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..b524c2f9686f65d083c424a4e17d001395b743b6
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/download_manager.py
@@ -0,0 +1 @@
+# deprecated, please use datasets.download.download_manager
diff --git a/testbed/huggingface__datasets/src/datasets/utils/experimental.py b/testbed/huggingface__datasets/src/datasets/utils/experimental.py
new file mode 100644
index 0000000000000000000000000000000000000000..bac2244d59bec405621ae80dcf3b894a190b13b3
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/experimental.py
@@ -0,0 +1,42 @@
+"""Contains utilities to flag a feature as "experimental" in datasets."""
+import warnings
+from functools import wraps
+from typing import Callable
+
+
+def experimental(fn: Callable) -> Callable:
+    """Decorator to flag a feature as experimental.
+
+    An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
+
+    Args:
+        fn (`Callable`):
+            The function to flag as experimental.
+
+    Returns:
+        `Callable`: The decorated function.
+
+    Example:
+
+    ```python
+    >>> from datasets.utils import experimental
+
+    >>> @experimental
+    ... def my_function():
+    ...     print("Hello world!")
+
+    >>> my_function()
+    UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future.
+    Hello world!
+    ```
+    """
+
+    @wraps(fn)
+    def _inner_fn(*args, **kwargs):
+        warnings.warn(
+            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
+            UserWarning,
+        )
+        return fn(*args, **kwargs)
+
+    return _inner_fn
diff --git a/testbed/huggingface__datasets/src/datasets/utils/extract.py b/testbed/huggingface__datasets/src/datasets/utils/extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b13028ba18c4b8c0d8411c4409fbd5feca9b39b
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/extract.py
@@ -0,0 +1,353 @@
+import bz2
+import gzip
+import lzma
+import os
+import shutil
+import struct
+import tarfile
+import warnings
+import zipfile
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Dict, List, Optional, Type, Union
+
+from .. import config
+from ._filelock import FileLock
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class ExtractManager:
+    def __init__(self, cache_dir: Optional[str] = None):
+        self.extract_dir = (
+            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
+        )
+        self.extractor = Extractor
+
+    def _get_output_path(self, path: str) -> str:
+        from .file_utils import hash_url_to_filename
+
+        # Path where we extract compressed archives
+        # We extract in the cache dir, and get the extracted path name by hashing the original path
+        abs_path = os.path.abspath(path)
+        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
+
+    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
+        return force_extract or (
+            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
+        )
+
+    def extract(self, input_path: str, force_extract: bool = False) -> str:
+        extractor_format = self.extractor.infer_extractor_format(input_path)
+        if not extractor_format:
+            return input_path
+        output_path = self._get_output_path(input_path)
+        if self._do_extract(output_path, force_extract):
+            self.extractor.extract(input_path, output_path, extractor_format)
+        return output_path
+
+
+class BaseExtractor(ABC):
+    @classmethod
+    @abstractmethod
+    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
+        ...
+
+    @staticmethod
+    @abstractmethod
+    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+        ...
+
+
+class MagicNumberBaseExtractor(BaseExtractor, ABC):
+    magic_numbers: List[bytes] = []
+
+    @staticmethod
+    def read_magic_number(path: Union[Path, str], magic_number_length: int):
+        with open(path, "rb") as f:
+            return f.read(magic_number_length)
+
+    @classmethod
+    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
+        if not magic_number:
+            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
+            try:
+                magic_number = cls.read_magic_number(path, magic_number_length)
+            except OSError:
+                return False
+        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
+
+
+class TarExtractor(BaseExtractor):
+    @classmethod
+    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
+        return tarfile.is_tarfile(path)
+
+    @staticmethod
+    def safemembers(members, output_path):
+        """
+        Fix for CVE-2007-4559
+        Desc:
+            Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
+            module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
+            sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.
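+
+        For example (illustrative), a member named "../../etc/passwd" resolves outside the
+        extraction directory, so it is logged as blocked and skipped rather than extracted.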
+ See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559 + From: https://stackoverflow.com/a/10077309 + """ + + def resolved(path: str) -> str: + return os.path.realpath(os.path.abspath(path)) + + def badpath(path: str, base: str) -> bool: + # joinpath will ignore base if path is absolute + return not resolved(os.path.join(base, path)).startswith(base) + + def badlink(info, base: str) -> bool: + # Links are interpreted relative to the directory containing the link + tip = resolved(os.path.join(base, os.path.dirname(info.name))) + return badpath(info.linkname, base=tip) + + base = resolved(output_path) + + for finfo in members: + if badpath(finfo.name, base): + logger.error(f"Extraction of {finfo.name} is blocked (illegal path)") + elif finfo.issym() and badlink(finfo, base): + logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}") + elif finfo.islnk() and badlink(finfo, base): + logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}") + else: + yield finfo + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + os.makedirs(output_path, exist_ok=True) + tar_file = tarfile.open(input_path) + tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path)) + tar_file.close() + + +class GzipExtractor(MagicNumberBaseExtractor): + magic_numbers = [b"\x1F\x8B"] + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + with gzip.open(input_path, "rb") as gzip_file: + with open(output_path, "wb") as extracted_file: + shutil.copyfileobj(gzip_file, extracted_file) + + +class ZipExtractor(MagicNumberBaseExtractor): + magic_numbers = [ + b"PK\x03\x04", + b"PK\x05\x06", # empty archive + b"PK\x07\x08", # spanned archive + ] + + @classmethod + def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool: + if super().is_extractable(path, magic_number=magic_number): + return True + try: + # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
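+            # (the stdlib zipfile.is_zipfile only looks for the end-of-central-directory record,
+            # which can occur by chance in non-zip files; the variant below also validates the
+            # central directory's location, size and signature)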
+ # From: https://github.com/python/cpython/pull/5053 + from zipfile import ( + _CD_SIGNATURE, + _ECD_DISK_NUMBER, + _ECD_DISK_START, + _ECD_ENTRIES_TOTAL, + _ECD_OFFSET, + _ECD_SIZE, + _EndRecData, + sizeCentralDir, + stringCentralDir, + structCentralDir, + ) + + with open(path, "rb") as fp: + endrec = _EndRecData(fp) + if endrec: + if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: + return True # Empty zipfiles are still zipfiles + elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: + fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk + if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: + data = fp.read(sizeCentralDir) # CD is where we expect it to be + if len(data) == sizeCentralDir: + centdir = struct.unpack(structCentralDir, data) # CD is the right size + if centdir[_CD_SIGNATURE] == stringCentralDir: + return True # First central directory entry has correct magic number + return False + except Exception: # catch all errors in case future python versions change the zipfile internals + return False + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + os.makedirs(output_path, exist_ok=True) + with zipfile.ZipFile(input_path, "r") as zip_file: + zip_file.extractall(output_path) + zip_file.close() + + +class XzExtractor(MagicNumberBaseExtractor): + magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"] + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + with lzma.open(input_path) as compressed_file: + with open(output_path, "wb") as extracted_file: + shutil.copyfileobj(compressed_file, extracted_file) + + +class RarExtractor(MagicNumberBaseExtractor): + magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + if not config.RARFILE_AVAILABLE: + raise ImportError("Please pip install rarfile") + import rarfile + + os.makedirs(output_path, exist_ok=True) + rf = rarfile.RarFile(input_path) + rf.extractall(output_path) + rf.close() + + +class ZstdExtractor(MagicNumberBaseExtractor): + magic_numbers = [b"\x28\xb5\x2F\xFD"] + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + if not config.ZSTANDARD_AVAILABLE: + raise ImportError("Please pip install zstandard") + import zstandard as zstd + + dctx = zstd.ZstdDecompressor() + with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh: + dctx.copy_stream(ifh, ofh) + + +class Bzip2Extractor(MagicNumberBaseExtractor): + magic_numbers = [b"\x42\x5A\x68"] + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + with bz2.open(input_path, "rb") as compressed_file: + with open(output_path, "wb") as extracted_file: + shutil.copyfileobj(compressed_file, extracted_file) + + +class SevenZipExtractor(MagicNumberBaseExtractor): + magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"] + + @staticmethod + def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: + if not config.PY7ZR_AVAILABLE: + raise ImportError("Please pip install py7zr") + import py7zr + + os.makedirs(output_path, exist_ok=True) + with py7zr.SevenZipFile(input_path, "r") as archive: + archive.extractall(output_path) + + +class Lz4Extractor(MagicNumberBaseExtractor): + magic_numbers = [b"\x04\x22\x4D\x18"] + + @staticmethod + def extract(input_path: Union[Path, str], 
output_path: Union[Path, str]) -> None:
+        if not config.LZ4_AVAILABLE:
+            raise ImportError("Please pip install lz4")
+        import lz4.frame
+
+        with lz4.frame.open(input_path, "rb") as compressed_file:
+            with open(output_path, "wb") as extracted_file:
+                shutil.copyfileobj(compressed_file, extracted_file)
+
+
+class Extractor:
+    # Zip is checked after tar and gzip, since those formats can otherwise be wrongly detected as zip
+    extractors: Dict[str, Type[BaseExtractor]] = {
+        "tar": TarExtractor,
+        "gzip": GzipExtractor,
+        "zip": ZipExtractor,
+        "xz": XzExtractor,
+        "rar": RarExtractor,
+        "zstd": ZstdExtractor,
+        "bz2": Bzip2Extractor,
+        "7z": SevenZipExtractor,
+        "lz4": Lz4Extractor,
+    }
+
+    @classmethod
+    def _get_magic_number_max_length(cls):
+        return max(
+            len(extractor_magic_number)
+            for extractor in cls.extractors.values()
+            if issubclass(extractor, MagicNumberBaseExtractor)
+            for extractor_magic_number in extractor.magic_numbers
+        )
+
+    @staticmethod
+    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
+        try:
+            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
+        except OSError:
+            return b""
+
+    @classmethod
+    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
+        warnings.warn(
+            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
+            "Use 'infer_extractor_format' instead.",
+            category=FutureWarning,
+        )
+        extractor_format = cls.infer_extractor_format(path)
+        if extractor_format:
+            return True if not return_extractor else (True, cls.extractors[extractor_format])
+        return False if not return_extractor else (False, None)
+
+    @classmethod
+    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:
+        magic_number_max_length = cls._get_magic_number_max_length()
+        magic_number = cls._read_magic_number(path, magic_number_max_length)
+        for extractor_format, extractor in cls.extractors.items():
+            if extractor.is_extractable(path, magic_number=magic_number):
+                return extractor_format
+
+    @classmethod
+    def extract(
+        cls,
+        input_path: Union[Path, str],
+        output_path: Union[Path, str],
+        extractor_format: Optional[str] = None,
+        extractor: Optional[BaseExtractor] = "deprecated",
+    ) -> None:
+        os.makedirs(os.path.dirname(output_path), exist_ok=True)
+        # Prevent parallel extractions
+        lock_path = str(Path(output_path).with_suffix(".lock"))
+        with FileLock(lock_path):
+            shutil.rmtree(output_path, ignore_errors=True)
+            if extractor_format or extractor != "deprecated":
+                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
+                    warnings.warn(
+                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
" + "Use 'extractor_format' instead.", + category=FutureWarning, + ) + extractor = extractor if extractor != "deprecated" else extractor_format + else: + extractor = cls.extractors[extractor_format] + return extractor.extract(input_path, output_path) + else: + warnings.warn( + "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " + "exception in 3.0.0.", + category=FutureWarning, + ) + for extractor in cls.extractors.values(): + if extractor.is_extractable(input_path): + return extractor.extract(input_path, output_path) diff --git a/testbed/huggingface__datasets/src/datasets/utils/file_utils.py b/testbed/huggingface__datasets/src/datasets/utils/file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..03cd91d0bbe59f21f367e0e5d66e6d4d4e7d9c16 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/file_utils.py @@ -0,0 +1,705 @@ +""" +Utilities for working with the local dataset cache. +This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp +Copyright by the AllenNLP authors. +""" + +import copy +import io +import json +import os +import posixpath +import re +import shutil +import sys +import time +import urllib +import warnings +from contextlib import closing, contextmanager +from functools import partial +from pathlib import Path +from typing import List, Optional, Type, TypeVar, Union +from urllib.parse import urljoin, urlparse + +import fsspec +import huggingface_hub +import requests +from fsspec.core import strip_protocol +from fsspec.utils import can_be_local +from huggingface_hub import HfFolder +from huggingface_hub.utils import insecure_hashlib +from packaging import version + +from .. import __version__, config +from ..download.download_config import DownloadConfig +from . import _tqdm, logging +from . import tqdm as hf_tqdm +from ._filelock import FileLock +from .extract import ExtractManager + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +INCOMPLETE_SUFFIX = ".incomplete" + +T = TypeVar("T", str, Path) + + +def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str: + """ + Add hf_modules_cache to the python path. + By default hf_modules_cache='~/.cache/huggingface/modules'. + It can also be set with the environment variable HF_MODULES_CACHE. + This is used to add modules such as `datasets_modules` + """ + hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE + hf_modules_cache = str(hf_modules_cache) + if hf_modules_cache not in sys.path: + sys.path.append(hf_modules_cache) + + os.makedirs(hf_modules_cache, exist_ok=True) + if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")): + with open(os.path.join(hf_modules_cache, "__init__.py"), "w"): + pass + return hf_modules_cache + + +def is_remote_url(url_or_filename: str) -> bool: + return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/") + + +def is_local_path(url_or_filename: str) -> bool: + # On unix the scheme of a local path is empty (for both absolute and relative), + # while on windows the scheme is the drive name (ex: "c") for absolute paths. 
+ # for details on the windows behavior, see https://bugs.python.org/issue42215 + return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/") + + +def is_relative_path(url_or_filename: str) -> bool: + return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename) + + +def relative_to_absolute_path(path: T) -> T: + """Convert relative path to absolute path.""" + abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path)))) + return Path(abs_path_str) if isinstance(path, Path) else abs_path_str + + +def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str: + if dataset: + endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX + else: + endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX + return "/".join((endpoint, identifier, filename)) + + +def head_hf_s3( + identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0 +) -> Union[requests.Response, Exception]: + return http_head( + hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset), + max_retries=max_retries, + ) + + +def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str: + default_revision = "main" if version.parse(__version__).is_devrelease else __version__ + revision = revision or default_revision + if dataset: + return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name) + else: + return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name) + + +def url_or_path_join(base_name: str, *pathnames: str) -> str: + if is_remote_url(base_name): + return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)) + else: + return Path(base_name, *pathnames).as_posix() + + +def url_or_path_parent(url_or_path: str) -> str: + if is_remote_url(url_or_path): + return url_or_path[: url_or_path.rindex("/")] + else: + return os.path.dirname(url_or_path) + + +def hash_url_to_filename(url, etag=None): + """ + Convert `url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name + so that TF 2.0 can identify it as a HDF5 file + (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) + """ + url_bytes = url.encode("utf-8") + url_hash = insecure_hashlib.sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode("utf-8") + etag_hash = insecure_hashlib.sha256(etag_bytes) + filename += "." + etag_hash.hexdigest() + + if url.endswith(".py"): + filename += ".py" + + return filename + + +def cached_path( + url_or_filename, + download_config=None, + **download_kwargs, +) -> str: + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. 
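+
+    Example (a sketch; the URL is illustrative and downloading it requires network access):
+
+    ```python
+    >>> local_path = cached_path("https://example.com/data/train.csv")
+    >>> os.path.isfile(local_path)
+    True
+    ```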
+ + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + ValueError: if it couldn't parse the url or filename correctly + requests.exceptions.ConnectionError: in case of internet connection issue + """ + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + + cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + if isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + + # Convert fsspec URL in the format "file://local/path" to "local/path" + if can_be_local(url_or_filename): + url_or_filename = strip_protocol(url_or_filename) + + if is_remote_url(url_or_filename): + # URL, so get it from the cache (downloading if necessary) + output_path = get_from_cache( + url_or_filename, + cache_dir=cache_dir, + force_download=download_config.force_download, + proxies=download_config.proxies, + resume_download=download_config.resume_download, + user_agent=download_config.user_agent, + local_files_only=download_config.local_files_only, + use_etag=download_config.use_etag, + max_retries=download_config.max_retries, + token=download_config.token, + ignore_url_params=download_config.ignore_url_params, + storage_options=download_config.storage_options, + download_desc=download_config.download_desc, + ) + elif os.path.exists(url_or_filename): + # File, and it exists. + output_path = url_or_filename + elif is_local_path(url_or_filename): + # File, but it doesn't exist. + raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") + else: + # Something unknown + raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") + + if output_path is None: + return output_path + + if download_config.extract_compressed_file: + output_path = ExtractManager(cache_dir=download_config.cache_dir).extract( + output_path, force_extract=download_config.force_extract + ) + + return output_path + + +def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str: + ua = f"datasets/{__version__}" + ua += f"; python/{config.PY_VERSION}" + ua += f"; huggingface_hub/{huggingface_hub.__version__}" + ua += f"; pyarrow/{config.PYARROW_VERSION}" + if config.TORCH_AVAILABLE: + ua += f"; torch/{config.TORCH_VERSION}" + if config.TF_AVAILABLE: + ua += f"; tensorflow/{config.TF_VERSION}" + if config.JAX_AVAILABLE: + ua += f"; jax/{config.JAX_VERSION}" + if config.BEAM_AVAILABLE: + ua += f"; apache_beam/{config.BEAM_VERSION}" + if isinstance(user_agent, dict): + ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}" + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def get_authentication_headers_for_url( + url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated" +) -> dict: + """Handle the HF authentication""" + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + headers = {} + if url.startswith(config.HF_ENDPOINT): + if token is False: + token = None + elif isinstance(token, str): + token = token + else: + token = HfFolder.get_token() + + if token: + headers["authorization"] = f"Bearer 
{token}"
+    return headers
+
+
+class OfflineModeIsEnabled(ConnectionError):
+    pass
+
+
+def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
+    """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True."""
+    if config.HF_DATASETS_OFFLINE:
+        raise OfflineModeIsEnabled(
+            "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg)
+        )
+
+
+def _retry(
+    func,
+    func_args: Optional[tuple] = None,
+    func_kwargs: Optional[dict] = None,
+    exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException,
+    status_codes: Optional[List[int]] = None,
+    max_retries: int = 0,
+    base_wait_time: float = 0.5,
+    max_wait_time: float = 2,
+):
+    func_args = func_args or ()
+    func_kwargs = func_kwargs or {}
+    retry = 0
+    while True:
+        try:
+            return func(*func_args, **func_kwargs)
+        except exceptions as err:
+            if retry >= max_retries or (status_codes and err.response.status_code not in status_codes):
+                raise err
+            else:
+                sleep_time = min(max_wait_time, base_wait_time * 2**retry)  # Exponential backoff
+                logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry}/{max_retries}]")
+                time.sleep(sleep_time)
+                retry += 1
+
+
+def _request_with_retry(
+    method: str,
+    url: str,
+    max_retries: int = 0,
+    base_wait_time: float = 0.5,
+    max_wait_time: float = 2,
+    timeout: float = 10.0,
+    **params,
+) -> requests.Response:
+    """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.
+
+    Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then an OfflineModeIsEnabled error is raised.
+
+    Args:
+        method (str): HTTP method, such as 'GET' or 'HEAD'.
+        url (str): The URL of the resource to fetch.
+        max_retries (int): Maximum number of retries, defaults to 0 (no retries).
+        base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
+            retries then grows exponentially, capped by max_wait_time.
+        max_wait_time (float): Maximum amount of time between two retries, in seconds.
+        timeout (float): Timeout in seconds, passed to each :obj:`requests.request` call.
+        **params (additional keyword arguments): Params to pass to :obj:`requests.request`.
+    """
+    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+    tries, success = 0, False
+    while not success:
+        tries += 1
+        try:
+            response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
+            success = True
+        except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
+            if tries > max_retries:
+                raise err
+            else:
+                logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
+                sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
+                time.sleep(sleep_time)
+    return response
+
+
+def fsspec_head(url, storage_options=None):
+    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+    fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
+    if len(paths) > 1:
+        raise ValueError(f"HEAD can be called with at most one path but was called with {paths}")
+    return fs.info(paths[0])
+
+
+class TqdmCallback(fsspec.callbacks.TqdmCallback):
+    def __init__(self, tqdm_kwargs=None, *args, **kwargs):
+        super().__init__(tqdm_kwargs, *args, **kwargs)
+        self._tqdm = _tqdm  # replace tqdm.tqdm by datasets.tqdm.tqdm
+
+
+def fsspec_get(url, temp_file, storage_options=None, desc=None):
+    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+    fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
+    if len(paths) > 1:
+        raise ValueError(f"GET can be called with at most one path but was called with {paths}")
+    callback = TqdmCallback(
+        tqdm_kwargs={
+            "desc": desc or "Downloading",
+            "unit": "B",
+            "unit_scale": True,
+        }
+    )
+    fs.get_file(paths[0], temp_file.name, callback=callback)
+
+
+def ftp_head(url, timeout=10.0):
+    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+    try:
+        with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
+            r.read(1)
+    except Exception:
+        return False
+    return True
+
+
+def ftp_get(url, temp_file, timeout=10.0):
+    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+    try:
+        logger.info(f"Getting through FTP {url} into {temp_file.name}")
+        with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
+            shutil.copyfileobj(r, temp_file)
+    except urllib.error.URLError as e:
+        raise ConnectionError(e) from None
+
+
+def http_get(
+    url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
+):
+    headers = copy.deepcopy(headers) or {}
+    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
+    if resume_size > 0:
+        headers["Range"] = f"bytes={resume_size:d}-"
+    response = _request_with_retry(
+        method="GET",
+        url=url,
+        stream=True,
+        proxies=proxies,
+        headers=headers,
+        cookies=cookies,
+        max_retries=max_retries,
+        timeout=timeout,
+    )
+    if response.status_code == 416:  # Range not satisfiable
+        return
+    content_length = response.headers.get("Content-Length")
+    total = resume_size + int(content_length) if content_length is not None else None
+    with hf_tqdm(
+        unit="B",
+        unit_scale=True,
+        total=total,
+        initial=resume_size,
+        desc=desc or "Downloading",
+    ) as progress:
+        for chunk in response.iter_content(chunk_size=1024):
+            progress.update(len(chunk))
+            temp_file.write(chunk)
+
+
+def http_head(
+    url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
+) -> requests.Response:
+    headers = copy.deepcopy(headers) or {}
+    headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
+    response = _request_with_retry(
+        method="HEAD",
+        url=url,
+        proxies=proxies,
+        headers=headers,
+        cookies=cookies,
+        allow_redirects=allow_redirects,
+        timeout=timeout,
+        max_retries=max_retries,
+    )
+    return response
+
+
+def request_etag(
+    url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
+) -> Optional[str]:
+    if use_auth_token != "deprecated":
+        warnings.warn(
+            "'use_auth_token' was deprecated in favor of 'token' in version 
2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if urlparse(url).scheme not in ("http", "https"): + return None + headers = get_authentication_headers_for_url(url, token=token) + response = http_head(url, headers=headers, max_retries=3) + response.raise_for_status() + etag = response.headers.get("ETag") if response.ok else None + return etag + + +def get_from_cache( + url, + cache_dir=None, + force_download=False, + proxies=None, + etag_timeout=100, + resume_download=False, + user_agent=None, + local_files_only=False, + use_etag=True, + max_retries=0, + token=None, + use_auth_token="deprecated", + ignore_url_params=False, + storage_options=None, + download_desc=None, +) -> str: + """ + Given a URL, look for the corresponding file in the local cache. + If it's not there, download it. Then return the path to the cached file. + + Return: + Local path (string) + + Raises: + FileNotFoundError: in case of non-recoverable file + (non-existent or no cache on disk) + ConnectionError: in case of unreachable url + and no cache on disk + """ + if use_auth_token != "deprecated": + warnings.warn( + "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n" + f"You can remove this warning by passing 'token={use_auth_token}' instead.", + FutureWarning, + ) + token = use_auth_token + if cache_dir is None: + cache_dir = config.HF_DATASETS_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + os.makedirs(cache_dir, exist_ok=True) + + if ignore_url_params: + # strip all query parameters and #fragments from the URL + cached_url = urljoin(url, urlparse(url).path) + else: + cached_url = url # additional parameters may be added to the given URL + + connected = False + response = None + cookies = None + etag = None + head_error = None + scheme = None + + # Try a first time to file the file on the local file system without eTag (None) + # if we don't ask for 'force_download' then we spare a request + filename = hash_url_to_filename(cached_url, etag=None) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download and not use_etag: + return cache_path + + # Prepare headers for authentication + headers = get_authentication_headers_for_url(url, token=token) + if user_agent is not None: + headers["user-agent"] = user_agent + + # We don't have the file locally or we need an eTag + if not local_files_only: + scheme = urlparse(url).scheme + if scheme == "ftp": + connected = ftp_head(url) + elif scheme not in ("http", "https"): + response = fsspec_head(url, storage_options=storage_options) + # s3fs uses "ETag", gcsfs uses "etag" + etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None + connected = True + try: + response = http_head( + url, + allow_redirects=True, + proxies=proxies, + timeout=etag_timeout, + max_retries=max_retries, + headers=headers, + ) + if response.status_code == 200: # ok + etag = response.headers.get("ETag") if use_etag else None + for k, v in response.cookies.items(): + # In some edge cases, we need to get a confirmation token + if k.startswith("download_warning") and "drive.google.com" in url: + url += "&confirm=" + v + cookies = response.cookies + connected = True + # Fix Google Drive URL to avoid Virus scan warning + if "drive.google.com" in url and "confirm=" not in url: + url += "&confirm=t" + # In some edge cases, head request returns 
400 but the connection is actually ok + elif ( + (response.status_code == 400 and "firebasestorage.googleapis.com" in url) + or (response.status_code == 405 and "drive.google.com" in url) + or ( + response.status_code == 403 + and ( + re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url) + or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url) + ) + ) + or (response.status_code == 403 and "ndownloader.figstatic.com" in url) + ): + connected = True + logger.info(f"Couldn't get ETag version for url {url}") + elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None: + raise ConnectionError( + f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`" + ) + except (OSError, requests.exceptions.Timeout) as e: + # not connected + head_error = e + pass + + # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. + # try to get the last downloaded one + if not connected: + if os.path.exists(cache_path) and not force_download: + return cache_path + if local_files_only: + raise FileNotFoundError( + f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been" + " disabled. To enable file online look-ups, set 'local_files_only' to False." + ) + elif response is not None and response.status_code == 404: + raise FileNotFoundError(f"Couldn't find file at {url}") + _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") + if head_error is not None: + raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})") + elif response is not None: + raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})") + else: + raise ConnectionError(f"Couldn't reach {url}") + + # Try a second time + filename = hash_url_to_filename(cached_url, etag) + cache_path = os.path.join(cache_dir, filename) + + if os.path.exists(cache_path) and not force_download: + return cache_path + + # From now on, connected is True. + # Prevent parallel downloads of the same file with a lock. + lock_path = cache_path + ".lock" + with FileLock(lock_path): + # Retry in case previously locked processes just enter after the precedent process releases the lock + if os.path.exists(cache_path) and not force_download: + return cache_path + + incomplete_path = cache_path + ".incomplete" + + @contextmanager + def temp_file_manager(mode="w+b"): + with open(incomplete_path, mode) as f: + yield f + + resume_size = 0 + if resume_download: + temp_file_manager = partial(temp_file_manager, mode="a+b") + if os.path.exists(incomplete_path): + resume_size = os.stat(incomplete_path).st_size + + # Download to temporary file, then copy to cache path once finished. + # Otherwise, you get corrupt cache entries if the download gets interrupted. 
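+        # (with resume_download=True, temp_file_manager was switched to mode "a+b" above, so an
+        # existing ".incomplete" file is appended to and only the missing bytes are requested)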
+ with temp_file_manager() as temp_file: + logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") + + # GET file object + if scheme == "ftp": + ftp_get(url, temp_file) + elif scheme not in ("http", "https"): + fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc) + else: + http_get( + url, + temp_file, + proxies=proxies, + resume_size=resume_size, + headers=headers, + cookies=cookies, + max_retries=max_retries, + desc=download_desc, + ) + + logger.info(f"storing {url} in cache at {cache_path}") + shutil.move(temp_file.name, cache_path) + umask = os.umask(0o666) + os.umask(umask) + os.chmod(cache_path, 0o666 & ~umask) + + logger.info(f"creating metadata file for {cache_path}") + meta = {"url": url, "etag": etag} + meta_path = cache_path + ".json" + with open(meta_path, "w", encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + return cache_path + + +def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "") + return fn + + return docstring_decorator + + +def add_end_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr) + return fn + + return docstring_decorator + + +def estimate_dataset_size(paths): + return sum(path.stat().st_size for path in paths) + + +def readline(f: io.RawIOBase): + # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525 + res = bytearray() + while True: + b = f.read(1) + if not b: + break + res += b + if res.endswith(b"\n"): + break + return bytes(res) diff --git a/testbed/huggingface__datasets/src/datasets/utils/filelock.py b/testbed/huggingface__datasets/src/datasets/utils/filelock.py new file mode 100644 index 0000000000000000000000000000000000000000..66c3c97649ec4daef6d6e9a1fd73343a1357e715 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/filelock.py @@ -0,0 +1,11 @@ +# deprecated, please use the `filelock` package instead + +from filelock import ( # noqa: F401 # imported for backward compatibility + BaseFileLock, + SoftFileLock, + Timeout, + UnixFileLock, + WindowsFileLock, +) + +from ._filelock import FileLock # noqa: F401 # imported for backward compatibility diff --git a/testbed/huggingface__datasets/src/datasets/utils/hub.py b/testbed/huggingface__datasets/src/datasets/utils/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..46402accfbb385411ab7bc3ee16462e63dd0c2a9 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/hub.py @@ -0,0 +1,12 @@ +from typing import Optional +from urllib.parse import quote + +import huggingface_hub as hfh +from packaging import version + + +def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str: + if version.parse(hfh.__version__).release < version.parse("0.11.0").release: + # old versions of hfh don't url-encode the file path + path = quote(path) + return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision) diff --git a/testbed/huggingface__datasets/src/datasets/utils/info_utils.py b/testbed/huggingface__datasets/src/datasets/utils/info_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4eaa2f0418b2200b9e6714e6697ee68efe753107 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/info_utils.py @@ -0,0 +1,130 @@ +import enum +import os +from typing import Optional + +from 
huggingface_hub.utils import insecure_hashlib + +from .. import config +from .logging import get_logger + + +logger = get_logger(__name__) + + +class VerificationMode(enum.Enum): + """`Enum` that specifies which verification checks to run. + + The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns + when generating/downloading a dataset for the first time. + + The verification modes: + + | | Verification checks | + |---------------------------|------------------------------------------------------------------------------ | + | `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder | + | | and the validity (number of files, checksums, etc.) of downloaded files | + | `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files | + | `NO_CHECKS` | None | + + """ + + ALL_CHECKS = "all_checks" + BASIC_CHECKS = "basic_checks" + NO_CHECKS = "no_checks" + + +class ChecksumVerificationException(Exception): + """Exceptions during checksums verifications of downloaded files.""" + + +class UnexpectedDownloadedFile(ChecksumVerificationException): + """Some downloaded files were not expected.""" + + +class ExpectedMoreDownloadedFiles(ChecksumVerificationException): + """Some files were supposed to be downloaded but were not.""" + + +class NonMatchingChecksumError(ChecksumVerificationException): + """The downloaded file checksum don't match the expected checksum.""" + + +def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None): + if expected_checksums is None: + logger.info("Unable to verify checksums.") + return + if len(set(expected_checksums) - set(recorded_checksums)) > 0: + raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums))) + if len(set(recorded_checksums) - set(expected_checksums)) > 0: + raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums))) + bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] + for_verification_name = " for " + verification_name if verification_name is not None else "" + if len(bad_urls) > 0: + raise NonMatchingChecksumError( + f"Checksums didn't match{for_verification_name}:\n" + f"{bad_urls}\n" + "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" + ) + logger.info("All the checksums matched successfully" + for_verification_name) + + +class SplitsVerificationException(Exception): + """Exceptions during splis verifications""" + + +class UnexpectedSplits(SplitsVerificationException): + """The expected splits of the downloaded file is missing.""" + + +class ExpectedMoreSplits(SplitsVerificationException): + """Some recorded splits are missing.""" + + +class NonMatchingSplitsSizesError(SplitsVerificationException): + """The splits sizes don't match the expected splits sizes.""" + + +def verify_splits(expected_splits: Optional[dict], recorded_splits: dict): + if expected_splits is None: + logger.info("Unable to verify splits sizes.") + return + if len(set(expected_splits) - set(recorded_splits)) > 0: + raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits))) + if len(set(recorded_splits) - set(expected_splits)) > 0: + raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits))) + bad_splits = [ + {"expected": expected_splits[name], "recorded": recorded_splits[name]} + for name in expected_splits + if expected_splits[name].num_examples != 
recorded_splits[name].num_examples + ] + if len(bad_splits) > 0: + raise NonMatchingSplitsSizesError(str(bad_splits)) + logger.info("All the splits matched successfully.") + + +def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict: + """Compute the file size and the sha256 checksum of a file""" + if record_checksum: + m = insecure_hashlib.sha256() + with open(path, "rb") as f: + for chunk in iter(lambda: f.read(1 << 20), b""): + m.update(chunk) + checksum = m.hexdigest() + else: + checksum = None + return {"num_bytes": os.path.getsize(path), "checksum": checksum} + + +def is_small_dataset(dataset_size): + """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. + + Args: + dataset_size (int): Dataset size in bytes. + + Returns: + bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. + """ + if dataset_size and config.IN_MEMORY_MAX_SIZE: + return dataset_size < config.IN_MEMORY_MAX_SIZE + else: + return False diff --git a/testbed/huggingface__datasets/src/datasets/utils/logging.py b/testbed/huggingface__datasets/src/datasets/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ea17d6cad2f23e62d28fbc8bd49089bc09decb --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/logging.py @@ -0,0 +1,179 @@ +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Logging utilities. """ + +import logging +import os +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Optional + +from .tqdm import ( # noqa: F401 # imported for backward compatibility + disable_progress_bar, + enable_progress_bar, + is_progress_bar_enabled, + tqdm, +) + + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_default_logging_level(): + """ + If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level. + If it is not - fall back to ``_default_log_level`` + """ + env_level_str = os.getenv("DATASETS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option DATASETS_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + # Apply our default configuration to the library root logger. 
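+    # In other words, importing `datasets` attaches a `StreamHandler` to the
+    # "datasets" logger and sets its level from the DATASETS_VERBOSITY
+    # environment variable (WARNING when unset), e.g.:
+    #
+    #   DATASETS_VERBOSITY=debug python -c "import datasets"   # verbose library logs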
+ library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(logging.StreamHandler()) + library_root_logger.setLevel(_get_default_logging_level()) + + +def _reset_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(logging.NOTSET) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """Return a logger with the specified name. + This function can be used in dataset scripts. + """ + if name is None: + name = _get_library_name() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the HuggingFace datasets library's root logger. + Returns: + Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. + + + + HuggingFace datasets library has following logging levels: + - `datasets.logging.CRITICAL`, `datasets.logging.FATAL` + - `datasets.logging.ERROR` + - `datasets.logging.WARNING`, `datasets.logging.WARN` + - `datasets.logging.INFO` + - `datasets.logging.DEBUG` + + + """ + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """Set the level for the Hugging Face Datasets library's root logger. + Args: + verbosity: + Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`. + """ + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """Set the level for the Hugging Face datasets library's root logger to `INFO`. + + This will display most of the logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`. + """ + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """Set the level for the Hugging Face datasets library's root logger to `WARNING`. + + This will display only the warning and errors logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`. + """ + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """Set the level for the Hugging Face datasets library's root logger to `DEBUG`. + + This will display all the logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`. + """ + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """Set the level for the Hugging Face datasets library's root logger to `ERROR`. + + This will display only the errors logging information and tqdm bars. + + Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`. + """ + return set_verbosity(ERROR) + + +def disable_propagation() -> None: + """Disable propagation of the library log outputs. + Note that log propagation is disabled by default. + """ + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """Enable propagation of the library log outputs. + Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has + been configured. 
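+
+    Example (an illustrative sketch, assuming the application has configured the
+    root logger)::
+
+        import logging
+        import datasets
+
+        logging.basicConfig(level=logging.INFO)
+        datasets.utils.logging.enable_propagation()
+        datasets.utils.logging.get_logger().info("now also handled by the root logger")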
+ """ + _get_library_root_logger().propagate = True + + +# Configure the library root logger at the module level (singleton-like) +_configure_library_root_logger() diff --git a/testbed/huggingface__datasets/src/datasets/utils/metadata.py b/testbed/huggingface__datasets/src/datasets/utils/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..79a3f68b8f00b4b71b1956eb17a21f90ca4ef235 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/metadata.py @@ -0,0 +1,273 @@ +import textwrap +from collections import Counter +from pathlib import Path +from typing import Any, ClassVar, Dict, Optional, Tuple, Union + +import yaml +from huggingface_hub import DatasetCardData + +from ..config import METADATA_CONFIGS_FIELD +from ..utils.logging import get_logger +from .deprecation_utils import deprecated + + +logger = get_logger(__name__) + + +class _NoDuplicateSafeLoader(yaml.SafeLoader): + def _check_no_duplicates_on_constructed_node(self, node): + keys = [self.constructed_objects[key_node] for key_node, _ in node.value] + keys = [tuple(key) if isinstance(key, list) else key for key in keys] + counter = Counter(keys) + duplicate_keys = [key for key in counter if counter[key] > 1] + if duplicate_keys: + raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}") + + def construct_mapping(self, node, deep=False): + mapping = super().construct_mapping(node, deep=deep) + self._check_no_duplicates_on_constructed_node(node) + return mapping + + +def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]: + full_content = list(readme_content.splitlines()) + if full_content and full_content[0] == "---" and "---" in full_content[1:]: + sep_idx = full_content[1:].index("---") + 1 + yamlblock = "\n".join(full_content[1:sep_idx]) + return yamlblock, "\n".join(full_content[sep_idx + 1 :]) + + return None, "\n".join(full_content) + + +@deprecated("Use `huggingface_hub.DatasetCardData` instead.") +class DatasetMetadata(dict): + # class attributes + _FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata + + @classmethod + def from_readme(cls, path: Union[Path, str]) -> "DatasetMetadata": + """Loads and validates the dataset metadata from its dataset card (README.md) + + Args: + path (:obj:`Path`): Path to the dataset card (its README.md file) + + Returns: + :class:`DatasetMetadata`: The dataset's metadata + + Raises: + :obj:`TypeError`: If the dataset's metadata is invalid + """ + with open(path, encoding="utf-8") as readme_file: + yaml_string, _ = _split_yaml_from_readme(readme_file.read()) + if yaml_string is not None: + return cls.from_yaml_string(yaml_string) + else: + return cls() + + def to_readme(self, path: Path): + if path.exists(): + with open(path, encoding="utf-8") as readme_file: + readme_content = readme_file.read() + else: + readme_content = None + updated_readme_content = self._to_readme(readme_content) + with open(path, "w", encoding="utf-8") as readme_file: + readme_file.write(updated_readme_content) + + def _to_readme(self, readme_content: Optional[str] = None) -> str: + if readme_content is not None: + _, content = _split_yaml_from_readme(readme_content) + full_content = "---\n" + self.to_yaml_string() + "---\n" + content + else: + full_content = "---\n" + self.to_yaml_string() + "---\n" + return full_content + + @classmethod + def from_yaml_string(cls, string: str) -> "DatasetMetadata": + """Loads and validates the dataset metadata from a YAML string + + Args: + string (:obj:`str`): The YAML string + + Returns: 
+ :class:`DatasetMetadata`: The dataset's metadata + + Raises: + :obj:`TypeError`: If the dataset's metadata is invalid + """ + metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {} + + # Convert the YAML keys to DatasetMetadata fields + metadata_dict = { + (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value + for key, value in metadata_dict.items() + } + return cls(**metadata_dict) + + def to_yaml_string(self) -> str: + return yaml.safe_dump( + { + (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value + for key, value in self.items() + }, + sort_keys=False, + allow_unicode=True, + encoding="utf-8", + ).decode("utf-8") + + +class MetadataConfigs(Dict[str, Dict[str, Any]]): + """Should be in format {config_name: {**config_params}}.""" + + FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD + + @staticmethod + def _raise_if_data_files_field_not_valid(metadata_config: dict): + yaml_data_files = metadata_config.get("data_files") + if yaml_data_files is not None: + yaml_error_message = textwrap.dedent( + f""" + Expected data_files in YAML to be either a string or a list of strings + or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files} + Examples of data_files in YAML: + + data_files: data.csv + + data_files: data/*.png + + data_files: + - part0/* + - part1/* + + data_files: + - split: train + path: train/* + - split: test + path: test/* + + data_files: + - split: train + path: + - train/part1/* + - train/part2/* + - split: test + path: test/* + """ + ) + if not isinstance(yaml_data_files, (list, str)): + raise ValueError(yaml_error_message) + if isinstance(yaml_data_files, list): + for yaml_data_files_item in yaml_data_files: + if ( + not isinstance(yaml_data_files_item, (str, dict)) + or isinstance(yaml_data_files_item, dict) + and not ( + len(yaml_data_files_item) == 2 + and "split" in yaml_data_files_item + and isinstance(yaml_data_files_item.get("path"), (str, list)) + ) + ): + raise ValueError(yaml_error_message) + + @classmethod + def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs": + if dataset_card_data.get(cls.FIELD_NAME): + metadata_configs = dataset_card_data[cls.FIELD_NAME] + if not isinstance(metadata_configs, list): + raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'") + for metadata_config in metadata_configs: + if "config_name" not in metadata_config: + raise ValueError( + f"Each config must include `config_name` field with a string name of a config, " + f"but got {metadata_config}. 
" + ) + cls._raise_if_data_files_field_not_valid(metadata_config) + return cls( + { + config["config_name"]: {param: value for param, value in config.items() if param != "config_name"} + for config in metadata_configs + } + ) + return cls() + + def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None: + if self: + for metadata_config in self.values(): + self._raise_if_data_files_field_not_valid(metadata_config) + current_metadata_configs = self.from_dataset_card_data(dataset_card_data) + total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items())) + for config_name, config_metadata in total_metadata_configs.items(): + config_metadata.pop("config_name", None) + dataset_card_data[self.FIELD_NAME] = [ + {"config_name": config_name, **config_metadata} + for config_name, config_metadata in total_metadata_configs.items() + ] + + def get_default_config_name(self) -> Optional[str]: + default_config_name = None + for config_name, metadata_config in self.items(): + if config_name == "default" or metadata_config.get("default"): + if default_config_name is None: + default_config_name = config_name + else: + raise ValueError( + f"Dataset has several default configs: '{default_config_name}' and '{config_name}'." + ) + return default_config_name + + +# DEPRECATED - just here to support old versions of evaluate like 0.2.2 +# To support new tasks on the Hugging Face Hub, please open a PR for this file: +# https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts +known_task_ids = { + "image-classification": [], + "translation": [], + "image-segmentation": [], + "fill-mask": [], + "automatic-speech-recognition": [], + "token-classification": [], + "sentence-similarity": [], + "audio-classification": [], + "question-answering": [], + "summarization": [], + "zero-shot-classification": [], + "table-to-text": [], + "feature-extraction": [], + "other": [], + "multiple-choice": [], + "text-classification": [], + "text-to-image": [], + "text2text-generation": [], + "zero-shot-image-classification": [], + "tabular-classification": [], + "tabular-regression": [], + "image-to-image": [], + "tabular-to-text": [], + "unconditional-image-generation": [], + "text-retrieval": [], + "text-to-speech": [], + "object-detection": [], + "audio-to-audio": [], + "text-generation": [], + "conversational": [], + "table-question-answering": [], + "visual-question-answering": [], + "image-to-text": [], + "reinforcement-learning": [], + "voice-activity-detection": [], + "time-series-forecasting": [], + "document-question-answering": [], +} + + +if __name__ == "__main__": + from argparse import ArgumentParser + + ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.") + ap.add_argument("readme_filepath") + args = ap.parse_args() + + readme_filepath = Path(args.readme_filepath) + dataset_metadata = DatasetMetadata.from_readme(readme_filepath) + print(dataset_metadata) + dataset_metadata.to_readme(readme_filepath) diff --git a/testbed/huggingface__datasets/src/datasets/utils/patching.py b/testbed/huggingface__datasets/src/datasets/utils/patching.py new file mode 100644 index 0000000000000000000000000000000000000000..f245cabd97065d9e82a1320d02999f9ec03bda36 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/patching.py @@ -0,0 +1,119 @@ +from importlib import import_module + +from .logging import get_logger + + +logger = get_logger(__name__) + + +class _PatchedModuleObj: + """Set all the modules components as attributes of 
the _PatchedModuleObj object.""" + + def __init__(self, module, attrs=None): + attrs = attrs or [] + if module is not None: + for key in module.__dict__: + if key in attrs or not key.startswith("__"): + setattr(self, key, getattr(module, key)) + self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module + + +class patch_submodule: + """ + Patch a submodule attribute of an object, by keeping all other submodules intact at all levels. + + Example:: + + >>> import importlib + >>> from datasets.load import dataset_module_factory + >>> from datasets.streaming import patch_submodule, xjoin + >>> + >>> dataset_module = dataset_module_factory("snli") + >>> snli_module = importlib.import_module(dataset_module.module_path) + >>> patcher = patch_submodule(snli_module, "os.path.join", xjoin) + >>> patcher.start() + >>> assert snli_module.os.path.join is xjoin + """ + + _active_patches = [] + + def __init__(self, obj, target: str, new, attrs=None): + self.obj = obj + self.target = target + self.new = new + self.key = target.split(".")[0] + self.original = {} + self.attrs = attrs or [] + + def __enter__(self): + *submodules, target_attr = self.target.split(".") + + # Patch modules: + # it's used to patch attributes of submodules like "os.path.join"; + # in this case we need to patch "os" and "os.path" + + for i in range(len(submodules)): + try: + submodule = import_module(".".join(submodules[: i + 1])) + except ModuleNotFoundError: + continue + # We iterate over all the globals in self.obj in case we find "os" or "os.path" + for attr in self.obj.__dir__(): + obj_attr = getattr(self.obj, attr) + # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". + # This allows to patch renamed modules like "from os import path as ospath". + if obj_attr is submodule or ( + isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule + ): + self.original[attr] = obj_attr + # patch at top level + setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs)) + patched = getattr(self.obj, attr) + # construct lower levels patches + for key in submodules[i + 1 :]: + setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs)) + patched = getattr(patched, key) + # finally set the target attribute + setattr(patched, target_attr, self.new) + + # Patch attribute itself: + # it's used for builtins like "open", + # and also to patch "os.path.join" we may also need to patch "join" + # itself if it was imported as "from os.path import join". + + if submodules: # if it's an attribute of a submodule like "os.path.join" + try: + attr_value = getattr(import_module(".".join(submodules)), target_attr) + except (AttributeError, ModuleNotFoundError): + return + # We iterate over all the globals in self.obj in case we find "os.path.join" + for attr in self.obj.__dir__(): + # We don't check for the name of the global, but rather if its value *is* "os.path.join". + # This allows to patch renamed attributes like "from os.path import join as pjoin". 
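+                # For illustration (names taken from the class docstring above): after
+                # `patch_submodule(snli_module, "os.path.join", xjoin).start()`,
+                # a module that did `from os.path import join as pjoin` ends up with
+                # `snli_module.pjoin is xjoin` being True, since matching is by value.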
+ if getattr(self.obj, attr) is attr_value: + self.original[attr] = getattr(self.obj, attr) + setattr(self.obj, attr, self.new) + elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" + self.original[target_attr] = globals()["__builtins__"][target_attr] + setattr(self.obj, target_attr, self.new) + else: + raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.") + + def __exit__(self, *exc_info): + for attr in list(self.original): + setattr(self.obj, attr, self.original.pop(attr)) + + def start(self): + """Activate a patch.""" + self.__enter__() + self._active_patches.append(self) + + def stop(self): + """Stop an active patch.""" + try: + self._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + return None + + return self.__exit__() diff --git a/testbed/huggingface__datasets/src/datasets/utils/py_utils.py b/testbed/huggingface__datasets/src/datasets/utils/py_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4d49c3b586563ea2bd59488e9a10208cf7857c7d --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/py_utils.py @@ -0,0 +1,1417 @@ +# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""Some python utils function and classes. + +""" + +import copy +import functools +import itertools +import multiprocessing.pool +import os +import queue +import re +import types +import warnings +from contextlib import contextmanager +from dataclasses import fields, is_dataclass +from io import BytesIO as StringIO +from multiprocessing import Manager +from queue import Empty +from shutil import disk_usage +from types import CodeType, FunctionType +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union +from urllib.parse import urlparse + +import dill +import multiprocess +import multiprocess.pool +import numpy as np +from packaging import version +from tqdm.auto import tqdm + +from .. import config +from ..parallel import parallel_map +from . import logging +from . import tqdm as hf_tqdm + + +try: # pragma: no branch + import typing_extensions as _typing_extensions + from typing_extensions import Final, Literal +except ImportError: + _typing_extensions = Literal = Final = None + + +logger = logging.get_logger(__name__) + + +# NOTE: When used on an instance method, the cache is shared across all +# instances and IS NOT per-instance. +# See +# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance +# For @property methods, use @memoized_property below. +memoize = functools.lru_cache + + +def size_str(size_in_bytes): + """Returns a human readable size string. + + If size_in_bytes is None, then returns "Unknown size". + + For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`. 
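+
+    Sizes below 1 KiB fall back to a byte count, e.g. `size_str(512) == "512 bytes"`.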
+ + Args: + size_in_bytes: `int` or `None`, the size, in bytes, that we want to + format as a human-readable size string. + """ + if not size_in_bytes: + return "Unknown size" + + _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)] + + size_in_bytes = float(size_in_bytes) + for name, size_bytes in _NAME_LIST: + value = size_in_bytes / size_bytes + if value >= 1.0: + return f"{value:.2f} {name}" + return f"{int(size_in_bytes)} bytes" + + +def convert_file_size_to_int(size: Union[int, str]) -> int: + """ + Converts a size expressed as a string with digits an unit (like `"50MB"`) to an integer (in bytes). + + Args: + size (`int` or `str`): The size to convert. Will be directly returned if an `int`. + + Example: + + ```py + >>> convert_file_size_to_int("1MiB") + 1048576 + ``` + """ + if isinstance(size, int): + return size + if size.upper().endswith("PIB"): + return int(size[:-3]) * (2**50) + if size.upper().endswith("TIB"): + return int(size[:-3]) * (2**40) + if size.upper().endswith("GIB"): + return int(size[:-3]) * (2**30) + if size.upper().endswith("MIB"): + return int(size[:-3]) * (2**20) + if size.upper().endswith("KIB"): + return int(size[:-3]) * (2**10) + if size.upper().endswith("PB"): + int_size = int(size[:-2]) * (10**15) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("TB"): + int_size = int(size[:-2]) * (10**12) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("GB"): + int_size = int(size[:-2]) * (10**9) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("MB"): + int_size = int(size[:-2]) * (10**6) + return int_size // 8 if size.endswith("b") else int_size + if size.upper().endswith("KB"): + int_size = int(size[:-2]) * (10**3) + return int_size // 8 if size.endswith("b") else int_size + raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") + + +def glob_pattern_to_regex(pattern): + # partially taken from fsspec: + # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735 + return ( + pattern.replace("\\", r"\\") + .replace(".", r"\.") + .replace("*", ".*") + .replace("+", r"\+") + .replace("//", "/") + .replace("(", r"\(") + .replace(")", r"\)") + .replace("|", r"\|") + .replace("^", r"\^") + .replace("$", r"\$") + .rstrip("/") + .replace("?", ".") + ) + + +def string_to_dict(string: str, pattern: str) -> Dict[str, str]: + """Un-format a string using a python f-string pattern. 
+ From https://stackoverflow.com/a/36838374 + + Example:: + + >>> p = 'hello, my name is {name} and I am a {age} year old {what}' + >>> s = p.format(name='cody', age=18, what='quarterback') + >>> s + 'hello, my name is cody and I am a 18 year old quarterback' + >>> string_to_dict(s, p) + {'age': '18', 'name': 'cody', 'what': 'quarterback'} + + Args: + string (str): input string + pattern (str): pattern formatted like a python f-string + + Returns: + Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern + + Raises: + ValueError: if the string doesn't match the pattern + """ + regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern) + result = re.search(regex, string) + if result is None: + raise ValueError(f"String {string} doesn't match the pattern {pattern}") + values = list(result.groups()) + keys = re.findall(r"{(.+?)}", pattern) + _dict = dict(zip(keys, values)) + return _dict + + +def asdict(obj): + """Convert an object to its dictionary representation recursively. + + + """ + + # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict + + def _is_dataclass_instance(obj): + # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass + return is_dataclass(obj) and not isinstance(obj, type) + + def _asdict_inner(obj): + if _is_dataclass_instance(obj): + result = {} + for f in fields(obj): + value = _asdict_inner(getattr(obj, f.name)) + if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False): + result[f.name] = value + return result + elif isinstance(obj, tuple) and hasattr(obj, "_fields"): + # obj is a namedtuple + return type(obj)(*[_asdict_inner(v) for v in obj]) + elif isinstance(obj, (list, tuple)): + # Assume we can create an object of this type by passing in a + # generator (which is not true for namedtuples, handled + # above). + return type(obj)(_asdict_inner(v) for v in obj) + elif isinstance(obj, dict): + return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()} + else: + return copy.deepcopy(obj) + + if not isinstance(obj, dict) and not _is_dataclass_instance(obj): + raise TypeError(f"{obj} is not a dict or a dataclass") + + return _asdict_inner(obj) + + +@contextmanager +def temporary_assignment(obj, attr, value): + """Temporarily assign obj.attr to value.""" + original = getattr(obj, attr, None) + setattr(obj, attr, value) + try: + yield + finally: + setattr(obj, attr, original) + + +@contextmanager +def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False): + """Temporarily set the random seed. 
This works for python numpy, pytorch and tensorflow.""" + np_state = np.random.get_state() + np.random.seed(seed) + + if set_pytorch and config.TORCH_AVAILABLE: + import torch + + torch_state = torch.random.get_rng_state() + torch.random.manual_seed(seed) + + if torch.cuda.is_available(): + torch_cuda_states = torch.cuda.get_rng_state_all() + torch.cuda.manual_seed_all(seed) + + if set_tensorflow and config.TF_AVAILABLE: + import tensorflow as tf + from tensorflow.python.eager import context as tfpycontext + + tf_state = tf.random.get_global_generator() + temp_gen = tf.random.Generator.from_seed(seed) + tf.random.set_global_generator(temp_gen) + + if not tf.executing_eagerly(): + raise ValueError("Setting random seed for TensorFlow is only available in eager mode") + + tf_context = tfpycontext.context() # eager mode context + tf_seed = tf_context._seed + tf_rng_initialized = hasattr(tf_context, "_rng") + if tf_rng_initialized: + tf_rng = tf_context._rng + tf_context._set_global_seed(seed) + + try: + yield + finally: + np.random.set_state(np_state) + + if set_pytorch and config.TORCH_AVAILABLE: + torch.random.set_rng_state(torch_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state_all(torch_cuda_states) + + if set_tensorflow and config.TF_AVAILABLE: + tf.random.set_global_generator(tf_state) + + tf_context._seed = tf_seed + if tf_rng_initialized: + tf_context._rng = tf_rng + else: + delattr(tf_context, "_rng") + + +def unique_values(values): + """Iterate over iterable and return only unique values in order.""" + seen = set() + for value in values: + if value not in seen: + seen.add(value) + yield value + + +def no_op_if_value_is_null(func): + """If the value is None, return None, else call `func`.""" + + def wrapper(value): + return func(value) if value is not None else None + + return wrapper + + +def first_non_null_value(iterable): + """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index.""" + for i, value in enumerate(iterable): + if value is not None: + return i, value + return -1, None + + +def zip_dict(*dicts): + """Iterate over items of dictionaries grouped by their keys.""" + for key in unique_values(itertools.chain(*dicts)): # set merge all keys + # Will raise KeyError if the dict don't have the same keys + yield key, tuple(d[key] for d in dicts) + + +class NonMutableDict(dict): + """Dict where keys can only be added but not modified. + + Will raise an error if the user try to overwrite one key. The error message + can be customized during construction. It will be formatted using {key} for + the overwritten key. 
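+
+    Example (illustrative)::
+
+        >>> d = NonMutableDict()
+        >>> d["split"] = "train"
+        >>> d["split"] = "test"
+        Traceback (most recent call last):
+            ...
+        ValueError: Try to overwrite existing key: split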
+ """ + + def __init__(self, *args, **kwargs): + self._error_msg = kwargs.pop( + "error_msg", + "Try to overwrite existing key: {key}", + ) + if kwargs: + raise ValueError("NonMutableDict cannot be initialized with kwargs.") + super().__init__(*args, **kwargs) + + def __setitem__(self, key, value): + if key in self: + raise ValueError(self._error_msg.format(key=key)) + return super().__setitem__(key, value) + + def update(self, other): + if any(k in self for k in other): + raise ValueError(self._error_msg.format(key=set(self) & set(other))) + return super().update(other) + + +class classproperty(property): # pylint: disable=invalid-name + """Descriptor to be used as decorator for @classmethods.""" + + def __get__(self, obj, objtype=None): + return self.fget.__get__(None, objtype)() + + +def _single_map_nested(args): + """Apply a function recursively to each element of a nested data struct.""" + function, data_struct, types, rank, disable_tqdm, desc = args + + # Singleton first to spare some computation + if not isinstance(data_struct, dict) and not isinstance(data_struct, types): + return function(data_struct) + + # Reduce logging to keep things readable in multiprocessing with tqdm + if rank is not None and logging.get_verbosity() < logging.WARNING: + logging.set_verbosity_warning() + # Print at least one thing to fix tqdm in notebooks in multiprocessing + # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 + if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): + print(" ", end="", flush=True) + + # Loop over single examples or batches and write to buffer/file if examples are to be updated + pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct + pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc + with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar: + if isinstance(data_struct, dict): + return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} + else: + mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] + if isinstance(data_struct, list): + return mapped + elif isinstance(data_struct, tuple): + return tuple(mapped) + else: + return np.array(mapped) + + +def map_nested( + function: Callable[[Any], Any], + data_struct: Any, + dict_only: bool = False, + map_list: bool = True, + map_tuple: bool = False, + map_numpy: bool = False, + num_proc: Optional[int] = None, + parallel_min_length: int = 2, + types: Optional[tuple] = None, + disable_tqdm: bool = True, + desc: Optional[str] = None, +) -> Any: + """Apply a function recursively to each element of a nested data struct. + + Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to + `parallel_min_length`. + + + + Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``. + + Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and + multiprocessing is used. + + + + Args: + function (`Callable`): Function to be applied to `data_struct`. + data_struct (`Any`): Data structure to apply `function` to. + dict_only (`bool`, default `False`): Whether only apply `function` recursively to `dict` values in + `data_struct`. + map_list (`bool`, default `True`): Whether also apply `function` recursively to `list` elements (besides `dict` + values). 
+ map_tuple (`bool`, default `False`): Whether also apply `function` recursively to `tuple` elements (besides + `dict` values). + map_numpy (`bool, default `False`): Whether also apply `function` recursively to `numpy.array` elements (besides + `dict` values). + num_proc (`int`, *optional*): Number of processes. + parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel + processing. + + types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their + elements. + disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar. + desc (`str`, *optional*): Prefix for the tqdm progressbar. + + Returns: + `Any` + """ + if types is None: + types = [] + if not dict_only: + if map_list: + types.append(list) + if map_tuple: + types.append(tuple) + if map_numpy: + types.append(np.ndarray) + types = tuple(types) + + # Singleton + if not isinstance(data_struct, dict) and not isinstance(data_struct, types): + return function(data_struct) + + iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct + + if num_proc is None: + num_proc = 1 + if num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length: + mapped = [ + _single_map_nested((function, obj, types, None, True, None)) + for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc) + ] + else: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message=".* is experimental and might be subject to breaking changes in the future\\.$", + category=UserWarning, + ) + mapped = parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, _single_map_nested) + + if isinstance(data_struct, dict): + return dict(zip(data_struct.keys(), mapped)) + else: + if isinstance(data_struct, list): + return mapped + elif isinstance(data_struct, tuple): + return tuple(mapped) + else: + return np.array(mapped) + + +class NestedDataStructure: + def __init__(self, data=None): + self.data = data if data is not None else [] + + def flatten(self, data=None): + data = data if data is not None else self.data + if isinstance(data, dict): + return self.flatten(list(data.values())) + elif isinstance(data, (list, tuple)): + return [flattened for item in data for flattened in self.flatten(item)] + else: + return [data] + + +def has_sufficient_disk_space(needed_bytes, directory="."): + try: + free_bytes = disk_usage(os.path.abspath(directory)).free + except OSError: + return True + return needed_bytes < free_bytes + + +def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]: + """Convert a link to a file on a github repo in a link to the raw github object.""" + parsed = urlparse(url_path) + sub_directory = None + if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com": + if "blob" in url_path: + if not url_path.endswith(".py"): + raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'") + url_path = url_path.replace("blob", "raw") # Point to the raw file + else: + # Parse github url to point to zip + github_path = parsed.path[1:] + repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master") + repo_owner, repo_name = repo_info.split("/") + url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip" + sub_directory = f"{repo_name}-{branch}" + return url_path, sub_directory + + +def get_imports(file_path: str) -> Tuple[str, str, str, str]: + """Find whether we should 
import or clone additional files for a given processing script. + And list the import. + + We allow: + - library dependencies, + - local dependencies and + - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository. + external dependencies will be downloaded (and extracted if needed in the dataset folder). + We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script. + + Note that only direct import in the dataset processing script will be handled + We don't recursively explore the additional import to download further files. + + Example:: + + import tensorflow + import .c4_utils + import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset + """ + lines = [] + with open(file_path, encoding="utf-8") as f: + lines.extend(f.readlines()) + + logger.debug(f"Checking {file_path} for additional imports.") + imports: List[Tuple[str, str, str, Optional[str]]] = [] + is_in_docstring = False + for line in lines: + docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line) + + if len(docstr_start_match) == 1: + # flip True <=> False only if doctstring + # starts at line without finishing + is_in_docstring = not is_in_docstring + + if is_in_docstring: + # import statements in doctstrings should + # not be added as required dependencies + continue + + match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE) + if match is None: + match = re.match( + r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", + line, + flags=re.MULTILINE, + ) + if match is None: + continue + if match.group(1): + # The import starts with a '.', we will download the relevant file + if any(imp[1] == match.group(2) for imp in imports): + # We already have this import + continue + if match.group(3): + # The import has a comment with 'From:', we'll retrieve it from the given url + url_path = match.group(3) + url_path, sub_directory = _convert_github_url(url_path) + imports.append(("external", match.group(2), url_path, sub_directory)) + elif match.group(2): + # The import should be at the same place as the file + imports.append(("internal", match.group(2), match.group(2), None)) + else: + if match.group(3): + # The import has a comment with `From: git+https:...`, asks user to pip install from git. 
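+                # e.g. a script line such as (hypothetical name and URL):
+                #   import my_lib  # From: git+https://github.com/user/my_lib.git
+                # is recorded as ("library", "my_lib", "git+https://github.com/user/my_lib.git", None)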
+ url_path = match.group(3) + imports.append(("library", match.group(2), url_path, None)) + else: + imports.append(("library", match.group(2), match.group(2), None)) + + return imports + + +class Pickler(dill.Pickler): + """Same Pickler as the one from dill, but improved for notebooks and shells""" + + dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy()) + + def save(self, obj, save_persistent_id=True): + # lazy registration of reduction functions + obj_type = type(obj) + if obj_type not in Pickler.dispatch: + if config.DILL_VERSION < version.parse("0.3.6"): + + def dill_log(pickler, msg): + dill._dill.log.info(msg) + + elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]: + + def dill_log(pickler, msg): + dill._dill.logger.trace(pickler, msg) + + if (obj_type.__module__, obj_type.__name__) == ("_regex", "Pattern"): + try: + import regex + + @pklregister(obj_type) + def _save_regex(pickler, obj): + dill_log(pickler, f"Re: {obj}") + args = ( + obj.pattern, + obj.flags, + ) + pickler.save_reduce(regex.compile, args, obj=obj) + dill_log(pickler, "# Re") + return + + except ImportError: + pass + elif (obj_type.__module__, obj_type.__name__) == ("torch", "Tensor"): + try: + import torch + + @pklregister(obj_type) + def _save_tensor(pickler, obj): + # `torch.from_numpy` is not picklable in `torch>=1.11.0` + def _create_tensor(np_array): + return torch.from_numpy(np_array) + + dill_log(pickler, f"To: {obj}") + args = (obj.detach().cpu().numpy(),) + pickler.save_reduce(_create_tensor, args, obj=obj) + dill_log(pickler, "# To") + return + + except ImportError: + pass + elif (obj_type.__module__, obj_type.__name__) == ("tiktoken.core", "Encoding"): + try: + import tiktoken + + @pklregister(obj_type) + def _save_encoding(pickler, obj): + dill_log(pickler, f"Enc: {obj}") + args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens) + pickler.save_reduce(tiktoken.Encoding, args, obj=obj) + dill_log(pickler, "# Enc") + return + + except ImportError: + pass + elif obj_type.__module__.startswith("spacy.lang") and any( + (cls.__module__, cls.__name__) == ("spacy.language", "Language") for cls in obj_type.__mro__ + ): + try: + import spacy + + @pklregister(obj_type) + def _save_lang(pickler, obj): + def _create_lang(config, bytes_data): + lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"]) + nlp = lang_cls.from_config(config) + return nlp.from_bytes(bytes_data) + + dill_log(pickler, f"Sp: {obj}") + args = (obj.config, obj.to_bytes()) + pickler.save_reduce(_create_lang, args, obj=obj) + dill_log(pickler, "# Sp") + return + + except ImportError: + pass + + dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id) + + def memoize(self, obj): + # don't memoize strings since two identical strings can have different python ids + if type(obj) != str: # noqa: E721 + dill.Pickler.memoize(self, obj) + + +def dump(obj, file): + """pickle an object to a file""" + Pickler(file, recurse=True).dump(obj) + return + + +@contextmanager +def _no_cache_fields(obj): + try: + if ( + "PreTrainedTokenizerBase" in [base_class.__name__ for base_class in type(obj).__mro__] + and hasattr(obj, "cache") + and isinstance(obj.cache, dict) + ): + with temporary_assignment(obj, "cache", {}): + yield + else: + yield + + except ImportError: + yield + + +def dumps(obj): + """pickle an object to a string""" + file = StringIO() + with _no_cache_fields(obj): + dump(obj, file) + return file.getvalue() + + +def pklregister(t): + def proxy(func): + 
+            Pickler.dispatch[t] = func
+            return func
+
+        return proxy
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+    @pklregister(set)
+    def _save_set(pickler, obj):
+        dill._dill.log.info(f"Se: {obj}")
+        from datasets.fingerprint import Hasher
+
+        args = (sorted(obj, key=Hasher.hash),)
+        pickler.save_reduce(set, args, obj=obj)
+        dill._dill.log.info("# Se")
+
+elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
+
+    @pklregister(set)
+    def _save_set(pickler, obj):
+        dill._dill.logger.trace(pickler, "Se: %s", obj)
+        from datasets.fingerprint import Hasher
+
+        args = (sorted(obj, key=Hasher.hash),)
+        pickler.save_reduce(set, args, obj=obj)
+        dill._dill.logger.trace(pickler, "# Se")
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+    @pklregister(CodeType)
+    def _save_code(pickler, obj):
+        """
+        From dill._dill.save_code
+        This is a modified version that removes the origin (filename + line no.)
+        of functions created in notebooks or shells for example.
+        """
+        dill._dill.log.info(f"Co: {obj}")
+        # The filename of a function is the .py file where it is defined.
+        # Filenames of functions created in notebooks or shells start with '<'
+        # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
+        # Filenames of functions created in ipykernel the filename
+        # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
+        # Moreover lambda functions have a special name: '<lambda>'
+        # ex: (lambda x: x).__code__.co_name == "<lambda>"  # True
+        #
+        # For the hashing mechanism we ignore where the function has been defined
+        # More specifically:
+        # - we ignore the filename of special functions (filename starts with '<')
+        # - we always ignore the line number
+        # - we only use the base name of the file instead of the whole path,
+        # to be robust in case a script is moved for example.
+        #
+        # Only those two lines are different from the original implementation:
+        co_filename = (
+            ""
+            if obj.co_filename.startswith("<")
+            or (
+                len(obj.co_filename.split(os.path.sep)) > 1
+                and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+            )
+            or obj.co_name == "<lambda>"
+            else os.path.basename(obj.co_filename)
+        )
+        co_firstlineno = 1
+        # The rest is the same as in the original dill implementation
+        if dill._dill.PY3:
+            if hasattr(obj, "co_posonlyargcount"):
+                args = (
+                    obj.co_argcount,
+                    obj.co_posonlyargcount,
+                    obj.co_kwonlyargcount,
+                    obj.co_nlocals,
+                    obj.co_stacksize,
+                    obj.co_flags,
+                    obj.co_code,
+                    obj.co_consts,
+                    obj.co_names,
+                    obj.co_varnames,
+                    co_filename,
+                    obj.co_name,
+                    co_firstlineno,
+                    obj.co_lnotab,
+                    obj.co_freevars,
+                    obj.co_cellvars,
+                )
+            else:
+                args = (
+                    obj.co_argcount,
+                    obj.co_kwonlyargcount,
+                    obj.co_nlocals,
+                    obj.co_stacksize,
+                    obj.co_flags,
+                    obj.co_code,
+                    obj.co_consts,
+                    obj.co_names,
+                    obj.co_varnames,
+                    co_filename,
+                    obj.co_name,
+                    co_firstlineno,
+                    obj.co_lnotab,
+                    obj.co_freevars,
+                    obj.co_cellvars,
+                )
+        else:
+            args = (
+                obj.co_argcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,
+                obj.co_name,
+                co_firstlineno,
+                obj.co_lnotab,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        pickler.save_reduce(CodeType, args, obj=obj)
+        dill._dill.log.info("# Co")
+        return
+
+elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
+    # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
+    @pklregister(CodeType)
+    def save_code(pickler, obj):
+        dill._dill.logger.trace(pickler, "Co: %s", obj)
+
+        ############################################################################################################
+        # Modification here for huggingface/datasets
+        # The filename of a function is the .py file where it is defined.
+        # Filenames of functions created in notebooks or shells start with '<'
+        # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
+        # Filenames of functions created in ipykernel the filename
+        # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
+        # Moreover lambda functions have a special name: '<lambda>'
+        # ex: (lambda x: x).__code__.co_name == "<lambda>"  # True
+        #
+        # For the hashing mechanism we ignore where the function has been defined
+        # More specifically:
+        # - we ignore the filename of special functions (filename starts with '<')
+        # - we always ignore the line number
+        # - we only use the base name of the file instead of the whole path,
+        # to be robust in case a script is moved for example.
+        #
+        # Only those two lines are different from the original implementation:
+        co_filename = (
+            ""
+            if obj.co_filename.startswith("<")
+            or (
+                len(obj.co_filename.split(os.path.sep)) > 1
+                and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+            )
+            or obj.co_name == "<lambda>"
+            else os.path.basename(obj.co_filename)
+        )
+        co_firstlineno = 1
+        # The rest is the same as in the original dill implementation, except for the replacements:
+        # - obj.co_filename => co_filename
+        # - obj.co_firstlineno => co_firstlineno
+        ############################################################################################################
+
+        if hasattr(obj, "co_endlinetable"):  # python 3.11a (20 args)
+            args = (
+                obj.co_lnotab,  # for < python 3.10 [not counted in args]
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                obj.co_qualname,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_linetable,
+                obj.co_endlinetable,
+                obj.co_columntable,
+                obj.co_exceptiontable,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        elif hasattr(obj, "co_exceptiontable"):  # python 3.11 (18 args)
+            args = (
+                obj.co_lnotab,  # for < python 3.10 [not counted in args]
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                obj.co_qualname,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_linetable,
+                obj.co_exceptiontable,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        elif hasattr(obj, "co_linetable"):  # python 3.10 (16 args)
+            args = (
+                obj.co_lnotab,  # for < python 3.10 [not counted in args]
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_linetable,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        elif hasattr(obj, "co_posonlyargcount"):  # python 3.8 (16 args)
+            args = (
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_lnotab,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        else:  # python 3.7 (15 args)
+            args = (
+                obj.co_argcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
obj.co_lnotab, + obj.co_freevars, + obj.co_cellvars, + ) + + pickler.save_reduce(dill._dill._create_code, args, obj=obj) + dill._dill.logger.trace(pickler, "# Co") + return + + +if config.DILL_VERSION < version.parse("0.3.5"): + + @pklregister(FunctionType) + def save_function(pickler, obj): + """ + From dill._dill.save_function + This is a modified version that makes globs deterministic since the order of + the keys in the output dictionary of globalvars can change. + """ + if not dill._dill._locate_function(obj): + dill._dill.log.info(f"F1: {obj}") + if getattr(pickler, "_recurse", False): + # recurse to get all globals referred to by obj + globalvars = dill.detect.globalvars + globs = globalvars(obj, recurse=True, builtin=True) + if id(obj) in dill._dill.stack: + globs = obj.__globals__ if dill._dill.PY3 else obj.func_globals + else: + globs = obj.__globals__ if dill._dill.PY3 else obj.func_globals + # globs is a dictionary with keys = var names (str) and values = python objects + # however the dictionary is not always loaded in the same order + # therefore we have to sort the keys to make it deterministic. + # This is important to make `dump` deterministic. + # Only this line is different from the original implementation: + globs = dict(sorted(globs.items())) + # The rest is the same as in the original dill implementation + _byref = getattr(pickler, "_byref", None) + _recurse = getattr(pickler, "_recurse", None) + _memo = (id(obj) in dill._dill.stack) and (_recurse is not None) + dill._dill.stack[id(obj)] = len(dill._dill.stack), obj + if dill._dill.PY3: + _super = ("super" in getattr(obj.__code__, "co_names", ())) and (_byref is not None) + if _super: + pickler._byref = True + if _memo: + pickler._recurse = False + fkwdefaults = getattr(obj, "__kwdefaults__", None) + pickler.save_reduce( + dill._dill._create_function, + (obj.__code__, globs, obj.__name__, obj.__defaults__, obj.__closure__, obj.__dict__, fkwdefaults), + obj=obj, + ) + else: + _super = ( + ("super" in getattr(obj.func_code, "co_names", ())) + and (_byref is not None) + and getattr(pickler, "_recurse", False) + ) + if _super: + pickler._byref = True + if _memo: + pickler._recurse = False + pickler.save_reduce( + dill._dill._create_function, + (obj.func_code, globs, obj.func_name, obj.func_defaults, obj.func_closure, obj.__dict__), + obj=obj, + ) + if _super: + pickler._byref = _byref + if _memo: + pickler._recurse = _recurse + if ( + dill._dill.OLDER + and not _byref + and (_super or (not _super and _memo) or (not _super and not _memo and _recurse)) + ): + pickler.clear_memo() + dill._dill.log.info("# F1") + else: + dill._dill.log.info(f"F2: {obj}") + name = getattr(obj, "__qualname__", getattr(obj, "__name__", None)) + dill._dill.StockPickler.save_global(pickler, obj, name=name) + dill._dill.log.info("# F2") + return + +elif config.DILL_VERSION.release[:3] == version.parse("0.3.5").release: # 0.3.5, 0.3.5.1 + # https://github.com/uqfoundation/dill/blob/dill-0.3.5.1/dill/_dill.py + @pklregister(FunctionType) + def save_function(pickler, obj): + if not dill._dill._locate_function(obj, pickler): + dill._dill.log.info("F1: %s" % obj) + _recurse = getattr(pickler, "_recurse", None) + _postproc = getattr(pickler, "_postproc", None) + _main_modified = getattr(pickler, "_main_modified", None) + _original_main = getattr(pickler, "_original_main", dill._dill.__builtin__) # 'None' + postproc_list = [] + if _recurse: + # recurse to get all globals referred to by obj + from dill.detect import globalvars + + globs_copy = globalvars(obj,
recurse=True, builtin=True) + + # Add the name of the module to the globs dictionary to prevent + # the duplication of the dictionary. Pickle the unpopulated + # globals dictionary and set the remaining items after the function + # is created to correctly handle recursion. + globs = {"__name__": obj.__module__} + else: + globs_copy = obj.__globals__ if dill._dill.PY3 else obj.func_globals + + # If the globals is the __dict__ from the module being saved as a + # session, substitute it by the dictionary being actually saved. + if _main_modified and globs_copy is _original_main.__dict__: + globs_copy = getattr(pickler, "_main", _original_main).__dict__ + globs = globs_copy + # If the globals is a module __dict__, do not save it in the pickle. + elif ( + globs_copy is not None + and obj.__module__ is not None + and getattr(dill._dill._import_module(obj.__module__, True), "__dict__", None) is globs_copy + ): + globs = globs_copy + else: + globs = {"__name__": obj.__module__} + + # DONE: modified here for huggingface/datasets + # - globs is a dictionary with keys = var names (str) and values = python objects + # - globs_copy is a dictionary with keys = var names (str) and values = ids of the python objects + # however the dictionary is not always loaded in the same order + # therefore we have to sort the keys to make it deterministic. + # This is important to make `dump` deterministic. + # Only these lines are different from the original implementation: + # START + globs_is_globs_copy = globs is globs_copy + globs = dict(sorted(globs.items())) + if globs_is_globs_copy: + globs_copy = globs + elif globs_copy is not None: + globs_copy = dict(sorted(globs_copy.items())) + # END + + if globs_copy is not None and globs is not globs_copy: + # In the case that the globals are copied, we need to ensure that + # the globals dictionary is updated when all objects in the + # dictionary are already created.
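+ # Sketch of the mechanism (inferred from the surrounding code): the function is first + # created with the minimal globs dict, and a deferred dill._dill._setitems(globs, globs_copy) + # call is queued (either on an enclosing function's postproc list or on postproc_list below), + # so the real globals are only filled in once every referenced object exists; this is what + # makes recursive and mutually referencing functions picklable.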
+ if dill._dill.PY3: + glob_ids = {id(g) for g in globs_copy.values()} + else: + glob_ids = {id(g) for g in globs_copy.itervalues()} + for stack_element in _postproc: + if stack_element in glob_ids: + _postproc[stack_element].append((dill._dill._setitems, (globs, globs_copy))) + break + else: + postproc_list.append((dill._dill._setitems, (globs, globs_copy))) + + if dill._dill.PY3: + closure = obj.__closure__ + state_dict = {} + for fattrname in ("__doc__", "__kwdefaults__", "__annotations__"): + fattr = getattr(obj, fattrname, None) + if fattr is not None: + state_dict[fattrname] = fattr + if obj.__qualname__ != obj.__name__: + state_dict["__qualname__"] = obj.__qualname__ + if "__name__" not in globs or obj.__module__ != globs["__name__"]: + state_dict["__module__"] = obj.__module__ + + state = obj.__dict__ + if type(state) is not dict: # noqa: E721 + state_dict["__dict__"] = state + state = None + if state_dict: + state = state, state_dict + + dill._dill._save_with_postproc( + pickler, + ( + dill._dill._create_function, + (obj.__code__, globs, obj.__name__, obj.__defaults__, closure), + state, + ), + obj=obj, + postproc_list=postproc_list, + ) + else: + closure = obj.func_closure + if obj.__doc__ is not None: + postproc_list.append((setattr, (obj, "__doc__", obj.__doc__))) + if "__name__" not in globs or obj.__module__ != globs["__name__"]: + postproc_list.append((setattr, (obj, "__module__", obj.__module__))) + if obj.__dict__: + postproc_list.append((setattr, (obj, "__dict__", obj.__dict__))) + + dill._dill._save_with_postproc( + pickler, + (dill._dill._create_function, (obj.func_code, globs, obj.func_name, obj.func_defaults, closure)), + obj=obj, + postproc_list=postproc_list, + ) + + # Lift closure cell update to earliest function (#458) + if _postproc: + topmost_postproc = next(iter(_postproc.values()), None) + if closure and topmost_postproc: + for cell in closure: + possible_postproc = (setattr, (cell, "cell_contents", obj)) + try: + topmost_postproc.remove(possible_postproc) + except ValueError: + continue + + # Change the value of the cell + pickler.save_reduce(*possible_postproc) + # pop None created by calling preprocessing step off stack + if dill._dill.PY3: + pickler.write(bytes("0", "UTF-8")) + else: + pickler.write("0") + + dill._dill.log.info("# F1") + else: + dill._dill.log.info("F2: %s" % obj) + name = getattr(obj, "__qualname__", getattr(obj, "__name__", None)) + dill._dill.StockPickler.save_global(pickler, obj, name=name) + dill._dill.log.info("# F2") + return + +elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]: + # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1739 + @pklregister(FunctionType) + def save_function(pickler, obj): + if not dill._dill._locate_function(obj, pickler): + if type(obj.__code__) is not CodeType: + # Some PyPy builtin functions have no module name, and thus are not + # able to be located + module_name = getattr(obj, "__module__", None) + if module_name is None: + module_name = dill._dill.__builtin__.__name__ + module = dill._dill._import_module(module_name, safe=True) + _pypy_builtin = False + try: + found, _ = dill._dill._getattribute(module, obj.__qualname__) + if getattr(found, "__func__", None) is obj: + _pypy_builtin = True + except AttributeError: + pass + + if _pypy_builtin: + dill._dill.logger.trace(pickler, "F3: %s", obj) + pickler.save_reduce(getattr, (found, "__func__"), obj=obj) + dill._dill.logger.trace(pickler, "# F3") + return + + 
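+ # The F1 branch below pickles the function by value (code object, globals, defaults, + # closure), which is the case this patch cares about for hashing; the F2 branch at the + # end pickles importable functions by reference via StockPickler.save_global.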
dill._dill.logger.trace(pickler, "F1: %s", obj) + _recurse = getattr(pickler, "_recurse", None) + _postproc = getattr(pickler, "_postproc", None) + _main_modified = getattr(pickler, "_main_modified", None) + _original_main = getattr(pickler, "_original_main", dill._dill.__builtin__) # 'None' + postproc_list = [] + if _recurse: + # recurse to get all globals referred to by obj + from dill.detect import globalvars + + globs_copy = globalvars(obj, recurse=True, builtin=True) + + # Add the name of the module to the globs dictionary to prevent + # the duplication of the dictionary. Pickle the unpopulated + # globals dictionary and set the remaining items after the function + # is created to correctly handle recursion. + globs = {"__name__": obj.__module__} + else: + globs_copy = obj.__globals__ + + # If the globals is the __dict__ from the module being saved as a + # session, substitute it by the dictionary being actually saved. + if _main_modified and globs_copy is _original_main.__dict__: + globs_copy = getattr(pickler, "_main", _original_main).__dict__ + globs = globs_copy + # If the globals is a module __dict__, do not save it in the pickle. + elif ( + globs_copy is not None + and obj.__module__ is not None + and getattr(dill._dill._import_module(obj.__module__, True), "__dict__", None) is globs_copy + ): + globs = globs_copy + else: + globs = {"__name__": obj.__module__} + + ######################################################################################################## + # Modification here for huggingface/datasets + # - globs is a dictionary with keys = var names (str) and values = python objects + # - globs_copy is a dictionary with keys = var names (str) and values = ids of the python objects + # However the dictionary is not always loaded in the same order, + # therefore we have to sort the keys to make it deterministic. + # This is important to make `dump` deterministic. + # Only these lines are different from the original implementation: + # START + globs_is_globs_copy = globs is globs_copy + globs = dict(sorted(globs.items())) + if globs_is_globs_copy: + globs_copy = globs + elif globs_copy is not None: + globs_copy = dict(sorted(globs_copy.items())) + # END + ######################################################################################################## + + if globs_copy is not None and globs is not globs_copy: + # In the case that the globals are copied, we need to ensure that + # the globals dictionary is updated when all objects in the + # dictionary are already created.
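+ # Illustrative (hypothetical names): without the sort in the block above, globalvars() + # could return {"np": ..., "torch": ...} on one run and {"torch": ..., "np": ...} on + # another, so the pickled bytes (and any hash computed from them) would differ even + # though nothing changed; sorting pins the key order so both runs serialize identically.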
+ glob_ids = {id(g) for g in globs_copy.values()} + for stack_element in _postproc: + if stack_element in glob_ids: + _postproc[stack_element].append((dill._dill._setitems, (globs, globs_copy))) + break + else: + postproc_list.append((dill._dill._setitems, (globs, globs_copy))) + + closure = obj.__closure__ + state_dict = {} + for fattrname in ("__doc__", "__kwdefaults__", "__annotations__"): + fattr = getattr(obj, fattrname, None) + if fattr is not None: + state_dict[fattrname] = fattr + if obj.__qualname__ != obj.__name__: + state_dict["__qualname__"] = obj.__qualname__ + if "__name__" not in globs or obj.__module__ != globs["__name__"]: + state_dict["__module__"] = obj.__module__ + + state = obj.__dict__ + if type(state) is not dict: # noqa: E721 + state_dict["__dict__"] = state + state = None + if state_dict: + state = state, state_dict + + dill._dill._save_with_postproc( + pickler, + (dill._dill._create_function, (obj.__code__, globs, obj.__name__, obj.__defaults__, closure), state), + obj=obj, + postproc_list=postproc_list, + ) + + # Lift closure cell update to earliest function (#458) + if _postproc: + topmost_postproc = next(iter(_postproc.values()), None) + if closure and topmost_postproc: + for cell in closure: + possible_postproc = (setattr, (cell, "cell_contents", obj)) + try: + topmost_postproc.remove(possible_postproc) + except ValueError: + continue + + # Change the value of the cell + pickler.save_reduce(*possible_postproc) + # pop None created by calling preprocessing step off stack + pickler.write(bytes("0", "UTF-8")) + + dill._dill.logger.trace(pickler, "# F1") + else: + dill._dill.logger.trace(pickler, "F2: %s", obj) + name = getattr(obj, "__qualname__", getattr(obj, "__name__", None)) + dill._dill.StockPickler.save_global(pickler, obj, name=name) + dill._dill.logger.trace(pickler, "# F2") + return + + +def copyfunc(func): + result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__) + result.__kwdefaults__ = func.__kwdefaults__ + return result + + +Y = TypeVar("Y") + + +def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int: + i = -1 # return -1 for an empty generator, avoiding an UnboundLocalError below + for i, result in enumerate(func(**kwargs)): + queue.put(result) + return i + + +def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]: + return {f.pid for f in pool._pool} + + +def iflatmap_unordered( + pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool], + func: Callable[..., Iterable[Y]], + *, + kwargs_iterable: Iterable[dict], +) -> Iterable[Y]: + initial_pool_pid = _get_pool_pid(pool) + pool_changed = False + manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager + with manager_cls() as manager: + queue = manager.Queue() + async_results = [ + pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable + ] + try: + while True: + try: + yield queue.get(timeout=0.05) + except Empty: + if all(async_result.ready() for async_result in async_results) and queue.empty(): + break + if _get_pool_pid(pool) != initial_pool_pid: + pool_changed = True + # One of the subprocesses has died. We should not wait forever. + raise RuntimeError( + "One of the subprocesses has abruptly died during map operation. " + "To debug the error, disable multiprocessing."
+ ) + finally: + if not pool_changed: + # we get the result in case there's an error to raise + [async_result.get(timeout=0.05) for async_result in async_results] diff --git a/testbed/huggingface__datasets/src/datasets/utils/readme.py b/testbed/huggingface__datasets/src/datasets/utils/readme.py new file mode 100644 index 0000000000000000000000000000000000000000..66ed087f7d67181c6840179fa634e8b8e4238f85 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/readme.py @@ -0,0 +1,277 @@ +# loading package files: https://stackoverflow.com/a/20885799 +import importlib.resources as pkg_resources +import logging +from pathlib import Path +from typing import Any, List, Tuple + +import yaml + +from . import resources +from .deprecation_utils import deprecated + + +BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils" +this_url = f"{BASE_REF_URL}/{__file__}" +logger = logging.getLogger(__name__) + + +def load_yaml_resource(resource: str) -> Tuple[Any, str]: + content = pkg_resources.read_text(resources, resource) + return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}" + + +readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml") + +FILLER_TEXT = [ + "[Needs More Information]", + "[More Information Needed]", + "(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)", +] + +# Dictionary representation of section/readme, error_list, warning_list +ReadmeValidatorOutput = Tuple[dict, List[str], List[str]] + + +class Section: + def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False): + self.name = name + self.level = level + self.lines = lines + self.text = "" + self.is_empty_text = True + self.content = {} + self.parsing_error_list = [] + self.parsing_warning_list = [] + if self.lines is not None: + self.parse(suppress_parsing_errors=suppress_parsing_errors) + + def parse(self, suppress_parsing_errors: bool = False): + current_sub_level = "" + current_lines = [] + code_start = False + for line in self.lines: + if line.strip(" \n") == "": + continue + elif line.strip(" \n")[:3] == "```": + code_start = not code_start + elif line.split()[0] == self.level + "#" and not code_start: + if current_sub_level != "": + self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines) + current_lines = [] + else: + if current_lines != []: + self.text += "".join(current_lines).strip() + if self.text != "" and self.text not in FILLER_TEXT: + self.is_empty_text = False + current_lines = [] + + current_sub_level = " ".join(line.split()[1:]).strip(" \n") + else: + current_lines.append(line) + else: + if current_sub_level != "": + if current_sub_level in self.content: + self.parsing_error_list.append( + f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections." 
+ ) + self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines) + else: + if current_lines != []: + self.text += "".join(current_lines).strip() + if self.text != "" and self.text not in FILLER_TEXT: + self.is_empty_text = False + + if self.level == "" and not suppress_parsing_errors: + if self.parsing_error_list != [] or self.parsing_warning_list != []: + errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list) + error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors + raise ValueError(error_string) + + def validate(self, structure: dict) -> ReadmeValidatorOutput: + """Validates a Section class object recursively using the structure provided as a dictionary. + + Args: + structure (:obj: `dict`): The dictionary representing the expected structure. + + Returns: + :obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors. + """ + # Header text validation + error_list = [] + warning_list = [] + if structure["allow_empty"] is False: + # If content is expected + if self.is_empty_text and self.content == {}: + # If no content is found, mention it in the error_list + error_list.append(f"Expected some content in section `{self.name}` but it is empty.") + + if structure["allow_empty_text"] is False: + # If some text is expected + if self.is_empty_text: + # If no text is found, mention it in the error_list + error_list.append( + f"Expected some text in section `{self.name}` but it is empty (text in subsections is ignored)." + ) + # Subsections Validation + if structure["subsections"] is not None: + # If subsections are expected + if self.content == {}: + # If no subsections are present + values = [subsection["name"] for subsection in structure["subsections"]] + # Mention the expected values in the error_list + error_list.append( + f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'." + ) + else: + # If some subsections are present + structure_names = [subsection["name"] for subsection in structure["subsections"]] + has_missing_subsections = False + for idx, name in enumerate(structure_names): + if name not in self.content: + # If the expected subsection is not present + error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.") + has_missing_subsections = True + else: + # If the subsection is present, validate subsection, return the result + # and concat the errors from subsection to section error_list + + # Skip sublevel validation if current level is `###` + if self.level == "###": + continue + else: + _, subsec_error_list, subsec_warning_list = self.content[name].validate( + structure["subsections"][idx] + ) + error_list += subsec_error_list + warning_list += subsec_warning_list + + if has_missing_subsections: # we only allow extra subsections if all the expected ones are present + for name in self.content: + if name not in structure_names: + # If an extra subsection is present + warning_list.append( + f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown."
+ ) + if error_list: + # If there are errors, do not return the dictionary as it is invalid + return {}, error_list, warning_list + else: + return self.to_dict(), error_list, warning_list + + def to_dict(self) -> dict: + """Returns the dictionary representation of a section.""" + return { + "name": self.name, + "text": self.text, + "is_empty_text": self.is_empty_text, + "subsections": [value.to_dict() for value in self.content.values()], + } + + +@deprecated("Use `huggingface_hub.DatasetCard` instead.") +class ReadMe(Section): # Level 0 + def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False): + super().__init__(name=name, level="") # Not using lines here as we need to use a child class parse + self.structure = structure + self.yaml_tags_line_count = -2 + self.tag_count = 0 + self.lines = lines + if self.lines is not None: + self.parse(suppress_parsing_errors=suppress_parsing_errors) + + def validate(self): + if self.structure is None: + content, error_list, warning_list = self._validate(readme_structure) + else: + content, error_list, warning_list = self._validate(self.structure) + if error_list != [] or warning_list != []: + errors = "\n".join(["-\t" + x for x in error_list + warning_list]) + error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors + raise ValueError(error_string) + + @classmethod + def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False): + with open(path, encoding="utf-8") as f: + lines = f.readlines() + return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors) + + @classmethod + def from_string( + cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False + ): + lines = string.split("\n") + return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors) + + def parse(self, suppress_parsing_errors: bool = False): + # Skip Tags + line_count = 0 + + for line in self.lines: + self.yaml_tags_line_count += 1 + if line.strip(" \n") == "---": + self.tag_count += 1 + if self.tag_count == 2: + break + line_count += 1 + if self.tag_count == 2: + self.lines = self.lines[line_count + 1 :] # Skip everything up to and including the closing "---" marker. + else: + self.lines = self.lines[self.tag_count :] + super().parse(suppress_parsing_errors=suppress_parsing_errors) + + def __str__(self): + """Returns the string of the dictionary representation of the ReadMe.""" + return str(self.to_dict()) + + def _validate(self, readme_structure): + error_list = [] + warning_list = [] + if self.yaml_tags_line_count == 0: + warning_list.append("Empty YAML markers are present in the README.") + elif self.tag_count == 0: + warning_list.append("No YAML markers are present in the README.") + elif self.tag_count == 1: + warning_list.append("Only the start of the YAML tags is present in the README.") + # Check how many first level sections are present. + num_first_level_keys = len(self.content.keys()) + if num_first_level_keys > 1: + # If more than one, add to the error list, continue + error_list.append( + f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README." + ) + elif num_first_level_keys < 1: + # If fewer than one, append an error. + error_list.append( + "The README has no first-level headings. One heading is expected. Skipping further validation for this README."
+ ) + + else: + # If one exactly + start_key = list(self.content.keys())[0] # Get the key + if start_key.startswith("Dataset Card for"): # Check correct start + # If the starting is correct, validate all the sections + _, sec_error_list, sec_warning_list = self.content[start_key].validate( + readme_structure["subsections"][0] + ) + error_list += sec_error_list + warning_list += sec_warning_list + else: + # If not found, append error + error_list.append( + "No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README." + ) + if error_list: + # If there are errors, do not return the dictionary as it is invalid + return {}, error_list, warning_list + else: + return self.to_dict(), error_list, warning_list + + +if __name__ == "__main__": + from argparse import ArgumentParser + + ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.") + ap.add_argument("readme_filepath") + args = ap.parse_args() + readme_filepath = Path(args.readme_filepath) + readme = ReadMe.from_readme(readme_filepath) diff --git a/testbed/huggingface__datasets/src/datasets/utils/resources/__init__.py b/testbed/huggingface__datasets/src/datasets/utils/resources/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/src/datasets/utils/resources/creators.json b/testbed/huggingface__datasets/src/datasets/utils/resources/creators.json new file mode 100644 index 0000000000000000000000000000000000000000..d9e15f0039cc27ed8abd9fdf394423a3fada2c95 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/resources/creators.json @@ -0,0 +1,17 @@ +{ + "language": [ + "found", + "crowdsourced", + "expert-generated", + "machine-generated", + "other" + ], + "annotations": [ + "found", + "crowdsourced", + "expert-generated", + "machine-generated", + "no-annotation", + "other" + ] +} diff --git a/testbed/huggingface__datasets/src/datasets/utils/resources/languages.json b/testbed/huggingface__datasets/src/datasets/utils/resources/languages.json new file mode 100644 index 0000000000000000000000000000000000000000..ea7686f956b898af3faf97b86be89b71d88855d4 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/resources/languages.json @@ -0,0 +1,8026 @@ +{ + "code": "Programming language (C++, Java, Javascript, Python, etc.)", + "aa": "Afar", + "aaa": "Ghotuo", + "aab": "Alumu-Tesu", + "aac": "Ari", + "aad": "Amal", + "aae": "Arbëreshë Albanian", + "aaf": "Aranadan", + "aag": "Ambrak", + "aah": "Abu' Arapesh", + "aai": "Arifama-Miniafia", + "aak": "Ankave", + "aal": "Afade", + "aan": "Anambé", + "aao": "Algerian Saharan Arabic", + "aap": "Pará Arára", + "aaq": "Eastern Abnaki", + "aas": "Aasáx", + "aat": "Arvanitika Albanian", + "aau": "Abau", + "aav": "Austro-Asiatic languages", + "aaw": "Solong", + "aax": "Mandobo Atas", + "aaz": "Amarasi", + "ab": "Abkhazian", + "aba": "Abé", + "abb": "Bankon", + "abc": "Ambala Ayta", + "abd": "Manide", + "abe": "Western Abnaki", + "abf": "Abai Sungai", + "abg": "Abaga", + "abh": "Tajiki Arabic", + "abi": "Abidji", + "abj": "Aka-Bea", + "abl": "Lampung Nyo", + "abm": "Abanyom", + "abn": "Abua", + "abo": "Abon", + "abp": "Abellen Ayta", + "abq": "Abaza", + "abr": "Abron", + "abs": "Ambonese Malay", + "abt": "Ambulas", + "abu": "Abure", + "abv": "Baharna Arabic", + "abw": "Pal", + "abx": "Inabaknon", + "aby": "Aneme Wake", + "abz": "Abui", + "aca": "Achagua", + "acb": "Áncá", + "acd": 
"Gikyode", + "ace": "Achinese", + "acf": "Saint Lucian Creole French", + "ach": "Acoli", + "aci": "Aka-Cari", + "ack": "Aka-Kora", + "acl": "Akar-Bale", + "acm": "Mesopotamian Arabic", + "acn": "Achang", + "acp": "Eastern Acipa", + "acq": "Ta'izzi-Adeni Arabic", + "acr": "Achi", + "acs": "Acroá", + "act": "Achterhoeks", + "acu": "Achuar-Shiwiar", + "acv": "Achumawi", + "acw": "Hijazi Arabic", + "acx": "Omani Arabic", + "acy": "Cypriot Arabic", + "acz": "Acheron", + "ada": "Adangme", + "adb": "Atauran", + "add": "Lidzonka; Dzodinka", + "ade": "Adele", + "adf": "Dhofari Arabic", + "adg": "Andegerebinha", + "adh": "Adhola", + "adi": "Adi", + "adj": "Adioukrou", + "adl": "Galo", + "adn": "Adang", + "ado": "Abu", + "adq": "Adangbe", + "adr": "Adonara", + "ads": "Adamorobe Sign Language", + "adt": "Adnyamathanha", + "adu": "Aduge", + "adw": "Amundava", + "adx": "Amdo Tibetan", + "ady": "Adyghe; Adygei", + "adz": "Adzera", + "ae": "Avestan", + "aea": "Areba", + "aeb": "Tunisian Arabic", + "aec": "Saidi Arabic", + "aed": "Argentine Sign Language", + "aee": "Northeast Pashai; Northeast Pashayi", + "aek": "Haeke", + "ael": "Ambele", + "aem": "Arem", + "aen": "Armenian Sign Language", + "aeq": "Aer", + "aer": "Eastern Arrernte", + "aes": "Alsea", + "aeu": "Akeu", + "aew": "Ambakich", + "aey": "Amele", + "aez": "Aeka", + "af": "Afrikaans", + "afa": "Afro-Asiatic languages", + "afb": "Gulf Arabic", + "afd": "Andai", + "afe": "Putukwam", + "afg": "Afghan Sign Language", + "afh": "Afrihili", + "afi": "Akrukay; Chini", + "afk": "Nanubae", + "afn": "Defaka", + "afo": "Eloyi", + "afp": "Tapei", + "afs": "Afro-Seminole Creole", + "aft": "Afitti", + "afu": "Awutu", + "afz": "Obokuitai", + "aga": "Aguano", + "agb": "Legbo", + "agc": "Agatu", + "agd": "Agarabi", + "age": "Angal", + "agf": "Arguni", + "agg": "Angor", + "agh": "Ngelima", + "agi": "Agariya", + "agj": "Argobba", + "agk": "Isarog Agta", + "agl": "Fembe", + "agm": "Angaataha", + "agn": "Agutaynen", + "ago": "Tainae", + "agq": "Aghem", + "agr": "Aguaruna", + "ags": "Esimbi", + "agt": "Central Cagayan Agta", + "agu": "Aguacateco", + "agv": "Remontado Dumagat", + "agw": "Kahua", + "agx": "Aghul", + "agy": "Southern Alta", + "agz": "Mt. 
Iriga Agta", + "aha": "Ahanta", + "ahb": "Axamb", + "ahg": "Qimant", + "ahh": "Aghu", + "ahi": "Tiagbamrin Aizi", + "ahk": "Akha", + "ahl": "Igo", + "ahm": "Mobumrin Aizi", + "ahn": "Àhàn", + "aho": "Ahom", + "ahp": "Aproumu Aizi", + "ahr": "Ahirani", + "ahs": "Ashe", + "aht": "Ahtena", + "aia": "Arosi", + "aib": "Ainu (China)", + "aic": "Ainbai", + "aid": "Alngith", + "aie": "Amara", + "aif": "Agi", + "aig": "Antigua and Barbuda Creole English", + "aih": "Ai-Cham", + "aii": "Assyrian Neo-Aramaic", + "aij": "Lishanid Noshan", + "aik": "Ake", + "ail": "Aimele", + "aim": "Aimol", + "ain": "Ainu (Japan)", + "aio": "Aiton", + "aip": "Burumakok", + "aiq": "Aimaq", + "air": "Airoran", + "ait": "Arikem", + "aiw": "Aari", + "aix": "Aighon", + "aiy": "Ali", + "aja": "Aja (South Sudan)", + "ajg": "Aja (Benin)", + "aji": "Ajië", + "ajn": "Andajin", + "ajp": "South Levantine Arabic", + "ajs": "Algerian Jewish Sign Language", + "aju": "Judeo-Moroccan Arabic", + "ajw": "Ajawa", + "ajz": "Amri Karbi", + "ak": "Akan", + "akb": "Batak Angkola", + "akc": "Mpur", + "akd": "Ukpet-Ehom", + "ake": "Akawaio", + "akf": "Akpa", + "akg": "Anakalangu", + "akh": "Angal Heneng", + "aki": "Aiome", + "akj": "Aka-Jeru", + "akk": "Akkadian", + "akl": "Aklanon", + "akm": "Aka-Bo", + "ako": "Akurio", + "akp": "Siwu", + "akq": "Ak", + "akr": "Araki", + "aks": "Akaselem", + "akt": "Akolet", + "aku": "Akum", + "akv": "Akhvakh", + "akw": "Akwa", + "akx": "Aka-Kede", + "aky": "Aka-Kol", + "akz": "Alabama", + "ala": "Alago", + "alc": "Qawasqar", + "ald": "Alladian", + "ale": "Aleut", + "alf": "Alege", + "alg": "Algonquian languages", + "alh": "Alawa", + "ali": "Amaimon", + "alj": "Alangan", + "alk": "Alak", + "all": "Allar", + "alm": "Amblong", + "aln": "Gheg Albanian", + "alo": "Larike-Wakasihu", + "alp": "Alune", + "alq": "Algonquin", + "alr": "Alutor", + "als": "Tosk Albanian", + "alt": "Southern Altai", + "alu": "'Are'are", + "alv": "Atlantic-Congo languages", + "alw": "Alaba-K’abeena; Wanbasana", + "alx": "Amol", + "aly": "Alyawarr", + "alz": "Alur", + "am": "Amharic", + "ama": "Amanayé", + "amb": "Ambo", + "amc": "Amahuaca", + "ame": "Yanesha'", + "amf": "Hamer-Banna", + "amg": "Amurdak", + "ami": "Amis", + "amj": "Amdang", + "amk": "Ambai", + "aml": "War-Jaintia", + "amm": "Ama (Papua New Guinea)", + "amn": "Amanab", + "amo": "Amo", + "amp": "Alamblak", + "amq": "Amahai", + "amr": "Amarakaeri", + "ams": "Southern Amami-Oshima", + "amt": "Amto", + "amu": "Guerrero Amuzgo", + "amv": "Ambelau", + "amw": "Western Neo-Aramaic", + "amx": "Anmatyerre", + "amy": "Ami", + "amz": "Atampaya", + "an": "Aragonese", + "ana": "Andaqui", + "anb": "Andoa", + "anc": "Ngas", + "and": "Ansus", + "ane": "Xârâcùù", + "anf": "Animere", + "ang": "Old English (ca. 
450-1100)", + "anh": "Nend", + "ani": "Andi", + "anj": "Anor", + "ank": "Goemai", + "anl": "Anu-Hkongso Chin", + "anm": "Anal", + "ann": "Obolo", + "ano": "Andoque", + "anp": "Angika", + "anq": "Jarawa (India)", + "anr": "Andh", + "ans": "Anserma", + "ant": "Antakarinya; Antikarinya", + "anu": "Anuak", + "anv": "Denya", + "anw": "Anaang", + "anx": "Andra-Hus", + "any": "Anyin", + "anz": "Anem", + "aoa": "Angolar", + "aob": "Abom", + "aoc": "Pemon", + "aod": "Andarum", + "aoe": "Angal Enen", + "aof": "Bragat", + "aog": "Angoram", + "aoi": "Anindilyakwa", + "aoj": "Mufian", + "aok": "Arhö", + "aol": "Alor", + "aom": "Ömie", + "aon": "Bumbita Arapesh", + "aor": "Aore", + "aos": "Taikat", + "aot": "Atong (India); A'tong", + "aou": "A'ou", + "aox": "Atorada", + "aoz": "Uab Meto", + "apa": "Apache languages", + "apb": "Sa'a", + "apc": "North Levantine Arabic", + "apd": "Sudanese Arabic", + "ape": "Bukiyip", + "apf": "Pahanan Agta", + "apg": "Ampanang", + "aph": "Athpariya", + "api": "Apiaká", + "apj": "Jicarilla Apache", + "apk": "Kiowa Apache", + "apl": "Lipan Apache", + "apm": "Mescalero-Chiricahua Apache", + "apn": "Apinayé", + "apo": "Ambul", + "app": "Apma", + "apq": "A-Pucikwar", + "apr": "Arop-Lokep", + "aps": "Arop-Sissano", + "apt": "Apatani", + "apu": "Apurinã", + "apv": "Alapmunte", + "apw": "Western Apache", + "apx": "Aputai", + "apy": "Apalaí", + "apz": "Safeyoka", + "aqa": "Alacalufan languages", + "aqc": "Archi", + "aqd": "Ampari Dogon", + "aqg": "Arigidi", + "aqk": "Aninka", + "aql": "Algic languages", + "aqm": "Atohwaim", + "aqn": "Northern Alta", + "aqp": "Atakapa", + "aqr": "Arhâ", + "aqt": "Angaité", + "aqz": "Akuntsu", + "ar": "Arabic", + "arb": "Standard Arabic", + "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", + "ard": "Arabana", + "are": "Western Arrarnta", + "arh": "Arhuaco", + "ari": "Arikara", + "arj": "Arapaso", + "ark": "Arikapú", + "arl": "Arabela", + "arn": "Mapudungun; Mapuche", + "aro": "Araona", + "arp": "Arapaho", + "arq": "Algerian Arabic", + "arr": "Karo (Brazil)", + "ars": "Najdi Arabic", + "art": "Artificial languages", + "aru": "Aruá (Amazonas State); Arawá", + "arv": "Arbore", + "arw": "Arawak", + "arx": "Aruá (Rodonia State)", + "ary": "Moroccan Arabic", + "arz": "Egyptian Arabic", + "as": "Assamese", + "asa": "Asu (Tanzania)", + "asb": "Assiniboine", + "asc": "Casuarina Coast Asmat", + "ase": "American Sign Language", + "asf": "Auslan; Australian Sign Language", + "asg": "Cishingini", + "ash": "Abishira", + "asi": "Buruwai", + "asj": "Sari", + "ask": "Ashkun", + "asl": "Asilulu", + "asn": "Xingú Asuriní", + "aso": "Dano", + "asp": "Algerian Sign Language", + "asq": "Austrian Sign Language", + "asr": "Asuri", + "ass": "Ipulo", + "ast": "Asturian; Asturleonese; Bable; Leonese", + "asu": "Tocantins Asurini", + "asv": "Asoa", + "asw": "Australian Aborigines Sign Language", + "asx": "Muratayak", + "asy": "Yaosakor Asmat", + "asz": "As", + "ata": "Pele-Ata", + "atb": "Zaiwa", + "atc": "Atsahuaca", + "atd": "Ata Manobo", + "ate": "Atemble", + "atg": "Ivbie North-Okpela-Arhe", + "ath": "Athapascan languages", + "ati": "Attié", + "atj": "Atikamekw", + "atk": "Ati", + "atl": "Mt. 
Iraya Agta", + "atm": "Ata", + "atn": "Ashtiani", + "ato": "Atong (Cameroon)", + "atp": "Pudtol Atta", + "atq": "Aralle-Tabulahan", + "atr": "Waimiri-Atroari", + "ats": "Gros Ventre", + "att": "Pamplona Atta", + "atu": "Reel", + "atv": "Northern Altai", + "atw": "Atsugewi", + "atx": "Arutani", + "aty": "Aneityum", + "atz": "Arta", + "aua": "Asumboa", + "aub": "Alugu", + "auc": "Waorani", + "aud": "Anuta", + "auf": "Arauan languages", + "aug": "Aguna", + "auh": "Aushi", + "aui": "Anuki", + "auj": "Awjilah", + "auk": "Heyo", + "aul": "Aulua", + "aum": "Asu (Nigeria)", + "aun": "Molmo One", + "auo": "Auyokawa", + "aup": "Makayam", + "auq": "Anus; Korur", + "aur": "Aruek", + "aus": "Australian languages", + "aut": "Austral", + "auu": "Auye", + "auw": "Awyi", + "aux": "Aurá", + "auy": "Awiyaana", + "auz": "Uzbeki Arabic", + "av": "Avaric", + "avb": "Avau", + "avd": "Alviri-Vidari", + "avi": "Avikam", + "avk": "Kotava", + "avl": "Eastern Egyptian Bedawi Arabic", + "avm": "Angkamuthi", + "avn": "Avatime", + "avo": "Agavotaguerra", + "avs": "Aushiri", + "avt": "Au", + "avu": "Avokaya", + "avv": "Avá-Canoeiro", + "awa": "Awadhi", + "awb": "Awa (Papua New Guinea)", + "awc": "Cicipu", + "awd": "Arawakan languages", + "awe": "Awetí", + "awg": "Anguthimri", + "awh": "Awbono", + "awi": "Aekyom", + "awk": "Awabakal", + "awm": "Arawum", + "awn": "Awngi", + "awo": "Awak", + "awr": "Awera", + "aws": "South Awyu", + "awt": "Araweté", + "awu": "Central Awyu", + "awv": "Jair Awyu", + "aww": "Awun", + "awx": "Awara", + "awy": "Edera Awyu", + "axb": "Abipon", + "axe": "Ayerrerenge", + "axg": "Mato Grosso Arára", + "axk": "Yaka (Central African Republic)", + "axl": "Lower Southern Aranda", + "axm": "Middle Armenian", + "axx": "Xârâgurè", + "ay": "Aymara", + "aya": "Awar", + "ayb": "Ayizo Gbe", + "ayc": "Southern Aymara", + "ayd": "Ayabadhu", + "aye": "Ayere", + "ayg": "Ginyanga", + "ayh": "Hadrami Arabic", + "ayi": "Leyigha", + "ayk": "Akuku", + "ayl": "Libyan Arabic", + "ayn": "Sanaani Arabic", + "ayo": "Ayoreo", + "ayp": "North Mesopotamian Arabic", + "ayq": "Ayi (Papua New Guinea)", + "ayr": "Central Aymara", + "ays": "Sorsogon Ayta", + "ayt": "Magbukun Ayta", + "ayu": "Ayu", + "ayz": "Mai Brat", + "az": "Azerbaijani", + "aza": "Azha", + "azb": "South Azerbaijani", + "azc": "Uto-Aztecan languages", + "azd": "Eastern Durango Nahuatl", + "azg": "San Pedro Amuzgos Amuzgo", + "azj": "North Azerbaijani", + "azm": "Ipalapa Amuzgo", + "azn": "Western Durango Nahuatl", + "azo": "Awing", + "azt": "Faire Atta", + "azz": "Highland Puebla Nahuatl", + "ba": "Bashkir", + "baa": "Babatana", + "bab": "Bainouk-Gunyuño", + "bac": "Badui", + "bad": "Banda languages", + "bae": "Baré", + "baf": "Nubaca", + "bag": "Tuki", + "bah": "Bahamas Creole English", + "bai": "Bamileke languages", + "baj": "Barakai", + "bal": "Baluchi", + "ban": "Balinese", + "bao": "Waimaha", + "bap": "Bantawa", + "bar": "Bavarian", + "bas": "Basa (Cameroon)", + "bat": "Baltic languages", + "bau": "Bada (Nigeria)", + "bav": "Vengo", + "baw": "Bambili-Bambui", + "bax": "Bamun", + "bay": "Batuley", + "bba": "Baatonum", + "bbb": "Barai", + "bbc": "Batak Toba", + "bbd": "Bau", + "bbe": "Bangba", + "bbf": "Baibai", + "bbg": "Barama", + "bbh": "Bugan", + "bbi": "Barombi", + "bbj": "Ghomálá'", + "bbk": "Babanki", + "bbl": "Bats", + "bbm": "Babango", + "bbn": "Uneapa", + "bbo": "Northern Bobo Madaré; Konabéré", + "bbp": "West Central Banda", + "bbq": "Bamali", + "bbr": "Girawa", + "bbs": "Bakpinka", + "bbt": "Mburku", + "bbu": "Kulung (Nigeria)", + "bbv": "Karnai", 
+ "bbw": "Baba", + "bbx": "Bubia", + "bby": "Befang", + "bca": "Central Bai", + "bcb": "Bainouk-Samik", + "bcc": "Southern Balochi", + "bcd": "North Babar", + "bce": "Bamenyam", + "bcf": "Bamu", + "bcg": "Baga Pokur", + "bch": "Bariai", + "bci": "Baoulé", + "bcj": "Bardi", + "bck": "Bunuba", + "bcl": "Central Bikol", + "bcm": "Bannoni", + "bcn": "Bali (Nigeria)", + "bco": "Kaluli", + "bcp": "Bali (Democratic Republic of Congo)", + "bcq": "Bench", + "bcr": "Babine", + "bcs": "Kohumono", + "bct": "Bendi", + "bcu": "Awad Bing", + "bcv": "Shoo-Minda-Nye", + "bcw": "Bana", + "bcy": "Bacama", + "bcz": "Bainouk-Gunyaamolo", + "bda": "Bayot", + "bdb": "Basap", + "bdc": "Emberá-Baudó", + "bdd": "Bunama", + "bde": "Bade", + "bdf": "Biage", + "bdg": "Bonggi", + "bdh": "Baka (South Sudan)", + "bdi": "Burun", + "bdj": "Bai (South Sudan); Bai", + "bdk": "Budukh", + "bdl": "Indonesian Bajau", + "bdm": "Buduma", + "bdn": "Baldemu", + "bdo": "Morom", + "bdp": "Bende", + "bdq": "Bahnar", + "bdr": "West Coast Bajau", + "bds": "Burunge", + "bdt": "Bokoto", + "bdu": "Oroko", + "bdv": "Bodo Parja", + "bdw": "Baham", + "bdx": "Budong-Budong", + "bdy": "Bandjalang", + "bdz": "Badeshi", + "be": "Belarusian", + "bea": "Beaver", + "beb": "Bebele", + "bec": "Iceve-Maci", + "bed": "Bedoanas", + "bee": "Byangsi", + "bef": "Benabena", + "beg": "Belait", + "beh": "Biali", + "bei": "Bekati'", + "bej": "Beja; Bedawiyet", + "bek": "Bebeli", + "bem": "Bemba (Zambia)", + "beo": "Beami", + "bep": "Besoa", + "beq": "Beembe", + "ber": "Berber languages", + "bes": "Besme", + "bet": "Guiberoua Béte", + "beu": "Blagar", + "bev": "Daloa Bété", + "bew": "Betawi", + "bex": "Jur Modo", + "bey": "Beli (Papua New Guinea)", + "bez": "Bena (Tanzania)", + "bfa": "Bari", + "bfb": "Pauri Bareli", + "bfc": "Panyi Bai; Northern Bai", + "bfd": "Bafut", + "bfe": "Betaf; Tena", + "bff": "Bofi", + "bfg": "Busang Kayan", + "bfh": "Blafe", + "bfi": "British Sign Language", + "bfj": "Bafanji", + "bfk": "Ban Khor Sign Language", + "bfl": "Banda-Ndélé", + "bfm": "Mmen", + "bfn": "Bunak", + "bfo": "Malba Birifor", + "bfp": "Beba", + "bfq": "Badaga", + "bfr": "Bazigar", + "bfs": "Southern Bai", + "bft": "Balti", + "bfu": "Gahri", + "bfw": "Bondo", + "bfx": "Bantayanon", + "bfy": "Bagheli", + "bfz": "Mahasu Pahari", + "bg": "Bulgarian", + "bga": "Gwamhi-Wuri", + "bgb": "Bobongko", + "bgc": "Haryanvi", + "bgd": "Rathwi Bareli", + "bge": "Bauria", + "bgf": "Bangandu", + "bgg": "Bugun", + "bgi": "Giangan", + "bgj": "Bangolan", + "bgk": "Bit; Buxinhua", + "bgl": "Bo (Laos)", + "bgn": "Western Balochi", + "bgo": "Baga Koga", + "bgp": "Eastern Balochi", + "bgq": "Bagri", + "bgr": "Bawm Chin", + "bgs": "Tagabawa", + "bgt": "Bughotu", + "bgu": "Mbongno", + "bgv": "Warkay-Bipim", + "bgw": "Bhatri", + "bgx": "Balkan Gagauz Turkish", + "bgy": "Benggoi", + "bgz": "Banggai", + "bh": "Bihari languages", + "bha": "Bharia", + "bhb": "Bhili", + "bhc": "Biga", + "bhd": "Bhadrawahi", + "bhe": "Bhaya", + "bhf": "Odiai", + "bhg": "Binandere", + "bhh": "Bukharic", + "bhi": "Bhilali", + "bhj": "Bahing", + "bhl": "Bimin", + "bhm": "Bathari", + "bhn": "Bohtan Neo-Aramaic", + "bho": "Bhojpuri", + "bhp": "Bima", + "bhq": "Tukang Besi South", + "bhr": "Bara Malagasy", + "bhs": "Buwal", + "bht": "Bhattiyali", + "bhu": "Bhunjia", + "bhv": "Bahau", + "bhw": "Biak", + "bhx": "Bhalay", + "bhy": "Bhele", + "bhz": "Bada (Indonesia)", + "bi": "Bislama", + "bia": "Badimaya", + "bib": "Bissa; Bisa", + "bid": "Bidiyo", + "bie": "Bepour", + "bif": "Biafada", + "big": "Biangai", + "bik": "Bikol", 
+ "bil": "Bile", + "bim": "Bimoba", + "bin": "Bini; Edo", + "bio": "Nai", + "bip": "Bila", + "biq": "Bipi", + "bir": "Bisorio", + "bit": "Berinomo", + "biu": "Biete", + "biv": "Southern Birifor", + "biw": "Kol (Cameroon)", + "bix": "Bijori", + "biy": "Birhor", + "biz": "Baloi", + "bja": "Budza", + "bjb": "Banggarla", + "bjc": "Bariji", + "bje": "Biao-Jiao Mien", + "bjf": "Barzani Jewish Neo-Aramaic", + "bjg": "Bidyogo", + "bjh": "Bahinemo", + "bji": "Burji", + "bjj": "Kanauji", + "bjk": "Barok", + "bjl": "Bulu (Papua New Guinea)", + "bjm": "Bajelani", + "bjn": "Banjar", + "bjo": "Mid-Southern Banda", + "bjp": "Fanamaket", + "bjr": "Binumarien", + "bjs": "Bajan", + "bjt": "Balanta-Ganja", + "bju": "Busuu", + "bjv": "Bedjond", + "bjw": "Bakwé", + "bjx": "Banao Itneg", + "bjy": "Bayali", + "bjz": "Baruga", + "bka": "Kyak", + "bkc": "Baka (Cameroon)", + "bkd": "Binukid; Talaandig", + "bkf": "Beeke", + "bkg": "Buraka", + "bkh": "Bakoko", + "bki": "Baki", + "bkj": "Pande", + "bkk": "Brokskat", + "bkl": "Berik", + "bkm": "Kom (Cameroon)", + "bkn": "Bukitan", + "bko": "Kwa'", + "bkp": "Boko (Democratic Republic of Congo)", + "bkq": "Bakairí", + "bkr": "Bakumpai", + "bks": "Northern Sorsoganon", + "bkt": "Boloki", + "bku": "Buhid", + "bkv": "Bekwarra", + "bkw": "Bekwel", + "bkx": "Baikeno", + "bky": "Bokyi", + "bkz": "Bungku", + "bla": "Siksika", + "blb": "Bilua", + "blc": "Bella Coola", + "bld": "Bolango", + "ble": "Balanta-Kentohe", + "blf": "Buol", + "blh": "Kuwaa", + "bli": "Bolia", + "blj": "Bolongan", + "blk": "Pa'o Karen; Pa'O", + "bll": "Biloxi", + "blm": "Beli (South Sudan)", + "bln": "Southern Catanduanes Bikol", + "blo": "Anii", + "blp": "Blablanga", + "blq": "Baluan-Pam", + "blr": "Blang", + "bls": "Balaesang", + "blt": "Tai Dam", + "blv": "Kibala; Bolo", + "blw": "Balangao", + "blx": "Mag-Indi Ayta", + "bly": "Notre", + "blz": "Balantak", + "bm": "Bambara", + "bma": "Lame", + "bmb": "Bembe", + "bmc": "Biem", + "bmd": "Baga Manduri", + "bme": "Limassa", + "bmf": "Bom-Kim", + "bmg": "Bamwe", + "bmh": "Kein", + "bmi": "Bagirmi", + "bmj": "Bote-Majhi", + "bmk": "Ghayavi", + "bml": "Bomboli", + "bmm": "Northern Betsimisaraka Malagasy", + "bmn": "Bina (Papua New Guinea)", + "bmo": "Bambalang", + "bmp": "Bulgebi", + "bmq": "Bomu", + "bmr": "Muinane", + "bms": "Bilma Kanuri", + "bmt": "Biao Mon", + "bmu": "Somba-Siawari", + "bmv": "Bum", + "bmw": "Bomwali", + "bmx": "Baimak", + "bmz": "Baramu", + "bn": "Bengali; Bangla", + "bna": "Bonerate", + "bnb": "Bookan", + "bnc": "Bontok", + "bnd": "Banda (Indonesia)", + "bne": "Bintauna", + "bnf": "Masiwang", + "bng": "Benga", + "bni": "Bangi", + "bnj": "Eastern Tawbuid", + "bnk": "Bierebo", + "bnl": "Boon", + "bnm": "Batanga", + "bnn": "Bunun", + "bno": "Bantoanon", + "bnp": "Bola", + "bnq": "Bantik", + "bnr": "Butmas-Tur", + "bns": "Bundeli", + "bnt": "Bantu languages", + "bnu": "Bentong", + "bnv": "Bonerif; Beneraf; Edwas", + "bnw": "Bisis", + "bnx": "Bangubangu", + "bny": "Bintulu", + "bnz": "Beezen", + "bo": "Tibetan", + "boa": "Bora", + "bob": "Aweer", + "boe": "Mundabli", + "bof": "Bolon", + "bog": "Bamako Sign Language", + "boh": "Boma", + "boi": "Barbareño", + "boj": "Anjam", + "bok": "Bonjo", + "bol": "Bole", + "bom": "Berom", + "bon": "Bine", + "boo": "Tiemacèwè Bozo", + "bop": "Bonkiman", + "boq": "Bogaya", + "bor": "Borôro", + "bot": "Bongo", + "bou": "Bondei", + "bov": "Tuwuli", + "bow": "Rema", + "box": "Buamu", + "boy": "Bodo (Central African Republic)", + "boz": "Tiéyaxo Bozo", + "bpa": "Daakaka", + "bpc": "Mbuk", + "bpd": 
"Banda-Banda", + "bpe": "Bauni", + "bpg": "Bonggo", + "bph": "Botlikh", + "bpi": "Bagupi", + "bpj": "Binji", + "bpk": "Orowe; 'Ôrôê", + "bpl": "Broome Pearling Lugger Pidgin", + "bpm": "Biyom", + "bpn": "Dzao Min", + "bpo": "Anasi", + "bpp": "Kaure", + "bpq": "Banda Malay", + "bpr": "Koronadal Blaan", + "bps": "Sarangani Blaan", + "bpt": "Barrow Point", + "bpu": "Bongu", + "bpv": "Bian Marind", + "bpw": "Bo (Papua New Guinea)", + "bpx": "Palya Bareli", + "bpy": "Bishnupriya", + "bpz": "Bilba", + "bqa": "Tchumbuli", + "bqb": "Bagusa", + "bqc": "Boko (Benin); Boo", + "bqd": "Bung", + "bqf": "Baga Kaloum", + "bqg": "Bago-Kusuntu", + "bqh": "Baima", + "bqi": "Bakhtiari", + "bqj": "Bandial", + "bqk": "Banda-Mbrès", + "bql": "Bilakura", + "bqm": "Wumboko", + "bqn": "Bulgarian Sign Language", + "bqo": "Balo", + "bqp": "Busa", + "bqq": "Biritai", + "bqr": "Burusu", + "bqs": "Bosngun", + "bqt": "Bamukumbit", + "bqu": "Boguru", + "bqv": "Koro Wachi; Begbere-Ejar", + "bqw": "Buru (Nigeria)", + "bqx": "Baangi", + "bqy": "Bengkala Sign Language", + "bqz": "Bakaka", + "br": "Breton", + "bra": "Braj", + "brb": "Brao; Lave", + "brc": "Berbice Creole Dutch", + "brd": "Baraamu", + "brf": "Bira", + "brg": "Baure", + "brh": "Brahui", + "bri": "Mokpwe", + "brj": "Bieria", + "brk": "Birked", + "brl": "Birwa", + "brm": "Barambu", + "brn": "Boruca", + "bro": "Brokkat", + "brp": "Barapasi", + "brq": "Breri", + "brr": "Birao", + "brs": "Baras", + "brt": "Bitare", + "bru": "Eastern Bru", + "brv": "Western Bru", + "brw": "Bellari", + "brx": "Bodo (India)", + "bry": "Burui", + "brz": "Bilbil", + "bs": "Bosnian", + "bsa": "Abinomn", + "bsb": "Brunei Bisaya", + "bsc": "Bassari; Oniyan", + "bse": "Wushi", + "bsf": "Bauchi", + "bsg": "Bashkardi", + "bsh": "Kati", + "bsi": "Bassossi", + "bsj": "Bangwinji", + "bsk": "Burushaski", + "bsl": "Basa-Gumna", + "bsm": "Busami", + "bsn": "Barasana-Eduria", + "bso": "Buso", + "bsp": "Baga Sitemu", + "bsq": "Bassa", + "bsr": "Bassa-Kontagora", + "bss": "Akoose", + "bst": "Basketo", + "bsu": "Bahonsuai", + "bsv": "Baga Sobané", + "bsw": "Baiso", + "bsx": "Yangkam", + "bsy": "Sabah Bisaya", + "bta": "Bata", + "btc": "Bati (Cameroon)", + "btd": "Batak Dairi", + "bte": "Gamo-Ningi", + "btf": "Birgit", + "btg": "Gagnoa Bété", + "bth": "Biatah Bidayuh", + "bti": "Burate", + "btj": "Bacanese Malay", + "btk": "Batak languages", + "btm": "Batak Mandailing", + "btn": "Ratagnon", + "bto": "Rinconada Bikol", + "btp": "Budibud", + "btq": "Batek", + "btr": "Baetora", + "bts": "Batak Simalungun", + "btt": "Bete-Bendi", + "btu": "Batu", + "btv": "Bateri", + "btw": "Butuanon", + "btx": "Batak Karo", + "bty": "Bobot", + "btz": "Batak Alas-Kluet", + "bua": "Buriat", + "bub": "Bua", + "buc": "Bushi", + "bud": "Ntcham", + "bue": "Beothuk", + "buf": "Bushoong", + "bug": "Buginese", + "buh": "Younuo Bunu", + "bui": "Bongili", + "buj": "Basa-Gurmana", + "buk": "Bugawac", + "bum": "Bulu (Cameroon)", + "bun": "Sherbro", + "buo": "Terei", + "bup": "Busoa", + "buq": "Brem", + "bus": "Bokobaru", + "but": "Bungain", + "buu": "Budu", + "buv": "Bun", + "buw": "Bubi", + "bux": "Boghom", + "buy": "Bullom So", + "buz": "Bukwen", + "bva": "Barein", + "bvb": "Bube", + "bvc": "Baelelea", + "bvd": "Baeggu", + "bve": "Berau Malay", + "bvf": "Boor", + "bvg": "Bonkeng", + "bvh": "Bure", + "bvi": "Belanda Viri", + "bvj": "Baan", + "bvk": "Bukat", + "bvl": "Bolivian Sign Language", + "bvm": "Bamunka", + "bvn": "Buna", + "bvo": "Bolgo", + "bvp": "Bumang", + "bvq": "Birri", + "bvr": "Burarra", + "bvt": "Bati (Indonesia)", + 
"bvu": "Bukit Malay", + "bvv": "Baniva", + "bvw": "Boga", + "bvx": "Dibole", + "bvy": "Baybayanon", + "bvz": "Bauzi", + "bwa": "Bwatoo", + "bwb": "Namosi-Naitasiri-Serua", + "bwc": "Bwile", + "bwd": "Bwaidoka", + "bwe": "Bwe Karen", + "bwf": "Boselewa", + "bwg": "Barwe", + "bwh": "Bishuo", + "bwi": "Baniwa", + "bwj": "Láá Láá Bwamu", + "bwk": "Bauwaki", + "bwl": "Bwela", + "bwm": "Biwat", + "bwn": "Wunai Bunu", + "bwo": "Boro (Ethiopia); Borna (Ethiopia)", + "bwp": "Mandobo Bawah", + "bwq": "Southern Bobo Madaré", + "bwr": "Bura-Pabir", + "bws": "Bomboma", + "bwt": "Bafaw-Balong", + "bwu": "Buli (Ghana)", + "bww": "Bwa", + "bwx": "Bu-Nao Bunu", + "bwy": "Cwi Bwamu", + "bwz": "Bwisi", + "bxa": "Tairaha", + "bxb": "Belanda Bor", + "bxc": "Molengue", + "bxd": "Pela", + "bxe": "Birale", + "bxf": "Bilur; Minigir", + "bxg": "Bangala", + "bxh": "Buhutu", + "bxi": "Pirlatapa", + "bxj": "Bayungu", + "bxk": "Bukusu; Lubukusu", + "bxl": "Jalkunan", + "bxm": "Mongolia Buriat", + "bxn": "Burduna", + "bxo": "Barikanchi", + "bxp": "Bebil", + "bxq": "Beele", + "bxr": "Russia Buriat", + "bxs": "Busam", + "bxu": "China Buriat", + "bxv": "Berakou", + "bxw": "Bankagooma", + "bxz": "Binahari", + "bya": "Batak", + "byb": "Bikya", + "byc": "Ubaghara", + "byd": "Benyadu'", + "bye": "Pouye", + "byf": "Bete", + "byg": "Baygo", + "byh": "Bhujel", + "byi": "Buyu", + "byj": "Bina (Nigeria)", + "byk": "Biao", + "byl": "Bayono", + "bym": "Bidjara", + "byn": "Bilin; Blin", + "byo": "Biyo", + "byp": "Bumaji", + "byq": "Basay", + "byr": "Baruya; Yipma", + "bys": "Burak", + "byt": "Berti", + "byv": "Medumba", + "byw": "Belhariya", + "byx": "Qaqet", + "byz": "Banaro", + "bza": "Bandi", + "bzb": "Andio", + "bzc": "Southern Betsimisaraka Malagasy", + "bzd": "Bribri", + "bze": "Jenaama Bozo", + "bzf": "Boikin", + "bzg": "Babuza", + "bzh": "Mapos Buang", + "bzi": "Bisu", + "bzj": "Belize Kriol English", + "bzk": "Nicaragua Creole English", + "bzl": "Boano (Sulawesi)", + "bzm": "Bolondo", + "bzn": "Boano (Maluku)", + "bzo": "Bozaba", + "bzp": "Kemberano", + "bzq": "Buli (Indonesia)", + "bzr": "Biri", + "bzs": "Brazilian Sign Language", + "bzt": "Brithenig", + "bzu": "Burmeso", + "bzv": "Naami", + "bzw": "Basa (Nigeria)", + "bzx": "Kɛlɛngaxo Bozo", + "bzy": "Obanliku", + "bzz": "Evant", + "ca": "Catalan; Valencian", + "caa": "Chortí", + "cab": "Garifuna", + "cac": "Chuj", + "cad": "Caddo", + "cae": "Lehar; Laalaa", + "caf": "Southern Carrier", + "cag": "Nivaclé", + "cah": "Cahuarano", + "cai": "Central American Indian languages", + "caj": "Chané", + "cak": "Kaqchikel; Cakchiquel", + "cal": "Carolinian", + "cam": "Cemuhî", + "can": "Chambri", + "cao": "Chácobo", + "cap": "Chipaya", + "caq": "Car Nicobarese", + "car": "Galibi Carib", + "cas": "Tsimané", + "cau": "Caucasian languages", + "cav": "Cavineña", + "caw": "Callawalla", + "cax": "Chiquitano", + "cay": "Cayuga", + "caz": "Canichana", + "cba": "Chibchan languages", + "cbb": "Cabiyarí", + "cbc": "Carapana", + "cbd": "Carijona", + "cbg": "Chimila", + "cbi": "Chachi", + "cbj": "Ede Cabe", + "cbk": "Chavacano", + "cbl": "Bualkhaw Chin", + "cbn": "Nyahkur", + "cbo": "Izora", + "cbq": "Tsucuba; Cuba", + "cbr": "Cashibo-Cacataibo", + "cbs": "Cashinahua", + "cbt": "Chayahuita", + "cbu": "Candoshi-Shapra", + "cbv": "Cacua", + "cbw": "Kinabalian", + "cby": "Carabayo", + "ccc": "Chamicuro", + "ccd": "Cafundo Creole", + "cce": "Chopi", + "ccg": "Samba Daka", + "cch": "Atsam", + "ccj": "Kasanga", + "ccl": "Cutchi-Swahili", + "ccm": "Malaccan Creole Malay", + "ccn": "North Caucasian 
languages", + "cco": "Comaltepec Chinantec", + "ccp": "Chakma", + "ccr": "Cacaopera", + "ccs": "South Caucasian languages", + "cda": "Choni", + "cdc": "Chadic languages", + "cdd": "Caddoan languages", + "cde": "Chenchu", + "cdf": "Chiru", + "cdh": "Chambeali", + "cdi": "Chodri", + "cdj": "Churahi", + "cdm": "Chepang", + "cdn": "Chaudangsi", + "cdo": "Min Dong Chinese", + "cdr": "Cinda-Regi-Tiyal", + "cds": "Chadian Sign Language", + "cdy": "Chadong", + "cdz": "Koda", + "ce": "Chechen", + "cea": "Lower Chehalis", + "ceb": "Cebuano", + "ceg": "Chamacoco", + "cek": "Eastern Khumi Chin", + "cel": "Celtic languages", + "cen": "Cen", + "cet": "Centúúm", + "cey": "Ekai Chin", + "cfa": "Dijim-Bwilim", + "cfd": "Cara", + "cfg": "Como Karim", + "cfm": "Falam Chin", + "cga": "Changriwa", + "cgc": "Kagayanen", + "cgg": "Chiga", + "cgk": "Chocangacakha", + "ch": "Chamorro", + "chb": "Chibcha", + "chc": "Catawba", + "chd": "Highland Oaxaca Chontal", + "chf": "Tabasco Chontal", + "chg": "Chagatai", + "chh": "Chinook", + "chj": "Ojitlán Chinantec", + "chk": "Chuukese", + "chl": "Cahuilla", + "chm": "Mari (Russia)", + "chn": "Chinook jargon", + "cho": "Choctaw", + "chp": "Chipewyan; Dene Suline", + "chq": "Quiotepec Chinantec", + "chr": "Cherokee", + "cht": "Cholón", + "chw": "Chuwabu", + "chx": "Chantyal", + "chy": "Cheyenne", + "chz": "Ozumacín Chinantec", + "cia": "Cia-Cia", + "cib": "Ci Gbe", + "cic": "Chickasaw", + "cid": "Chimariko", + "cie": "Cineni", + "cih": "Chinali", + "cik": "Chitkuli Kinnauri", + "cim": "Cimbrian", + "cin": "Cinta Larga", + "cip": "Chiapanec", + "cir": "Tiri; Haméa; Méa", + "ciw": "Chippewa", + "ciy": "Chaima", + "cja": "Western Cham", + "cje": "Chru", + "cjh": "Upper Chehalis", + "cji": "Chamalal", + "cjk": "Chokwe", + "cjm": "Eastern Cham", + "cjn": "Chenapian", + "cjo": "Ashéninka Pajonal", + "cjp": "Cabécar", + "cjs": "Shor", + "cjv": "Chuave", + "cjy": "Jinyu Chinese", + "ckb": "Central Kurdish", + "ckh": "Chak", + "ckl": "Cibak", + "ckm": "Chakavian", + "ckn": "Kaang Chin", + "cko": "Anufo", + "ckq": "Kajakse", + "ckr": "Kairak", + "cks": "Tayo", + "ckt": "Chukot", + "cku": "Koasati", + "ckv": "Kavalan", + "ckx": "Caka", + "cky": "Cakfem-Mushere", + "ckz": "Cakchiquel-Quiché Mixed Language", + "cla": "Ron", + "clc": "Chilcotin", + "cld": "Chaldean Neo-Aramaic", + "cle": "Lealao Chinantec", + "clh": "Chilisso", + "cli": "Chakali", + "clj": "Laitu Chin", + "clk": "Idu-Mishmi", + "cll": "Chala", + "clm": "Clallam", + "clo": "Lowland Oaxaca Chontal", + "clt": "Lautu Chin", + "clu": "Caluyanun", + "clw": "Chulym", + "cly": "Eastern Highland Chatino", + "cma": "Maa", + "cmc": "Chamic languages", + "cme": "Cerma", + "cmg": "Classical Mongolian", + "cmi": "Emberá-Chamí", + "cml": "Campalagian", + "cmm": "Michigamea", + "cmn": "Mandarin Chinese", + "cmo": "Central Mnong", + "cmr": "Mro-Khimi Chin", + "cms": "Messapic", + "cmt": "Camtho", + "cna": "Changthang", + "cnb": "Chinbon Chin", + "cnc": "Côông", + "cng": "Northern Qiang", + "cnh": "Hakha Chin; Haka Chin", + "cni": "Asháninka", + "cnk": "Khumi Chin", + "cnl": "Lalana Chinantec", + "cno": "Con", + "cnp": "Northern Ping Chinese; Northern Pinghua", + "cnq": "Chung", + "cnr": "Montenegrin", + "cns": "Central Asmat", + "cnt": "Tepetotutla Chinantec", + "cnu": "Chenoua", + "cnw": "Ngawn Chin", + "cnx": "Middle Cornish", + "co": "Corsican", + "coa": "Cocos Islands Malay", + "cob": "Chicomuceltec", + "coc": "Cocopa", + "cod": "Cocama-Cocamilla", + "coe": "Koreguaje", + "cof": "Colorado", + "cog": "Chong", + "coh": 
"Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma", + "coj": "Cochimi", + "cok": "Santa Teresa Cora", + "col": "Columbia-Wenatchi", + "com": "Comanche", + "con": "Cofán", + "coo": "Comox", + "cop": "Coptic", + "coq": "Coquille", + "cot": "Caquinte", + "cou": "Wamey", + "cov": "Cao Miao", + "cow": "Cowlitz", + "cox": "Nanti", + "coz": "Chochotec", + "cpa": "Palantla Chinantec", + "cpb": "Ucayali-Yurúa Ashéninka", + "cpc": "Ajyíninka Apurucayali", + "cpe": "English-based creoles and pidgins", + "cpf": "French-based creoles and pidgins", + "cpg": "Cappadocian Greek", + "cpi": "Chinese Pidgin English", + "cpn": "Cherepon", + "cpo": "Kpeego", + "cpp": "Portuguese-based creoles and pidgins", + "cps": "Capiznon", + "cpu": "Pichis Ashéninka", + "cpx": "Pu-Xian Chinese", + "cpy": "South Ucayali Ashéninka", + "cqd": "Chuanqiandian Cluster Miao", + "cr": "Cree", + "cra": "Chara", + "crb": "Island Carib", + "crc": "Lonwolwol", + "crd": "Coeur d'Alene", + "crf": "Caramanta", + "crg": "Michif", + "crh": "Crimean Tatar; Crimean Turkish", + "cri": "Sãotomense", + "crj": "Southern East Cree", + "crk": "Plains Cree", + "crl": "Northern East Cree", + "crm": "Moose Cree", + "crn": "El Nayar Cora", + "cro": "Crow", + "crp": "Creoles and pidgins", + "crq": "Iyo'wujwa Chorote", + "crr": "Carolina Algonquian", + "crs": "Seselwa Creole French", + "crt": "Iyojwa'ja Chorote", + "crv": "Chaura", + "crw": "Chrau", + "crx": "Carrier", + "cry": "Cori", + "crz": "Cruzeño", + "cs": "Czech", + "csa": "Chiltepec Chinantec", + "csb": "Kashubian", + "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana", + "csd": "Chiangmai Sign Language", + "cse": "Czech Sign Language", + "csf": "Cuba Sign Language", + "csg": "Chilean Sign Language", + "csh": "Asho Chin", + "csi": "Coast Miwok", + "csj": "Songlai Chin", + "csk": "Jola-Kasa", + "csl": "Chinese Sign Language", + "csm": "Central Sierra Miwok", + "csn": "Colombian Sign Language", + "cso": "Sochiapam Chinantec; Sochiapan Chinantec", + "csp": "Southern Ping Chinese; Southern Pinghua", + "csq": "Croatia Sign Language", + "csr": "Costa Rican Sign Language", + "css": "Southern Ohlone", + "cst": "Northern Ohlone", + "csu": "Central Sudanic languages", + "csv": "Sumtu Chin", + "csw": "Swampy Cree", + "csx": "Cambodian Sign Language", + "csy": "Siyin Chin", + "csz": "Coos", + "cta": "Tataltepec Chatino", + "ctc": "Chetco", + "ctd": "Tedim Chin", + "cte": "Tepinapa Chinantec", + "ctg": "Chittagonian", + "cth": "Thaiphum Chin", + "ctl": "Tlacoatzintepec Chinantec", + "ctm": "Chitimacha", + "ctn": "Chhintange", + "cto": "Emberá-Catío", + "ctp": "Western Highland Chatino", + "cts": "Northern Catanduanes Bikol", + "ctt": "Wayanad Chetti", + "ctu": "Chol", + "cty": "Moundadan Chetty", + "ctz": "Zacatepec Chatino", + "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic", + "cua": "Cua", + "cub": "Cubeo", + "cuc": "Usila Chinantec", + "cuh": "Chuka; Gichuka", + "cui": "Cuiba", + "cuj": "Mashco Piro", + "cuk": "San Blas Kuna", + "cul": "Culina; Kulina", + "cuo": "Cumanagoto", + "cup": "Cupeño", + "cuq": "Cun", + "cur": "Chhulung", + "cus": "Cushitic languages", + "cut": "Teutila Cuicatec", + "cuu": "Tai Ya", + "cuv": "Cuvok", + "cuw": "Chukwa", + "cux": "Tepeuxila Cuicatec", + "cuy": "Cuitlatec", + "cv": "Chuvash", + "cvg": "Chug", + "cvn": "Valle Nacional Chinantec", + "cwa": "Kabwa", + "cwb": "Maindo", + "cwd": "Woods Cree", + "cwe": "Kwere", + "cwg": "Chewong; Cheq Wong", + "cwt": "Kuwaataay", + "cy": "Welsh", + "cya": 
"Nopala Chatino", + "cyb": "Cayubaba", + "cyo": "Cuyonon", + "czh": "Huizhou Chinese", + "czk": "Knaanic", + "czn": "Zenzontepec Chatino", + "czo": "Min Zhong Chinese", + "czt": "Zotung Chin", + "da": "Danish", + "daa": "Dangaléat", + "dac": "Dambi", + "dad": "Marik", + "dae": "Duupa", + "dag": "Dagbani", + "dah": "Gwahatike", + "dai": "Day", + "daj": "Dar Fur Daju", + "dak": "Dakota", + "dal": "Dahalo", + "dam": "Damakawa", + "dao": "Daai Chin", + "daq": "Dandami Maria", + "dar": "Dargwa", + "das": "Daho-Doo", + "dau": "Dar Sila Daju", + "dav": "Taita; Dawida", + "daw": "Davawenyo", + "dax": "Dayi", + "day": "Land Dayak languages", + "daz": "Dao", + "dba": "Bangime", + "dbb": "Deno", + "dbd": "Dadiya", + "dbe": "Dabe", + "dbf": "Edopi", + "dbg": "Dogul Dom Dogon", + "dbi": "Doka", + "dbj": "Ida'an", + "dbl": "Dyirbal", + "dbm": "Duguri", + "dbn": "Duriankere", + "dbo": "Dulbu", + "dbp": "Duwai", + "dbq": "Daba", + "dbr": "Dabarre", + "dbt": "Ben Tey Dogon", + "dbu": "Bondum Dom Dogon", + "dbv": "Dungu", + "dbw": "Bankan Tey Dogon", + "dby": "Dibiyaso", + "dcc": "Deccan", + "dcr": "Negerhollands", + "dda": "Dadi Dadi", + "ddd": "Dongotono", + "dde": "Doondo", + "ddg": "Fataluku", + "ddi": "West Goodenough", + "ddj": "Jaru", + "ddn": "Dendi (Benin)", + "ddo": "Dido", + "ddr": "Dhudhuroa", + "dds": "Donno So Dogon", + "ddw": "Dawera-Daweloor", + "de": "German", + "dec": "Dagik", + "ded": "Dedua", + "dee": "Dewoin", + "def": "Dezfuli", + "deg": "Degema", + "deh": "Dehwari", + "dei": "Demisa", + "dek": "Dek", + "del": "Delaware", + "dem": "Dem", + "den": "Slave (Athapascan)", + "dep": "Pidgin Delaware", + "deq": "Dendi (Central African Republic)", + "der": "Deori", + "des": "Desano", + "dev": "Domung", + "dez": "Dengese", + "dga": "Southern Dagaare", + "dgb": "Bunoge Dogon", + "dgc": "Casiguran Dumagat Agta", + "dgd": "Dagaari Dioula", + "dge": "Degenan", + "dgg": "Doga", + "dgh": "Dghwede", + "dgi": "Northern Dagara", + "dgk": "Dagba", + "dgl": "Andaandi; Dongolawi", + "dgn": "Dagoman", + "dgo": "Dogri (individual language)", + "dgr": "Dogrib; Tłı̨chǫ", + "dgs": "Dogoso", + "dgt": "Ndra'ngith", + "dgw": "Daungwurrung", + "dgx": "Doghoro", + "dgz": "Daga", + "dhd": "Dhundari", + "dhg": "Dhangu-Djangu; Dhangu; Djangu", + "dhi": "Dhimal", + "dhl": "Dhalandji", + "dhm": "Zemba", + "dhn": "Dhanki", + "dho": "Dhodia", + "dhr": "Dhargari", + "dhs": "Dhaiso", + "dhu": "Dhurga", + "dhv": "Dehu; Drehu", + "dhw": "Dhanwar (Nepal)", + "dhx": "Dhungaloo", + "dia": "Dia", + "dib": "South Central Dinka", + "dic": "Lakota Dida", + "did": "Didinga", + "dif": "Dieri; Diyari", + "dig": "Digo; Chidigo", + "dih": "Kumiai", + "dii": "Dimbong", + "dij": "Dai", + "dik": "Southwestern Dinka", + "dil": "Dilling", + "dim": "Dime", + "din": "Dinka", + "dio": "Dibo", + "dip": "Northeastern Dinka", + "diq": "Dimli (individual language)", + "dir": "Dirim", + "dis": "Dimasa", + "diu": "Diriku", + "diw": "Northwestern Dinka", + "dix": "Dixon Reef", + "diy": "Diuwe", + "diz": "Ding", + "dja": "Djadjawurrung", + "djb": "Djinba", + "djc": "Dar Daju Daju", + "djd": "Djamindjung; Ngaliwurru", + "dje": "Zarma", + "djf": "Djangun", + "dji": "Djinang", + "djj": "Djeebbana", + "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge", + "djm": "Jamsay Dogon", + "djn": "Jawoyn; Djauan", + "djo": "Jangkang", + "djr": "Djambarrpuyngu", + "dju": "Kapriman", + "djw": "Djawi", + "dka": "Dakpakha", + "dkg": "Kadung", + "dkk": "Dakka", + "dkr": "Kuijau", + "dks": "Southeastern Dinka", + "dkx": "Mazagway", + "dlg": "Dolgan", + "dlk": "Dahalik", 
+ "dlm": "Dalmatian", + "dln": "Darlong", + "dma": "Duma", + "dmb": "Mombo Dogon", + "dmc": "Gavak", + "dmd": "Madhi Madhi", + "dme": "Dugwor", + "dmf": "Medefaidrin", + "dmg": "Upper Kinabatangan", + "dmk": "Domaaki", + "dml": "Dameli", + "dmm": "Dama", + "dmn": "Mande languages", + "dmo": "Kemedzung", + "dmr": "East Damar", + "dms": "Dampelas", + "dmu": "Dubu; Tebi", + "dmv": "Dumpas", + "dmw": "Mudburra", + "dmx": "Dema", + "dmy": "Demta; Sowari", + "dna": "Upper Grand Valley Dani", + "dnd": "Daonda", + "dne": "Ndendeule", + "dng": "Dungan", + "dni": "Lower Grand Valley Dani", + "dnj": "Dan", + "dnk": "Dengka", + "dnn": "Dzùùngoo", + "dno": "Ndrulo; Northern Lendu", + "dnr": "Danaru", + "dnt": "Mid Grand Valley Dani", + "dnu": "Danau", + "dnv": "Danu", + "dnw": "Western Dani", + "dny": "Dení", + "doa": "Dom", + "dob": "Dobu", + "doc": "Northern Dong", + "doe": "Doe", + "dof": "Domu", + "doh": "Dong", + "doi": "Dogri (macrolanguage)", + "dok": "Dondo", + "dol": "Doso", + "don": "Toura (Papua New Guinea)", + "doo": "Dongo", + "dop": "Lukpa", + "doq": "Dominican Sign Language", + "dor": "Dori'o", + "dos": "Dogosé", + "dot": "Dass", + "dov": "Dombe", + "dow": "Doyayo", + "dox": "Bussa", + "doy": "Dompo", + "doz": "Dorze", + "dpp": "Papar", + "dra": "Dravidian languages", + "drb": "Dair", + "drc": "Minderico", + "drd": "Darmiya", + "dre": "Dolpo", + "drg": "Rungus", + "dri": "C'Lela", + "drl": "Paakantyi", + "drn": "West Damar", + "dro": "Daro-Matu Melanau", + "drq": "Dura", + "drs": "Gedeo", + "drt": "Drents", + "dru": "Rukai", + "dry": "Darai", + "dsb": "Lower Sorbian", + "dse": "Dutch Sign Language", + "dsh": "Daasanach", + "dsi": "Disa", + "dsl": "Danish Sign Language", + "dsn": "Dusner", + "dso": "Desiya", + "dsq": "Tadaksahak", + "dsz": "Mardin Sign Language", + "dta": "Daur", + "dtb": "Labuk-Kinabatangan Kadazan", + "dtd": "Ditidaht", + "dth": "Adithinngithigh", + "dti": "Ana Tinga Dogon", + "dtk": "Tene Kan Dogon", + "dtm": "Tomo Kan Dogon", + "dtn": "Daatsʼíin", + "dto": "Tommo So Dogon", + "dtp": "Kadazan Dusun; Central Dusun", + "dtr": "Lotud", + "dts": "Toro So Dogon", + "dtt": "Toro Tegu Dogon", + "dtu": "Tebul Ure Dogon", + "dty": "Dotyali", + "dua": "Duala", + "dub": "Dubli", + "duc": "Duna", + "due": "Umiray Dumaget Agta", + "duf": "Dumbea; Drubea", + "dug": "Duruma; Chiduruma", + "duh": "Dungra Bhil", + "dui": "Dumun", + "duk": "Uyajitaya", + "dul": "Alabat Island Agta", + "dum": "Middle Dutch (ca. 
1050-1350)", + "dun": "Dusun Deyah", + "duo": "Dupaninan Agta", + "dup": "Duano", + "duq": "Dusun Malang", + "dur": "Dii", + "dus": "Dumi", + "duu": "Drung", + "duv": "Duvle", + "duw": "Dusun Witu", + "dux": "Duungooma", + "duy": "Dicamay Agta", + "duz": "Duli-Gey", + "dv": "Dhivehi; Divehi; Maldivian", + "dva": "Duau", + "dwa": "Diri", + "dwk": "Dawik Kui", + "dwr": "Dawro", + "dws": "Dutton World Speedwords", + "dwu": "Dhuwal", + "dww": "Dawawa", + "dwy": "Dhuwaya", + "dwz": "Dewas Rai", + "dya": "Dyan", + "dyb": "Dyaberdyaber", + "dyd": "Dyugun", + "dyg": "Villa Viciosa Agta", + "dyi": "Djimini Senoufo", + "dym": "Yanda Dom Dogon", + "dyn": "Dyangadi; Dhanggatti", + "dyo": "Jola-Fonyi", + "dyu": "Dyula", + "dyy": "Djabugay; Dyaabugay", + "dz": "Dzongkha", + "dza": "Tunzu", + "dze": "Djiwarli", + "dzg": "Dazaga", + "dzl": "Dzalakha", + "dzn": "Dzando", + "eaa": "Karenggapa", + "ebc": "Beginci", + "ebg": "Ebughu", + "ebk": "Eastern Bontok", + "ebo": "Teke-Ebo", + "ebr": "Ebrié", + "ebu": "Embu; Kiembu", + "ecr": "Eteocretan", + "ecs": "Ecuadorian Sign Language", + "ecy": "Eteocypriot", + "ee": "Ewe", + "eee": "E", + "efa": "Efai", + "efe": "Efe", + "efi": "Efik", + "ega": "Ega", + "egl": "Emilian", + "egm": "Benamanga", + "ego": "Eggon", + "egx": "Egyptian languages", + "egy": "Egyptian (Ancient)", + "ehs": "Miyakubo Sign Language", + "ehu": "Ehueun", + "eip": "Eipomek", + "eit": "Eitiep", + "eiv": "Askopan", + "eja": "Ejamat", + "eka": "Ekajuk", + "eke": "Ekit", + "ekg": "Ekari", + "eki": "Eki", + "ekk": "Standard Estonian", + "ekl": "Kol (Bangladesh); Kol", + "ekm": "Elip", + "eko": "Koti", + "ekp": "Ekpeye", + "ekr": "Yace", + "eky": "Eastern Kayah", + "el": "Modern Greek (1453-)", + "ele": "Elepi", + "elh": "El Hugeirat", + "eli": "Nding", + "elk": "Elkei", + "elm": "Eleme", + "elo": "El Molo", + "elu": "Elu", + "elx": "Elamite", + "ema": "Emai-Iuleha-Ora", + "emb": "Embaloh", + "eme": "Emerillon", + "emg": "Eastern Meohang", + "emi": "Mussau-Emira", + "emk": "Eastern Maninkakan", + "emm": "Mamulique", + "emn": "Eman", + "emp": "Northern Emberá", + "emq": "Eastern Minyag", + "ems": "Pacific Gulf Yupik", + "emu": "Eastern Muria", + "emw": "Emplawas", + "emx": "Erromintxela", + "emy": "Epigraphic Mayan", + "emz": "Mbessa", + "en": "English", + "ena": "Apali", + "enb": "Markweeta", + "enc": "En", + "end": "Ende", + "enf": "Forest Enets", + "enh": "Tundra Enets", + "enl": "Enlhet", + "enm": "Middle English (1100-1500)", + "enn": "Engenni", + "eno": "Enggano", + "enq": "Enga", + "enr": "Emumu; Emem", + "enu": "Enu", + "env": "Enwan (Edo State)", + "enw": "Enwan (Akwa Ibom State)", + "enx": "Enxet", + "eo": "Esperanto", + "eot": "Beti (Côte d'Ivoire)", + "epi": "Epie", + "era": "Eravallan", + "erg": "Sie", + "erh": "Eruwa", + "eri": "Ogea", + "erk": "South Efate", + "ero": "Horpa", + "err": "Erre", + "ers": "Ersu", + "ert": "Eritai", + "erw": "Erokwanas", + "es": "Spanish; Castilian", + "ese": "Ese Ejja", + "esg": "Aheri Gondi", + "esh": "Eshtehardi", + "esi": "North Alaskan Inupiatun", + "esk": "Northwest Alaska Inupiatun", + "esl": "Egypt Sign Language", + "esm": "Esuma", + "esn": "Salvadoran Sign Language", + "eso": "Estonian Sign Language", + "esq": "Esselen", + "ess": "Central Siberian Yupik", + "esu": "Central Yupik", + "esx": "Eskimo-Aleut languages", + "esy": "Eskayan", + "et": "Estonian", + "etb": "Etebi", + "etc": "Etchemin", + "eth": "Ethiopian Sign Language", + "etn": "Eton (Vanuatu)", + "eto": "Eton (Cameroon)", + "etr": "Edolo", + "ets": "Yekhee", + "ett": "Etruscan", + "etu": 
"Ejagham", + "etx": "Eten", + "etz": "Semimi", + "eu": "Basque", + "euq": "Basque (family)", + "eve": "Even", + "evh": "Uvbie", + "evn": "Evenki", + "ewo": "Ewondo", + "ext": "Extremaduran", + "eya": "Eyak", + "eyo": "Keiyo", + "eza": "Ezaa", + "eze": "Uzekwe", + "fa": "Persian", + "faa": "Fasu", + "fab": "Fa d'Ambu", + "fad": "Wagi", + "faf": "Fagani", + "fag": "Finongan", + "fah": "Baissa Fali", + "fai": "Faiwol", + "faj": "Faita", + "fak": "Fang (Cameroon)", + "fal": "South Fali", + "fam": "Fam", + "fan": "Fang (Equatorial Guinea)", + "fap": "Paloor", + "far": "Fataleka", + "fat": "Fanti", + "fau": "Fayu", + "fax": "Fala", + "fay": "Southwestern Fars", + "faz": "Northwestern Fars", + "fbl": "West Albay Bikol", + "fcs": "Quebec Sign Language", + "fer": "Feroge", + "ff": "Fulah", + "ffi": "Foia Foia", + "ffm": "Maasina Fulfulde", + "fgr": "Fongoro", + "fi": "Finnish", + "fia": "Nobiin", + "fie": "Fyer", + "fif": "Faifi", + "fil": "Filipino; Pilipino", + "fip": "Fipa", + "fir": "Firan", + "fit": "Tornedalen Finnish; Meänkieli", + "fiu": "Finno-Ugrian languages", + "fiw": "Fiwaga", + "fj": "Fijian", + "fkk": "Kirya-Konzəl", + "fkv": "Kven Finnish", + "fla": "Kalispel-Pend d'Oreille", + "flh": "Foau", + "fli": "Fali", + "fll": "North Fali", + "fln": "Flinders Island", + "flr": "Fuliiru", + "fly": "Flaaitaal; Tsotsitaal", + "fmp": "Fe'fe'", + "fmu": "Far Western Muria", + "fnb": "Fanbak", + "fng": "Fanagalo", + "fni": "Fania", + "fo": "Faroese", + "fod": "Foodo", + "foi": "Foi", + "fom": "Foma", + "fon": "Fon", + "for": "Fore", + "fos": "Siraya", + "fox": "Formosan languages", + "fpe": "Fernando Po Creole English", + "fqs": "Fas", + "fr": "French", + "frc": "Cajun French", + "frd": "Fordata", + "frk": "Frankish", + "frm": "Middle French (ca. 1400-1600)", + "fro": "Old French (842-ca. 
1400)", + "frp": "Arpitan; Francoprovençal", + "frq": "Forak", + "frr": "Northern Frisian", + "frs": "Eastern Frisian", + "frt": "Fortsenal", + "fse": "Finnish Sign Language", + "fsl": "French Sign Language", + "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli", + "fub": "Adamawa Fulfulde", + "fuc": "Pulaar", + "fud": "East Futuna", + "fue": "Borgu Fulfulde", + "fuf": "Pular", + "fuh": "Western Niger Fulfulde", + "fui": "Bagirmi Fulfulde", + "fuj": "Ko", + "fum": "Fum", + "fun": "Fulniô", + "fuq": "Central-Eastern Niger Fulfulde", + "fur": "Friulian", + "fut": "Futuna-Aniwa", + "fuu": "Furu", + "fuv": "Nigerian Fulfulde", + "fuy": "Fuyug", + "fvr": "Fur", + "fwa": "Fwâi", + "fwe": "Fwe", + "fy": "Western Frisian", + "ga": "Irish", + "gaa": "Ga", + "gab": "Gabri", + "gac": "Mixed Great Andamanese", + "gad": "Gaddang", + "gae": "Guarequena", + "gaf": "Gende", + "gag": "Gagauz", + "gah": "Alekano", + "gai": "Borei", + "gaj": "Gadsup", + "gak": "Gamkonora", + "gal": "Galolen", + "gam": "Kandawo", + "gan": "Gan Chinese", + "gao": "Gants", + "gap": "Gal", + "gaq": "Gata'", + "gar": "Galeya", + "gas": "Adiwasi Garasia", + "gat": "Kenati", + "gau": "Mudhili Gadaba", + "gaw": "Nobonob", + "gax": "Borana-Arsi-Guji Oromo", + "gay": "Gayo", + "gaz": "West Central Oromo", + "gba": "Gbaya (Central African Republic)", + "gbb": "Kaytetye", + "gbd": "Karajarri", + "gbe": "Niksek", + "gbf": "Gaikundi", + "gbg": "Gbanziri", + "gbh": "Defi Gbe", + "gbi": "Galela", + "gbj": "Bodo Gadaba", + "gbk": "Gaddi", + "gbl": "Gamit", + "gbm": "Garhwali", + "gbn": "Mo'da", + "gbo": "Northern Grebo", + "gbp": "Gbaya-Bossangoa", + "gbq": "Gbaya-Bozoum", + "gbr": "Gbagyi", + "gbs": "Gbesi Gbe", + "gbu": "Gagadu", + "gbv": "Gbanu", + "gbw": "Gabi-Gabi", + "gbx": "Eastern Xwla Gbe", + "gby": "Gbari", + "gbz": "Zoroastrian Dari", + "gcc": "Mali", + "gcd": "Ganggalida", + "gce": "Galice", + "gcf": "Guadeloupean Creole French", + "gcl": "Grenadian Creole English", + "gcn": "Gaina", + "gcr": "Guianese Creole French", + "gct": "Colonia Tovar German", + "gd": "Scottish Gaelic; Gaelic", + "gda": "Gade Lohar", + "gdb": "Pottangi Ollar Gadaba", + "gdc": "Gugu Badhun", + "gdd": "Gedaged", + "gde": "Gude", + "gdf": "Guduf-Gava", + "gdg": "Ga'dang", + "gdh": "Gadjerawang; Gajirrabeng", + "gdi": "Gundi", + "gdj": "Gurdjar", + "gdk": "Gadang", + "gdl": "Dirasha", + "gdm": "Laal", + "gdn": "Umanakaina", + "gdo": "Ghodoberi", + "gdq": "Mehri", + "gdr": "Wipi", + "gds": "Ghandruk Sign Language", + "gdt": "Kungardutyi", + "gdu": "Gudu", + "gdx": "Godwari", + "gea": "Geruma", + "geb": "Kire", + "gec": "Gboloo Grebo", + "ged": "Gade", + "gef": "Gerai", + "geg": "Gengle", + "geh": "Hutterite German; Hutterisch", + "gei": "Gebe", + "gej": "Gen", + "gek": "Ywom", + "gel": "ut-Ma'in", + "gem": "Germanic languages", + "geq": "Geme", + "ges": "Geser-Gorom", + "gev": "Eviya", + "gew": "Gera", + "gex": "Garre", + "gey": "Enya", + "gez": "Geez", + "gfk": "Patpatar", + "gft": "Gafat", + "gga": "Gao", + "ggb": "Gbii", + "ggd": "Gugadj", + "gge": "Gurr-goni", + "ggg": "Gurgula", + "ggk": "Kungarakany", + "ggl": "Ganglau", + "ggt": "Gitua", + "ggu": "Gagu; Gban", + "ggw": "Gogodala", + "gha": "Ghadamès", + "ghc": "Hiberno-Scottish Gaelic", + "ghe": "Southern Ghale", + "ghh": "Northern Ghale", + "ghk": "Geko Karen", + "ghl": "Ghulfan", + "ghn": "Ghanongga", + "gho": "Ghomara", + "ghr": "Ghera", + "ghs": "Guhu-Samane", + "ght": "Kuke; Kutang Ghale", + "gia": "Kija", + "gib": "Gibanawa", + "gic": "Gail", + 
"gid": "Gidar", + "gie": "Gaɓogbo; Guébie", + "gig": "Goaria", + "gih": "Githabul", + "gii": "Girirra", + "gil": "Gilbertese", + "gim": "Gimi (Eastern Highlands)", + "gin": "Hinukh", + "gip": "Gimi (West New Britain)", + "giq": "Green Gelao", + "gir": "Red Gelao", + "gis": "North Giziga", + "git": "Gitxsan", + "giu": "Mulao", + "giw": "White Gelao", + "gix": "Gilima", + "giy": "Giyug", + "giz": "South Giziga", + "gjk": "Kachi Koli", + "gjm": "Gunditjmara", + "gjn": "Gonja", + "gjr": "Gurindji Kriol", + "gju": "Gujari", + "gka": "Guya", + "gkd": "Magɨ (Madang Province)", + "gke": "Ndai", + "gkn": "Gokana", + "gko": "Kok-Nar", + "gkp": "Guinea Kpelle", + "gku": "ǂUngkue", + "gl": "Galician", + "glb": "Belning", + "glc": "Bon Gula", + "gld": "Nanai", + "glh": "Northwest Pashai; Northwest Pashayi", + "glj": "Gula Iro", + "glk": "Gilaki", + "gll": "Garlali", + "glo": "Galambu", + "glr": "Glaro-Twabo", + "glu": "Gula (Chad)", + "glw": "Glavda", + "gly": "Gule", + "gma": "Gambera", + "gmb": "Gula'alaa", + "gmd": "Mághdì", + "gme": "East Germanic languages", + "gmg": "Magɨyi", + "gmh": "Middle High German (ca. 1050-1500)", + "gml": "Middle Low German", + "gmm": "Gbaya-Mbodomo", + "gmn": "Gimnime", + "gmq": "North Germanic languages", + "gmr": "Mirning; Mirniny", + "gmu": "Gumalu", + "gmv": "Gamo", + "gmw": "West Germanic languages", + "gmx": "Magoma", + "gmy": "Mycenaean Greek", + "gmz": "Mgbolizhia", + "gn": "Guarani", + "gna": "Kaansa", + "gnb": "Gangte", + "gnc": "Guanche", + "gnd": "Zulgo-Gemzek", + "gne": "Ganang", + "gng": "Ngangam", + "gnh": "Lere", + "gni": "Gooniyandi", + "gnj": "Ngen", + "gnk": "ǁGana", + "gnl": "Gangulu", + "gnm": "Ginuman", + "gnn": "Gumatj", + "gno": "Northern Gondi", + "gnq": "Gana", + "gnr": "Gureng Gureng", + "gnt": "Guntai", + "gnu": "Gnau", + "gnw": "Western Bolivian Guaraní", + "gnz": "Ganzi", + "goa": "Guro", + "gob": "Playero", + "goc": "Gorakor", + "god": "Godié", + "goe": "Gongduk", + "gof": "Gofa", + "gog": "Gogo", + "goh": "Old High German (ca. 
750-1050)", + "goi": "Gobasi", + "goj": "Gowlan", + "gok": "Gowli", + "gol": "Gola", + "gom": "Goan Konkani", + "gon": "Gondi", + "goo": "Gone Dau", + "gop": "Yeretuar", + "goq": "Gorap", + "gor": "Gorontalo", + "gos": "Gronings", + "got": "Gothic", + "gou": "Gavar", + "gov": "Goo", + "gow": "Gorowa", + "gox": "Gobu", + "goy": "Goundo", + "goz": "Gozarkhani", + "gpa": "Gupa-Abawa", + "gpe": "Ghanaian Pidgin English", + "gpn": "Taiap", + "gqa": "Ga'anda", + "gqi": "Guiqiong", + "gqn": "Guana (Brazil)", + "gqr": "Gor", + "gqu": "Qau", + "gra": "Rajput Garasia", + "grb": "Grebo", + "grc": "Ancient Greek (to 1453)", + "grd": "Guruntum-Mbaaru", + "grg": "Madi", + "grh": "Gbiri-Niragu", + "gri": "Ghari", + "grj": "Southern Grebo", + "grk": "Greek languages", + "grm": "Kota Marudu Talantang", + "gro": "Groma", + "grq": "Gorovu", + "grr": "Taznatit", + "grs": "Gresi", + "grt": "Garo", + "gru": "Kistane", + "grv": "Central Grebo", + "grw": "Gweda", + "grx": "Guriaso", + "gry": "Barclayville Grebo", + "grz": "Guramalum", + "gse": "Ghanaian Sign Language", + "gsg": "German Sign Language", + "gsl": "Gusilay", + "gsm": "Guatemalan Sign Language", + "gsn": "Nema; Gusan", + "gso": "Southwest Gbaya", + "gsp": "Wasembo", + "gss": "Greek Sign Language", + "gsw": "Swiss German; Alemannic; Alsatian", + "gta": "Guató", + "gtu": "Aghu-Tharnggala", + "gu": "Gujarati", + "gua": "Shiki", + "gub": "Guajajára", + "guc": "Wayuu", + "gud": "Yocoboué Dida", + "gue": "Gurindji", + "guf": "Gupapuyngu", + "gug": "Paraguayan Guaraní", + "guh": "Guahibo", + "gui": "Eastern Bolivian Guaraní", + "guk": "Gumuz", + "gul": "Sea Island Creole English", + "gum": "Guambiano", + "gun": "Mbyá Guaraní", + "guo": "Guayabero", + "gup": "Gunwinggu", + "guq": "Aché", + "gur": "Farefare", + "gus": "Guinean Sign Language", + "gut": "Maléku Jaíka", + "guu": "Yanomamö", + "guw": "Gun", + "gux": "Gourmanchéma", + "guz": "Gusii; Ekegusii", + "gv": "Manx", + "gva": "Guana (Paraguay)", + "gvc": "Guanano", + "gve": "Duwet", + "gvf": "Golin", + "gvj": "Guajá", + "gvl": "Gulay", + "gvm": "Gurmana", + "gvn": "Kuku-Yalanji", + "gvo": "Gavião Do Jiparaná", + "gvp": "Pará Gavião", + "gvr": "Gurung", + "gvs": "Gumawana", + "gvy": "Guyani", + "gwa": "Mbato", + "gwb": "Gwa", + "gwc": "Gawri; Kalami", + "gwd": "Gawwada", + "gwe": "Gweno", + "gwf": "Gowro", + "gwg": "Moo", + "gwi": "Gwichʼin", + "gwj": "ǀGwi", + "gwm": "Awngthim", + "gwn": "Gwandara", + "gwr": "Gwere", + "gwt": "Gawar-Bati", + "gwu": "Guwamu", + "gww": "Kwini", + "gwx": "Gua", + "gxx": "Wè Southern", + "gya": "Northwest Gbaya", + "gyb": "Garus", + "gyd": "Kayardild", + "gye": "Gyem", + "gyf": "Gungabula", + "gyg": "Gbayi", + "gyi": "Gyele", + "gyl": "Gayil", + "gym": "Ngäbere", + "gyn": "Guyanese Creole English", + "gyo": "Gyalsumdo", + "gyr": "Guarayu", + "gyy": "Gunya", + "gyz": "Geji; Gyaazi", + "gza": "Ganza", + "gzi": "Gazi", + "gzn": "Gane", + "ha": "Hausa", + "haa": "Han", + "hab": "Hanoi Sign Language", + "hac": "Gurani", + "had": "Hatam", + "hae": "Eastern Oromo", + "haf": "Haiphong Sign Language", + "hag": "Hanga", + "hah": "Hahon", + "hai": "Haida", + "haj": "Hajong", + "hak": "Hakka Chinese", + "hal": "Halang", + "ham": "Hewa", + "han": "Hangaza", + "hao": "Hakö", + "hap": "Hupla", + "haq": "Ha", + "har": "Harari", + "has": "Haisla", + "hav": "Havu", + "haw": "Hawaiian", + "hax": "Southern Haida", + "hay": "Haya", + "haz": "Hazaragi", + "hba": "Hamba", + "hbb": "Huba", + "hbn": "Heiban", + "hbo": "Ancient Hebrew", + "hbu": "Habu", + "hca": "Andaman Creole Hindi", + "hch": 
"Huichol", + "hdn": "Northern Haida", + "hds": "Honduras Sign Language", + "hdy": "Hadiyya", + "he": "Hebrew", + "hea": "Northern Qiandong Miao", + "hed": "Herdé", + "heg": "Helong", + "heh": "Hehe", + "hei": "Heiltsuk", + "hem": "Hemba", + "hgm": "Haiǁom", + "hgw": "Haigwai", + "hhi": "Hoia Hoia", + "hhr": "Kerak", + "hhy": "Hoyahoya", + "hi": "Hindi", + "hia": "Lamang", + "hib": "Hibito", + "hid": "Hidatsa", + "hif": "Fiji Hindi", + "hig": "Kamwe", + "hih": "Pamosu", + "hii": "Hinduri", + "hij": "Hijuk", + "hik": "Seit-Kaitetu", + "hil": "Hiligaynon", + "him": "Himachali languages; Western Pahari languages", + "hio": "Tsoa", + "hir": "Himarimã", + "hit": "Hittite", + "hiw": "Hiw", + "hix": "Hixkaryána", + "hji": "Haji", + "hka": "Kahe", + "hke": "Hunde", + "hkh": "Khah; Poguli", + "hkk": "Hunjara-Kaina Ke", + "hkn": "Mel-Khaonh", + "hks": "Hong Kong Sign Language; Heung Kong Sau Yue", + "hla": "Halia", + "hlb": "Halbi", + "hld": "Halang Doan", + "hle": "Hlersu", + "hlt": "Matu Chin", + "hlu": "Hieroglyphic Luwian", + "hma": "Southern Mashan Hmong; Southern Mashan Miao", + "hmb": "Humburi Senni Songhay", + "hmc": "Central Huishui Hmong; Central Huishui Miao", + "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao", + "hme": "Eastern Huishui Hmong; Eastern Huishui Miao", + "hmf": "Hmong Don", + "hmg": "Southwestern Guiyang Hmong", + "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao", + "hmi": "Northern Huishui Hmong; Northern Huishui Miao", + "hmj": "Ge; Gejia", + "hmk": "Maek", + "hml": "Luopohe Hmong; Luopohe Miao", + "hmm": "Central Mashan Hmong; Central Mashan Miao", + "hmn": "Hmong; Mong", + "hmp": "Northern Mashan Hmong; Northern Mashan Miao", + "hmq": "Eastern Qiandong Miao", + "hmr": "Hmar", + "hms": "Southern Qiandong Miao", + "hmt": "Hamtai", + "hmu": "Hamap", + "hmv": "Hmong Dô", + "hmw": "Western Mashan Hmong; Western Mashan Miao", + "hmx": "Hmong-Mien languages", + "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao", + "hmz": "Hmong Shua; Sinicized Miao", + "hna": "Mina (Cameroon)", + "hnd": "Southern Hindko", + "hne": "Chhattisgarhi", + "hng": "Hungu", + "hnh": "ǁAni", + "hni": "Hani", + "hnj": "Hmong Njua; Mong Leng; Mong Njua", + "hnn": "Hanunoo", + "hno": "Northern Hindko", + "hns": "Caribbean Hindustani", + "hnu": "Hung", + "ho": "Hiri Motu", + "hoa": "Hoava", + "hob": "Mari (Madang Province)", + "hoc": "Ho", + "hod": "Holma", + "hoe": "Horom", + "hoh": "Hobyót", + "hoi": "Holikachuk", + "hoj": "Hadothi; Haroti", + "hok": "Hokan languages", + "hol": "Holu", + "hom": "Homa", + "hoo": "Holoholo", + "hop": "Hopi", + "hor": "Horo", + "hos": "Ho Chi Minh City Sign Language", + "hot": "Hote; Malê", + "hov": "Hovongan", + "how": "Honi", + "hoy": "Holiya", + "hoz": "Hozo", + "hpo": "Hpon", + "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language", + "hr": "Croatian", + "hra": "Hrangkhol", + "hrc": "Niwer Mil", + "hre": "Hre", + "hrk": "Haruku", + "hrm": "Horned Miao", + "hro": "Haroi", + "hrp": "Nhirrpi", + "hrt": "Hértevin", + "hru": "Hruso", + "hrw": "Warwar Feni", + "hrx": "Hunsrik", + "hrz": "Harzani", + "hsb": "Upper Sorbian", + "hsh": "Hungarian Sign Language", + "hsl": "Hausa Sign Language", + "hsn": "Xiang Chinese", + "hss": "Harsusi", + "ht": "Haitian; Haitian Creole", + "hti": "Hoti", + "hto": "Minica Huitoto", + "hts": "Hadza", + "htu": "Hitu", + "htx": "Middle Hittite", + "hu": "Hungarian", + "hub": "Huambisa", + "huc": "ǂHua; ǂʼAmkhoe", + "hud": "Huaulu", + "hue": "San Francisco Del Mar Huave", + "huf": "Humene", + "hug": "Huachipaeri", + "huh": 
"Huilliche", + "hui": "Huli", + "huj": "Northern Guiyang Hmong; Northern Guiyang Miao", + "huk": "Hulung", + "hul": "Hula", + "hum": "Hungana", + "huo": "Hu", + "hup": "Hupa", + "huq": "Tsat", + "hur": "Halkomelem", + "hus": "Huastec", + "hut": "Humla", + "huu": "Murui Huitoto", + "huv": "San Mateo Del Mar Huave", + "huw": "Hukumina", + "hux": "Nüpode Huitoto", + "huy": "Hulaulá", + "huz": "Hunzib", + "hvc": "Haitian Vodoun Culture Language", + "hve": "San Dionisio Del Mar Huave", + "hvk": "Haveke", + "hvn": "Sabu", + "hvv": "Santa María Del Mar Huave", + "hwa": "Wané", + "hwc": "Hawai'i Creole English; Hawai'i Pidgin", + "hwo": "Hwana", + "hy": "Armenian", + "hya": "Hya", + "hyw": "Western Armenian", + "hyx": "Armenian (family)", + "hz": "Herero", + "ia": "Interlingua (International Auxiliary Language Association)", + "iai": "Iaai", + "ian": "Iatmul", + "iar": "Purari", + "iba": "Iban", + "ibb": "Ibibio", + "ibd": "Iwaidja", + "ibe": "Akpes", + "ibg": "Ibanag", + "ibh": "Bih", + "ibl": "Ibaloi", + "ibm": "Agoi", + "ibn": "Ibino", + "ibr": "Ibuoro", + "ibu": "Ibu", + "iby": "Ibani", + "ica": "Ede Ica", + "ich": "Etkywan", + "icl": "Icelandic Sign Language", + "icr": "Islander Creole English", + "id": "Indonesian", + "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi", + "idb": "Indo-Portuguese", + "idc": "Idon; Ajiya", + "idd": "Ede Idaca", + "ide": "Idere", + "idi": "Idi", + "idr": "Indri", + "ids": "Idesa", + "idt": "Idaté", + "idu": "Idoma", + "ie": "Interlingue; Occidental", + "ifa": "Amganad Ifugao", + "ifb": "Batad Ifugao; Ayangan Ifugao", + "ife": "Ifè", + "iff": "Ifo", + "ifk": "Tuwali Ifugao", + "ifm": "Teke-Fuumu", + "ifu": "Mayoyao Ifugao", + "ify": "Keley-I Kallahan", + "ig": "Igbo", + "igb": "Ebira", + "ige": "Igede", + "igg": "Igana", + "igl": "Igala", + "igm": "Kanggape", + "ign": "Ignaciano", + "igo": "Isebe", + "igs": "Interglossa", + "igw": "Igwe", + "ihb": "Iha Based Pidgin", + "ihi": "Ihievbe", + "ihp": "Iha", + "ihw": "Bidhawal", + "ii": "Sichuan Yi; Nuosu", + "iin": "Thiin", + "iir": "Indo-Iranian languages", + "ijc": "Izon", + "ije": "Biseni", + "ijj": "Ede Ije", + "ijn": "Kalabari", + "ijo": "Ijo languages", + "ijs": "Southeast Ijo", + "ik": "Inupiaq", + "ike": "Eastern Canadian Inuktitut", + "iki": "Iko", + "ikk": "Ika", + "ikl": "Ikulu", + "iko": "Olulumo-Ikom", + "ikp": "Ikpeshi", + "ikr": "Ikaranggal", + "iks": "Inuit Sign Language", + "ikt": "Inuinnaqtun; Western Canadian Inuktitut", + "ikv": "Iku-Gora-Ankwa", + "ikw": "Ikwere", + "ikx": "Ik", + "ikz": "Ikizu", + "ila": "Ile Ape", + "ilb": "Ila", + "ilg": "Garig-Ilgar", + "ili": "Ili Turki", + "ilk": "Ilongot", + "ilm": "Iranun (Malaysia)", + "ilo": "Iloko", + "ilp": "Iranun (Philippines)", + "ils": "International Sign", + "ilu": "Ili'uun", + "ilv": "Ilue", + "ima": "Mala Malasar", + "imi": "Anamgura", + "iml": "Miluk", + "imn": "Imonda", + "imo": "Imbongu", + "imr": "Imroing", + "ims": "Marsian", + "imt": "Imotong", + "imy": "Milyan", + "inb": "Inga", + "inc": "Indic languages", + "ine": "Indo-European languages", + "ing": "Degexit'an", + "inh": "Ingush", + "inj": "Jungle Inga", + "inl": "Indonesian Sign Language", + "inm": "Minaean", + "inn": "Isinai", + "ino": "Inoke-Yate", + "inp": "Iñapari", + "ins": "Indian Sign Language", + "int": "Intha", + "inz": "Ineseño", + "io": "Ido", + "ior": "Inor", + "iou": "Tuma-Irumu", + "iow": "Iowa-Oto", + "ipi": "Ipili", + "ipo": "Ipiko", + "iqu": "Iquito", + "iqw": "Ikwo", + "ira": "Iranian languages", + "ire": "Iresim", + "irh": "Irarutu", + "iri": "Rigwe; Irigwe", 
+ "irk": "Iraqw", + "irn": "Irántxe", + "iro": "Iroquoian languages", + "irr": "Ir", + "iru": "Irula", + "irx": "Kamberau", + "iry": "Iraya", + "is": "Icelandic", + "isa": "Isabi", + "isc": "Isconahua", + "isd": "Isnag", + "ise": "Italian Sign Language", + "isg": "Irish Sign Language", + "ish": "Esan", + "isi": "Nkem-Nkum", + "isk": "Ishkashimi", + "ism": "Masimasi", + "isn": "Isanzu", + "iso": "Isoko", + "isr": "Israeli Sign Language", + "ist": "Istriot", + "isu": "Isu (Menchum Division)", + "it": "Italian", + "itb": "Binongan Itneg", + "itc": "Italic languages", + "itd": "Southern Tidung", + "ite": "Itene", + "iti": "Inlaod Itneg", + "itk": "Judeo-Italian", + "itl": "Itelmen", + "itm": "Itu Mbon Uzo", + "ito": "Itonama", + "itr": "Iteri", + "its": "Isekiri", + "itt": "Maeng Itneg", + "itv": "Itawit", + "itw": "Ito", + "itx": "Itik", + "ity": "Moyadan Itneg", + "itz": "Itzá", + "iu": "Inuktitut", + "ium": "Iu Mien", + "ivb": "Ibatan", + "ivv": "Ivatan", + "iwk": "I-Wak", + "iwm": "Iwam", + "iwo": "Iwur", + "iws": "Sepik Iwam", + "ixc": "Ixcatec", + "ixl": "Ixil", + "iya": "Iyayu", + "iyo": "Mesaka", + "iyx": "Yaka (Congo)", + "izh": "Ingrian", + "izr": "Izere", + "izz": "Izii", + "ja": "Japanese", + "jaa": "Jamamadí", + "jab": "Hyam", + "jac": "Popti'; Jakalteko", + "jad": "Jahanka", + "jae": "Yabem", + "jaf": "Jara", + "jah": "Jah Hut", + "jaj": "Zazao", + "jak": "Jakun", + "jal": "Yalahatan", + "jam": "Jamaican Creole English", + "jan": "Jandai", + "jao": "Yanyuwa", + "jaq": "Yaqay", + "jas": "New Caledonian Javanese", + "jat": "Jakati", + "jau": "Yaur", + "jax": "Jambi Malay", + "jay": "Yan-nhangu; Nhangu", + "jaz": "Jawe", + "jbe": "Judeo-Berber", + "jbi": "Badjiri", + "jbj": "Arandai", + "jbk": "Barikewa", + "jbm": "Bijim", + "jbn": "Nafusi", + "jbo": "Lojban", + "jbr": "Jofotek-Bromnya", + "jbt": "Jabutí", + "jbu": "Jukun Takum", + "jbw": "Yawijibaya", + "jcs": "Jamaican Country Sign Language", + "jct": "Krymchak", + "jda": "Jad", + "jdg": "Jadgali", + "jdt": "Judeo-Tat", + "jeb": "Jebero", + "jee": "Jerung", + "jeh": "Jeh", + "jei": "Yei", + "jek": "Jeri Kuo", + "jel": "Yelmek", + "jen": "Dza", + "jer": "Jere", + "jet": "Manem", + "jeu": "Jonkor Bourmataguil", + "jgb": "Ngbee", + "jge": "Judeo-Georgian", + "jgk": "Gwak", + "jgo": "Ngomba", + "jhi": "Jehai", + "jhs": "Jhankot Sign Language", + "jia": "Jina", + "jib": "Jibu", + "jic": "Tol", + "jid": "Bu (Kaduna State)", + "jie": "Jilbe", + "jig": "Jingulu; Djingili", + "jih": "sTodsde; Shangzhai", + "jii": "Jiiddu", + "jil": "Jilim", + "jim": "Jimi (Cameroon)", + "jio": "Jiamao", + "jiq": "Guanyinqiao; Lavrung", + "jit": "Jita", + "jiu": "Youle Jinuo", + "jiv": "Shuar", + "jiy": "Buyuan Jinuo", + "jje": "Jejueo", + "jjr": "Bankal", + "jka": "Kaera", + "jkm": "Mobwa Karen", + "jko": "Kubo", + "jkp": "Paku Karen", + "jkr": "Koro (India)", + "jks": "Amami Koniya Sign Language", + "jku": "Labir", + "jle": "Ngile", + "jls": "Jamaican Sign Language", + "jma": "Dima", + "jmb": "Zumbun", + "jmc": "Machame", + "jmd": "Yamdena", + "jmi": "Jimi (Nigeria)", + "jml": "Jumli", + "jmn": "Makuri Naga", + "jmr": "Kamara", + "jms": "Mashi (Nigeria)", + "jmw": "Mouwase", + "jmx": "Western Juxtlahuaca Mixtec", + "jna": "Jangshung", + "jnd": "Jandavra", + "jng": "Yangman", + "jni": "Janji", + "jnj": "Yemsa", + "jnl": "Rawat", + "jns": "Jaunsari", + "job": "Joba", + "jod": "Wojenaka", + "jog": "Jogi", + "jor": "Jorá", + "jos": "Jordanian Sign Language", + "jow": "Jowulu", + "jpa": "Jewish Palestinian Aramaic", + "jpr": "Judeo-Persian", + "jpx": "Japanese 
(family)", + "jqr": "Jaqaru", + "jra": "Jarai", + "jrb": "Judeo-Arabic", + "jrr": "Jiru", + "jrt": "Jakattoe", + "jru": "Japrería", + "jsl": "Japanese Sign Language", + "jua": "Júma", + "jub": "Wannu", + "juc": "Jurchen", + "jud": "Worodougou", + "juh": "Hõne", + "jui": "Ngadjuri", + "juk": "Wapan", + "jul": "Jirel", + "jum": "Jumjum", + "jun": "Juang", + "juo": "Jiba", + "jup": "Hupdë", + "jur": "Jurúna", + "jus": "Jumla Sign Language", + "jut": "Jutish", + "juu": "Ju", + "juw": "Wãpha", + "juy": "Juray", + "jv": "Javanese", + "jvd": "Javindo", + "jvn": "Caribbean Javanese", + "jwi": "Jwira-Pepesa", + "jya": "Jiarong", + "jye": "Judeo-Yemeni Arabic", + "jyy": "Jaya", + "ka": "Georgian", + "kaa": "Kara-Kalpak; Karakalpak", + "kab": "Kabyle", + "kac": "Kachin; Jingpho", + "kad": "Adara", + "kae": "Ketangalan", + "kaf": "Katso", + "kag": "Kajaman", + "kah": "Kara (Central African Republic)", + "kai": "Karekare", + "kaj": "Jju", + "kak": "Kalanguya; Kayapa Kallahan", + "kam": "Kamba (Kenya)", + "kao": "Xaasongaxango", + "kap": "Bezhta", + "kaq": "Capanahua", + "kar": "Karen languages", + "kav": "Katukína", + "kaw": "Kawi", + "kax": "Kao", + "kay": "Kamayurá", + "kba": "Kalarko", + "kbb": "Kaxuiâna", + "kbc": "Kadiwéu", + "kbd": "Kabardian", + "kbe": "Kanju", + "kbg": "Khamba", + "kbh": "Camsá", + "kbi": "Kaptiau", + "kbj": "Kari", + "kbk": "Grass Koiari", + "kbl": "Kanembu", + "kbm": "Iwal", + "kbn": "Kare (Central African Republic)", + "kbo": "Keliko", + "kbp": "Kabiyè", + "kbq": "Kamano", + "kbr": "Kafa", + "kbs": "Kande", + "kbt": "Abadi", + "kbu": "Kabutra", + "kbv": "Dera (Indonesia)", + "kbw": "Kaiep", + "kbx": "Ap Ma", + "kby": "Manga Kanuri", + "kbz": "Duhwa", + "kca": "Khanty", + "kcb": "Kawacha", + "kcc": "Lubila", + "kcd": "Ngkâlmpw Kanum", + "kce": "Kaivi", + "kcf": "Ukaan", + "kcg": "Tyap", + "kch": "Vono", + "kci": "Kamantan", + "kcj": "Kobiana", + "kck": "Kalanga", + "kcl": "Kela (Papua New Guinea); Kala", + "kcm": "Gula (Central African Republic)", + "kcn": "Nubi", + "kco": "Kinalakna", + "kcp": "Kanga", + "kcq": "Kamo", + "kcr": "Katla", + "kcs": "Koenoem", + "kct": "Kaian", + "kcu": "Kami (Tanzania)", + "kcv": "Kete", + "kcw": "Kabwari", + "kcx": "Kachama-Ganjule", + "kcy": "Korandje", + "kcz": "Konongo", + "kda": "Worimi", + "kdc": "Kutu", + "kdd": "Yankunytjatjara", + "kde": "Makonde", + "kdf": "Mamusi", + "kdg": "Seba", + "kdh": "Tem", + "kdi": "Kumam", + "kdj": "Karamojong", + "kdk": "Numèè; Kwényi", + "kdl": "Tsikimba", + "kdm": "Kagoma", + "kdn": "Kunda", + "kdo": "Kordofanian languages", + "kdp": "Kaningdon-Nindem", + "kdq": "Koch", + "kdr": "Karaim", + "kdt": "Kuy", + "kdu": "Kadaru", + "kdw": "Koneraw", + "kdx": "Kam", + "kdy": "Keder; Keijar", + "kdz": "Kwaja", + "kea": "Kabuverdianu", + "keb": "Kélé", + "kec": "Keiga", + "ked": "Kerewe", + "kee": "Eastern Keres", + "kef": "Kpessi", + "keg": "Tese", + "keh": "Keak", + "kei": "Kei", + "kej": "Kadar", + "kek": "Kekchí", + "kel": "Kela (Democratic Republic of Congo)", + "kem": "Kemak", + "ken": "Kenyang", + "keo": "Kakwa", + "kep": "Kaikadi", + "keq": "Kamar", + "ker": "Kera", + "kes": "Kugbo", + "ket": "Ket", + "keu": "Akebu", + "kev": "Kanikkaran", + "kew": "West Kewa", + "kex": "Kukna", + "key": "Kupia", + "kez": "Kukele", + "kfa": "Kodava", + "kfb": "Northwestern Kolami", + "kfc": "Konda-Dora", + "kfd": "Korra Koraga", + "kfe": "Kota (India)", + "kff": "Koya", + "kfg": "Kudiya", + "kfh": "Kurichiya", + "kfi": "Kannada Kurumba", + "kfj": "Kemiehua", + "kfk": "Kinnauri", + "kfl": "Kung", + "kfm": "Khunsari", + 
"kfn": "Kuk", + "kfo": "Koro (Côte d'Ivoire)", + "kfp": "Korwa", + "kfq": "Korku", + "kfr": "Kachhi; Kutchi", + "kfs": "Bilaspuri", + "kft": "Kanjari", + "kfu": "Katkari", + "kfv": "Kurmukar", + "kfw": "Kharam Naga", + "kfx": "Kullu Pahari", + "kfy": "Kumaoni", + "kfz": "Koromfé", + "kg": "Kongo", + "kga": "Koyaga", + "kgb": "Kawe", + "kge": "Komering", + "kgf": "Kube", + "kgg": "Kusunda", + "kgi": "Selangor Sign Language", + "kgj": "Gamale Kham", + "kgk": "Kaiwá", + "kgl": "Kunggari", + "kgm": "Karipúna", + "kgn": "Karingani", + "kgo": "Krongo", + "kgp": "Kaingang", + "kgq": "Kamoro", + "kgr": "Abun", + "kgs": "Kumbainggar", + "kgt": "Somyev", + "kgu": "Kobol", + "kgv": "Karas", + "kgw": "Karon Dori", + "kgx": "Kamaru", + "kgy": "Kyerung", + "kha": "Khasi", + "khb": "Lü", + "khc": "Tukang Besi North", + "khd": "Bädi Kanum", + "khe": "Korowai", + "khf": "Khuen", + "khg": "Khams Tibetan", + "khh": "Kehu", + "khi": "Khoisan languages", + "khj": "Kuturmi", + "khk": "Halh Mongolian", + "khl": "Lusi", + "khn": "Khandesi", + "kho": "Khotanese; Sakan", + "khp": "Kapori; Kapauri", + "khq": "Koyra Chiini Songhay", + "khr": "Kharia", + "khs": "Kasua", + "kht": "Khamti", + "khu": "Nkhumbi", + "khv": "Khvarshi", + "khw": "Khowar", + "khx": "Kanu", + "khy": "Kele (Democratic Republic of Congo)", + "khz": "Keapara", + "ki": "Kikuyu; Gikuyu", + "kia": "Kim", + "kib": "Koalib", + "kic": "Kickapoo", + "kid": "Koshin", + "kie": "Kibet", + "kif": "Eastern Parbate Kham", + "kig": "Kimaama; Kimaghima", + "kih": "Kilmeri", + "kii": "Kitsai", + "kij": "Kilivila", + "kil": "Kariya", + "kim": "Karagas", + "kio": "Kiowa", + "kip": "Sheshi Kham", + "kiq": "Kosadle; Kosare", + "kis": "Kis", + "kit": "Agob", + "kiu": "Kirmanjki (individual language)", + "kiv": "Kimbu", + "kiw": "Northeast Kiwai", + "kix": "Khiamniungan Naga", + "kiy": "Kirikiri", + "kiz": "Kisi", + "kj": "Kuanyama; Kwanyama", + "kja": "Mlap", + "kjb": "Q'anjob'al; Kanjobal", + "kjc": "Coastal Konjo", + "kjd": "Southern Kiwai", + "kje": "Kisar", + "kjg": "Khmu", + "kjh": "Khakas", + "kji": "Zabana", + "kjj": "Khinalugh", + "kjk": "Highland Konjo", + "kjl": "Western Parbate Kham", + "kjm": "Kháng", + "kjn": "Kunjen", + "kjo": "Harijan Kinnauri", + "kjp": "Pwo Eastern Karen", + "kjq": "Western Keres", + "kjr": "Kurudu", + "kjs": "East Kewa", + "kjt": "Phrae Pwo Karen", + "kju": "Kashaya", + "kjv": "Kaikavian Literary Language", + "kjx": "Ramopa", + "kjy": "Erave", + "kjz": "Bumthangkha", + "kk": "Kazakh", + "kka": "Kakanda", + "kkb": "Kwerisa", + "kkc": "Odoodee", + "kkd": "Kinuku", + "kke": "Kakabe", + "kkf": "Kalaktang Monpa", + "kkg": "Mabaka Valley Kalinga", + "kkh": "Khün", + "kki": "Kagulu", + "kkj": "Kako", + "kkk": "Kokota", + "kkl": "Kosarek Yale", + "kkm": "Kiong", + "kkn": "Kon Keu", + "kko": "Karko", + "kkp": "Gugubera; Koko-Bera", + "kkq": "Kaeku", + "kkr": "Kir-Balar", + "kks": "Giiwo", + "kkt": "Koi", + "kku": "Tumi", + "kkv": "Kangean", + "kkw": "Teke-Kukuya", + "kkx": "Kohin", + "kky": "Guugu Yimidhirr; Guguyimidjir", + "kkz": "Kaska", + "kl": "Kalaallisut; Greenlandic", + "kla": "Klamath-Modoc", + "klb": "Kiliwa", + "klc": "Kolbila", + "kld": "Gamilaraay", + "kle": "Kulung (Nepal)", + "klf": "Kendeje", + "klg": "Tagakaulo", + "klh": "Weliki", + "kli": "Kalumpang", + "klj": "Khalaj", + "klk": "Kono (Nigeria)", + "kll": "Kagan Kalagan", + "klm": "Migum", + "kln": "Kalenjin", + "klo": "Kapya", + "klp": "Kamasa", + "klq": "Rumu", + "klr": "Khaling", + "kls": "Kalasha", + "klt": "Nukna", + "klu": "Klao", + "klv": "Maskelynes", + "klw": 
"Tado; Lindu", + "klx": "Koluwawa", + "kly": "Kalao", + "klz": "Kabola", + "km": "Khmer; Central Khmer", + "kma": "Konni", + "kmb": "Kimbundu", + "kmc": "Southern Dong", + "kmd": "Majukayang Kalinga", + "kme": "Bakole", + "kmf": "Kare (Papua New Guinea)", + "kmg": "Kâte", + "kmh": "Kalam", + "kmi": "Kami (Nigeria)", + "kmj": "Kumarbhag Paharia", + "kmk": "Limos Kalinga", + "kml": "Tanudan Kalinga", + "kmm": "Kom (India)", + "kmn": "Awtuw", + "kmo": "Kwoma", + "kmp": "Gimme", + "kmq": "Kwama", + "kmr": "Northern Kurdish", + "kms": "Kamasau", + "kmt": "Kemtuik", + "kmu": "Kanite", + "kmv": "Karipúna Creole French", + "kmw": "Komo (Democratic Republic of Congo)", + "kmx": "Waboda", + "kmy": "Koma", + "kmz": "Khorasani Turkish", + "kn": "Kannada", + "kna": "Dera (Nigeria)", + "knb": "Lubuagan Kalinga", + "knc": "Central Kanuri", + "knd": "Konda", + "kne": "Kankanaey", + "knf": "Mankanya", + "kng": "Koongo", + "kni": "Kanufi", + "knj": "Western Kanjobal", + "knk": "Kuranko", + "knl": "Keninjal", + "knm": "Kanamarí", + "knn": "Konkani (individual language)", + "kno": "Kono (Sierra Leone)", + "knp": "Kwanja", + "knq": "Kintaq", + "knr": "Kaningra", + "kns": "Kensiu", + "knt": "Panoan Katukína", + "knu": "Kono (Guinea)", + "knv": "Tabo", + "knw": "Kung-Ekoka", + "knx": "Kendayan; Salako", + "kny": "Kanyok", + "knz": "Kalamsé", + "ko": "Korean", + "koa": "Konomala", + "koc": "Kpati", + "kod": "Kodi", + "koe": "Kacipo-Bale Suri", + "kof": "Kubi", + "kog": "Cogui; Kogi", + "koh": "Koyo", + "koi": "Komi-Permyak", + "kok": "Konkani (macrolanguage)", + "kol": "Kol (Papua New Guinea)", + "koo": "Konzo", + "kop": "Waube", + "koq": "Kota (Gabon)", + "kos": "Kosraean", + "kot": "Lagwan", + "kou": "Koke", + "kov": "Kudu-Camo", + "kow": "Kugama", + "koy": "Koyukon", + "koz": "Korak", + "kpa": "Kutto", + "kpb": "Mullu Kurumba", + "kpc": "Curripaco", + "kpd": "Koba", + "kpe": "Kpelle", + "kpf": "Komba", + "kpg": "Kapingamarangi", + "kph": "Kplang", + "kpi": "Kofei", + "kpj": "Karajá", + "kpk": "Kpan", + "kpl": "Kpala", + "kpm": "Koho", + "kpn": "Kepkiriwát", + "kpo": "Ikposo", + "kpq": "Korupun-Sela", + "kpr": "Korafe-Yegha", + "kps": "Tehit", + "kpt": "Karata", + "kpu": "Kafoa", + "kpv": "Komi-Zyrian", + "kpw": "Kobon", + "kpx": "Mountain Koiali", + "kpy": "Koryak", + "kpz": "Kupsabiny", + "kqa": "Mum", + "kqb": "Kovai", + "kqc": "Doromu-Koki", + "kqd": "Koy Sanjaq Surat", + "kqe": "Kalagan", + "kqf": "Kakabai", + "kqg": "Khe", + "kqh": "Kisankasa", + "kqi": "Koitabu", + "kqj": "Koromira", + "kqk": "Kotafon Gbe", + "kql": "Kyenele", + "kqm": "Khisa", + "kqn": "Kaonde", + "kqo": "Eastern Krahn", + "kqp": "Kimré", + "kqq": "Krenak", + "kqr": "Kimaragang", + "kqs": "Northern Kissi", + "kqt": "Klias River Kadazan", + "kqu": "Seroa", + "kqv": "Okolod", + "kqw": "Kandas", + "kqx": "Mser", + "kqy": "Koorete", + "kqz": "Korana", + "kr": "Kanuri", + "kra": "Kumhali", + "krb": "Karkin", + "krc": "Karachay-Balkar", + "krd": "Kairui-Midiki", + "kre": "Panará", + "krf": "Koro (Vanuatu)", + "krh": "Kurama", + "kri": "Krio", + "krj": "Kinaray-A", + "krk": "Kerek", + "krl": "Karelian", + "krn": "Sapo", + "kro": "Kru languages", + "krp": "Korop", + "krr": "Krung", + "krs": "Gbaya (Sudan)", + "krt": "Tumari Kanuri", + "kru": "Kurukh", + "krv": "Kavet", + "krw": "Western Krahn", + "krx": "Karon", + "kry": "Kryts", + "krz": "Sota Kanum", + "ks": "Kashmiri", + "ksa": "Shuwa-Zamani", + "ksb": "Shambala", + "ksc": "Southern Kalinga", + "ksd": "Kuanua", + "kse": "Kuni", + "ksf": "Bafia", + "ksg": "Kusaghe", + "ksh": "Kölsch", + 
"ksi": "Krisa; I'saka", + "ksj": "Uare", + "ksk": "Kansa", + "ksl": "Kumalu", + "ksm": "Kumba", + "ksn": "Kasiguranin", + "kso": "Kofa", + "ksp": "Kaba", + "ksq": "Kwaami", + "ksr": "Borong", + "kss": "Southern Kisi", + "kst": "Winyé", + "ksu": "Khamyang", + "ksv": "Kusu", + "ksw": "S'gaw Karen", + "ksx": "Kedang", + "ksy": "Kharia Thar", + "ksz": "Kodaku", + "kta": "Katua", + "ktb": "Kambaata", + "ktc": "Kholok", + "ktd": "Kokata; Kukatha", + "kte": "Nubri", + "ktf": "Kwami", + "ktg": "Kalkutung", + "kth": "Karanga", + "kti": "North Muyu", + "ktj": "Plapo Krumen", + "ktk": "Kaniet", + "ktl": "Koroshi", + "ktm": "Kurti", + "ktn": "Karitiâna", + "kto": "Kuot", + "ktp": "Kaduo", + "ktq": "Katabaga", + "kts": "South Muyu", + "ktt": "Ketum", + "ktu": "Kituba (Democratic Republic of Congo)", + "ktv": "Eastern Katu", + "ktw": "Kato", + "ktx": "Kaxararí", + "kty": "Kango (Bas-Uélé District)", + "ktz": "Juǀʼhoan; Juǀʼhoansi", + "ku": "Kurdish", + "kub": "Kutep", + "kuc": "Kwinsu", + "kud": "'Auhelawa", + "kue": "Kuman (Papua New Guinea)", + "kuf": "Western Katu", + "kug": "Kupa", + "kuh": "Kushi", + "kui": "Kuikúro-Kalapálo; Kalapalo", + "kuj": "Kuria", + "kuk": "Kepo'", + "kul": "Kulere", + "kum": "Kumyk", + "kun": "Kunama", + "kuo": "Kumukio", + "kup": "Kunimaipa", + "kuq": "Karipuna", + "kus": "Kusaal", + "kut": "Kutenai", + "kuu": "Upper Kuskokwim", + "kuv": "Kur", + "kuw": "Kpagua", + "kux": "Kukatja", + "kuy": "Kuuku-Ya'u", + "kuz": "Kunza", + "kv": "Komi", + "kva": "Bagvalal", + "kvb": "Kubu", + "kvc": "Kove", + "kvd": "Kui (Indonesia)", + "kve": "Kalabakan", + "kvf": "Kabalai", + "kvg": "Kuni-Boazi", + "kvh": "Komodo", + "kvi": "Kwang", + "kvj": "Psikye", + "kvk": "Korean Sign Language", + "kvl": "Kayaw", + "kvm": "Kendem", + "kvn": "Border Kuna", + "kvo": "Dobel", + "kvp": "Kompane", + "kvq": "Geba Karen", + "kvr": "Kerinci", + "kvt": "Lahta Karen; Lahta", + "kvu": "Yinbaw Karen", + "kvv": "Kola", + "kvw": "Wersing", + "kvx": "Parkari Koli", + "kvy": "Yintale Karen; Yintale", + "kvz": "Tsakwambo; Tsaukambo", + "kw": "Cornish", + "kwa": "Dâw", + "kwb": "Kwa", + "kwc": "Likwala", + "kwd": "Kwaio", + "kwe": "Kwerba", + "kwf": "Kwara'ae", + "kwg": "Sara Kaba Deme", + "kwh": "Kowiai", + "kwi": "Awa-Cuaiquer", + "kwj": "Kwanga", + "kwk": "Kwakiutl", + "kwl": "Kofyar", + "kwm": "Kwambi", + "kwn": "Kwangali", + "kwo": "Kwomtari", + "kwp": "Kodia", + "kwr": "Kwer", + "kws": "Kwese", + "kwt": "Kwesten", + "kwu": "Kwakum", + "kwv": "Sara Kaba Náà", + "kww": "Kwinti", + "kwx": "Khirwar", + "kwy": "San Salvador Kongo", + "kwz": "Kwadi", + "kxa": "Kairiru", + "kxb": "Krobu", + "kxc": "Konso; Khonso", + "kxd": "Brunei", + "kxf": "Manumanaw Karen; Manumanaw", + "kxh": "Karo (Ethiopia)", + "kxi": "Keningau Murut", + "kxj": "Kulfa", + "kxk": "Zayein Karen", + "kxm": "Northern Khmer", + "kxn": "Kanowit-Tanjong Melanau", + "kxo": "Kanoé", + "kxp": "Wadiyara Koli", + "kxq": "Smärky Kanum", + "kxr": "Koro (Papua New Guinea)", + "kxs": "Kangjia", + "kxt": "Koiwat", + "kxv": "Kuvi", + "kxw": "Konai", + "kxx": "Likuba", + "kxy": "Kayong", + "kxz": "Kerewo", + "ky": "Kirghiz; Kyrgyz", + "kya": "Kwaya", + "kyb": "Butbut Kalinga", + "kyc": "Kyaka", + "kyd": "Karey", + "kye": "Krache", + "kyf": "Kouya", + "kyg": "Keyagana", + "kyh": "Karok", + "kyi": "Kiput", + "kyj": "Karao", + "kyk": "Kamayo", + "kyl": "Kalapuya", + "kym": "Kpatili", + "kyn": "Northern Binukidnon", + "kyo": "Kelon", + "kyp": "Kang", + "kyq": "Kenga", + "kyr": "Kuruáya", + "kys": "Baram Kayan", + "kyt": "Kayagar", + "kyu": "Western Kayah", + "kyv": 
"Kayort", + "kyw": "Kudmali", + "kyx": "Rapoisi", + "kyy": "Kambaira", + "kyz": "Kayabí", + "kza": "Western Karaboro", + "kzb": "Kaibobo", + "kzc": "Bondoukou Kulango", + "kzd": "Kadai", + "kze": "Kosena", + "kzf": "Da'a Kaili", + "kzg": "Kikai", + "kzi": "Kelabit", + "kzk": "Kazukuru", + "kzl": "Kayeli", + "kzm": "Kais", + "kzn": "Kokola", + "kzo": "Kaningi", + "kzp": "Kaidipang", + "kzq": "Kaike", + "kzr": "Karang", + "kzs": "Sugut Dusun", + "kzu": "Kayupulau", + "kzv": "Komyandaret", + "kzw": "Karirí-Xocó", + "kzx": "Kamarian", + "kzy": "Kango (Tshopo District)", + "kzz": "Kalabra", + "la": "Latin", + "laa": "Southern Subanen", + "lab": "Linear A", + "lac": "Lacandon", + "lad": "Ladino", + "lae": "Pattani", + "laf": "Lafofa", + "lag": "Langi", + "lah": "Lahnda", + "lai": "Lambya", + "laj": "Lango (Uganda)", + "lal": "Lalia", + "lam": "Lamba", + "lan": "Laru", + "lap": "Laka (Chad)", + "laq": "Qabiao", + "lar": "Larteh", + "las": "Lama (Togo)", + "lau": "Laba", + "law": "Lauje", + "lax": "Tiwa", + "lay": "Lama Bai", + "laz": "Aribwatsa", + "lb": "Luxembourgish; Letzeburgesch", + "lbb": "Label", + "lbc": "Lakkia", + "lbe": "Lak", + "lbf": "Tinani", + "lbg": "Laopang", + "lbi": "La'bi", + "lbj": "Ladakhi", + "lbk": "Central Bontok", + "lbl": "Libon Bikol", + "lbm": "Lodhi", + "lbn": "Rmeet", + "lbo": "Laven", + "lbq": "Wampar", + "lbr": "Lohorung", + "lbs": "Libyan Sign Language", + "lbt": "Lachi", + "lbu": "Labu", + "lbv": "Lavatbura-Lamusong", + "lbw": "Tolaki", + "lbx": "Lawangan", + "lby": "Lamalama; Lamu-Lamu", + "lbz": "Lardil", + "lcc": "Legenyem", + "lcd": "Lola", + "lce": "Loncong; Sekak", + "lcf": "Lubu", + "lch": "Luchazi", + "lcl": "Lisela", + "lcm": "Tungag", + "lcp": "Western Lawa", + "lcq": "Luhu", + "lcs": "Lisabata-Nuniali", + "lda": "Kla-Dan", + "ldb": "Dũya", + "ldd": "Luri", + "ldg": "Lenyima", + "ldh": "Lamja-Dengsa-Tola", + "ldi": "Laari", + "ldj": "Lemoro", + "ldk": "Leelau", + "ldl": "Kaan", + "ldm": "Landoma", + "ldn": "Láadan", + "ldo": "Loo", + "ldp": "Tso", + "ldq": "Lufu", + "lea": "Lega-Shabunda", + "leb": "Lala-Bisa", + "lec": "Leco", + "led": "Lendu", + "lee": "Lyélé", + "lef": "Lelemi", + "leh": "Lenje", + "lei": "Lemio", + "lej": "Lengola", + "lek": "Leipon", + "lel": "Lele (Democratic Republic of Congo)", + "lem": "Nomaande", + "len": "Lenca", + "leo": "Leti (Cameroon)", + "lep": "Lepcha", + "leq": "Lembena", + "ler": "Lenkau", + "les": "Lese", + "let": "Lesing-Gelimi; Amio-Gelimi", + "leu": "Kara (Papua New Guinea)", + "lev": "Lamma", + "lew": "Ledo Kaili", + "lex": "Luang", + "ley": "Lemolang", + "lez": "Lezghian", + "lfa": "Lefa", + "lfn": "Lingua Franca Nova", + "lg": "Ganda; Luganda", + "lga": "Lungga", + "lgb": "Laghu", + "lgg": "Lugbara", + "lgh": "Laghuu", + "lgi": "Lengilu", + "lgk": "Lingarak; Neverver", + "lgl": "Wala", + "lgm": "Lega-Mwenga", + "lgn": "T'apo; Opuuo", + "lgo": "Lango (South Sudan)", + "lgq": "Logba", + "lgr": "Lengo", + "lgt": "Pahi", + "lgu": "Longgu", + "lgz": "Ligenza", + "lha": "Laha (Viet Nam)", + "lhh": "Laha (Indonesia)", + "lhi": "Lahu Shi", + "lhl": "Lahul Lohar", + "lhm": "Lhomi", + "lhn": "Lahanan", + "lhp": "Lhokpu", + "lhs": "Mlahsö", + "lht": "Lo-Toga", + "lhu": "Lahu", + "li": "Limburgan; Limburger; Limburgish", + "lia": "West-Central Limba", + "lib": "Likum", + "lic": "Hlai", + "lid": "Nyindrou", + "lie": "Likila", + "lif": "Limbu", + "lig": "Ligbi", + "lih": "Lihir", + "lij": "Ligurian", + "lik": "Lika", + "lil": "Lillooet", + "lio": "Liki", + "lip": "Sekpele", + "liq": "Libido", + "lir": "Liberian English", + 
"lis": "Lisu", + "liu": "Logorik", + "liv": "Liv", + "liw": "Col", + "lix": "Liabuku", + "liy": "Banda-Bambari", + "liz": "Libinza", + "lja": "Golpa", + "lje": "Rampi", + "lji": "Laiyolo", + "ljl": "Li'o", + "ljp": "Lampung Api", + "ljw": "Yirandali", + "ljx": "Yuru", + "lka": "Lakalei", + "lkb": "Kabras; Lukabaras", + "lkc": "Kucong", + "lkd": "Lakondê", + "lke": "Kenyi", + "lkh": "Lakha", + "lki": "Laki", + "lkj": "Remun", + "lkl": "Laeko-Libuat", + "lkm": "Kalaamaya", + "lkn": "Lakon; Vure", + "lko": "Khayo; Olukhayo", + "lkr": "Päri", + "lks": "Kisa; Olushisa", + "lkt": "Lakota", + "lku": "Kungkari", + "lky": "Lokoya", + "lla": "Lala-Roba", + "llb": "Lolo", + "llc": "Lele (Guinea)", + "lld": "Ladin", + "lle": "Lele (Papua New Guinea)", + "llf": "Hermit", + "llg": "Lole", + "llh": "Lamu", + "lli": "Teke-Laali", + "llj": "Ladji Ladji", + "llk": "Lelak", + "lll": "Lilau", + "llm": "Lasalimu", + "lln": "Lele (Chad)", + "llp": "North Efate", + "llq": "Lolak", + "lls": "Lithuanian Sign Language", + "llu": "Lau", + "llx": "Lauan", + "lma": "East Limba", + "lmb": "Merei", + "lmc": "Limilngan", + "lmd": "Lumun", + "lme": "Pévé", + "lmf": "South Lembata", + "lmg": "Lamogai", + "lmh": "Lambichhong", + "lmi": "Lombi", + "lmj": "West Lembata", + "lmk": "Lamkang", + "lml": "Hano", + "lmn": "Lambadi", + "lmo": "Lombard", + "lmp": "Limbum", + "lmq": "Lamatuka", + "lmr": "Lamalera", + "lmu": "Lamenu", + "lmv": "Lomaiviti", + "lmw": "Lake Miwok", + "lmx": "Laimbue", + "lmy": "Lamboya", + "ln": "Lingala", + "lna": "Langbashe", + "lnb": "Mbalanhu", + "lnd": "Lundayeh; Lun Bawang", + "lng": "Langobardic", + "lnh": "Lanoh", + "lni": "Daantanai'", + "lnj": "Leningitij", + "lnl": "South Central Banda", + "lnm": "Langam", + "lnn": "Lorediakarkar", + "lns": "Lamnso'", + "lnu": "Longuda", + "lnw": "Lanima", + "lnz": "Lonzo", + "lo": "Lao", + "loa": "Loloda", + "lob": "Lobi", + "loc": "Inonhan", + "loe": "Saluan", + "lof": "Logol", + "log": "Logo", + "loh": "Narim", + "loi": "Loma (Côte d'Ivoire)", + "loj": "Lou", + "lok": "Loko", + "lol": "Mongo", + "lom": "Loma (Liberia)", + "lon": "Malawi Lomwe", + "loo": "Lombo", + "lop": "Lopa", + "loq": "Lobala", + "lor": "Téén", + "los": "Loniu", + "lot": "Otuho", + "lou": "Louisiana Creole", + "lov": "Lopi", + "low": "Tampias Lobu", + "lox": "Loun", + "loy": "Loke", + "loz": "Lozi", + "lpa": "Lelepa", + "lpe": "Lepki", + "lpn": "Long Phuri Naga", + "lpo": "Lipo", + "lpx": "Lopit", + "lqr": "Logir", + "lra": "Rara Bakati'", + "lrc": "Northern Luri", + "lre": "Laurentian", + "lrg": "Laragia", + "lri": "Marachi; Olumarachi", + "lrk": "Loarki", + "lrl": "Lari", + "lrm": "Marama; Olumarama", + "lrn": "Lorang", + "lro": "Laro", + "lrr": "Southern Yamphu", + "lrt": "Larantuka Malay", + "lrv": "Larevat", + "lrz": "Lemerig", + "lsa": "Lasgerdi", + "lsb": "Burundian Sign Language; Langue des Signes Burundaise", + "lsc": "Albarradas Sign Language; Lengua de señas Albarradas", + "lsd": "Lishana Deni", + "lse": "Lusengo", + "lsh": "Lish", + "lsi": "Lashi", + "lsl": "Latvian Sign Language", + "lsm": "Saamia; Olusamia", + "lsn": "Tibetan Sign Language", + "lso": "Laos Sign Language", + "lsp": "Panamanian Sign Language; Lengua de Señas Panameñas", + "lsr": "Aruop", + "lss": "Lasi", + "lst": "Trinidad and Tobago Sign Language", + "lsv": "Sivia Sign Language", + "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise", + "lsy": "Mauritian Sign Language", + "lt": "Lithuanian", + "ltc": "Late Middle Chinese", + "ltg": "Latgalian", + "lth": "Thur", + "lti": "Leti 
(Indonesia)", + "ltn": "Latundê", + "lto": "Tsotso; Olutsotso", + "lts": "Tachoni; Lutachoni", + "ltu": "Latu", + "lu": "Luba-Katanga", + "lua": "Luba-Lulua", + "luc": "Aringa", + "lud": "Ludian", + "lue": "Luvale", + "luf": "Laua", + "lui": "Luiseno", + "luj": "Luna", + "luk": "Lunanakha", + "lul": "Olu'bo", + "lum": "Luimbi", + "lun": "Lunda", + "luo": "Luo (Kenya and Tanzania); Dholuo", + "lup": "Lumbu", + "luq": "Lucumi", + "lur": "Laura", + "lus": "Lushai", + "lut": "Lushootseed", + "luu": "Lumba-Yakkha", + "luv": "Luwati", + "luw": "Luo (Cameroon)", + "luy": "Luyia; Oluluyia", + "luz": "Southern Luri", + "lv": "Latvian", + "lva": "Maku'a", + "lvi": "Lavi", + "lvk": "Lavukaleve", + "lvs": "Standard Latvian", + "lvu": "Levuka", + "lwa": "Lwalu", + "lwe": "Lewo Eleng", + "lwg": "Wanga; Oluwanga", + "lwh": "White Lachi", + "lwl": "Eastern Lawa", + "lwm": "Laomian", + "lwo": "Luwo", + "lws": "Malawian Sign Language", + "lwt": "Lewotobi", + "lwu": "Lawu", + "lww": "Lewo", + "lxm": "Lakurumau", + "lya": "Layakha", + "lyg": "Lyngngam", + "lyn": "Luyana", + "lzh": "Literary Chinese", + "lzl": "Litzlitz", + "lzn": "Leinong Naga", + "lzz": "Laz", + "maa": "San Jerónimo Tecóatl Mazatec", + "mab": "Yutanduchi Mixtec", + "mad": "Madurese", + "mae": "Bo-Rukul", + "maf": "Mafa", + "mag": "Magahi", + "mai": "Maithili", + "maj": "Jalapa De Díaz Mazatec", + "mak": "Makasar", + "mam": "Mam", + "man": "Mandingo; Manding", + "map": "Austronesian languages", + "maq": "Chiquihuitlán Mazatec", + "mas": "Masai", + "mat": "San Francisco Matlatzinca", + "mau": "Huautla Mazatec", + "mav": "Sateré-Mawé", + "maw": "Mampruli", + "max": "North Moluccan Malay", + "maz": "Central Mazahua", + "mba": "Higaonon", + "mbb": "Western Bukidnon Manobo", + "mbc": "Macushi", + "mbd": "Dibabawon Manobo", + "mbe": "Molale", + "mbf": "Baba Malay", + "mbh": "Mangseng", + "mbi": "Ilianen Manobo", + "mbj": "Nadëb", + "mbk": "Malol", + "mbl": "Maxakalí", + "mbm": "Ombamba", + "mbn": "Macaguán", + "mbo": "Mbo (Cameroon)", + "mbp": "Malayo", + "mbq": "Maisin", + "mbr": "Nukak Makú", + "mbs": "Sarangani Manobo", + "mbt": "Matigsalug Manobo", + "mbu": "Mbula-Bwazza", + "mbv": "Mbulungish", + "mbw": "Maring", + "mbx": "Mari (East Sepik Province)", + "mby": "Memoni", + "mbz": "Amoltepec Mixtec", + "mca": "Maca", + "mcb": "Machiguenga", + "mcc": "Bitur", + "mcd": "Sharanahua", + "mce": "Itundujia Mixtec", + "mcf": "Matsés", + "mcg": "Mapoyo", + "mch": "Maquiritari", + "mci": "Mese", + "mcj": "Mvanip", + "mck": "Mbunda", + "mcl": "Macaguaje", + "mcm": "Malaccan Creole Portuguese", + "mcn": "Masana", + "mco": "Coatlán Mixe", + "mcp": "Makaa", + "mcq": "Ese", + "mcr": "Menya", + "mcs": "Mambai", + "mct": "Mengisa", + "mcu": "Cameroon Mambila", + "mcv": "Minanibai", + "mcw": "Mawa (Chad)", + "mcx": "Mpiemo", + "mcy": "South Watut", + "mcz": "Mawan", + "mda": "Mada (Nigeria)", + "mdb": "Morigi", + "mdc": "Male (Papua New Guinea)", + "mdd": "Mbum", + "mde": "Maba (Chad)", + "mdf": "Moksha", + "mdg": "Massalat", + "mdh": "Maguindanaon", + "mdi": "Mamvu", + "mdj": "Mangbetu", + "mdk": "Mangbutu", + "mdl": "Maltese Sign Language", + "mdm": "Mayogo", + "mdn": "Mbati", + "mdp": "Mbala", + "mdq": "Mbole", + "mdr": "Mandar", + "mds": "Maria (Papua New Guinea)", + "mdt": "Mbere", + "mdu": "Mboko", + "mdv": "Santa Lucía Monteverde Mixtec", + "mdw": "Mbosi", + "mdx": "Dizin", + "mdy": "Male (Ethiopia)", + "mdz": "Suruí Do Pará", + "mea": "Menka", + "meb": "Ikobi", + "mec": "Marra", + "med": "Melpa", + "mee": "Mengen", + "mef": "Megam", + "meh": 
"Southwestern Tlaxiaco Mixtec", + "mei": "Midob", + "mej": "Meyah", + "mek": "Mekeo", + "mel": "Central Melanau", + "mem": "Mangala", + "men": "Mende (Sierra Leone)", + "meo": "Kedah Malay", + "mep": "Miriwoong", + "meq": "Merey", + "mer": "Meru", + "mes": "Masmaje", + "met": "Mato", + "meu": "Motu", + "mev": "Mano", + "mew": "Maaka", + "mey": "Hassaniyya", + "mez": "Menominee", + "mfa": "Pattani Malay", + "mfb": "Bangka", + "mfc": "Mba", + "mfd": "Mendankwe-Nkwen", + "mfe": "Morisyen", + "mff": "Naki", + "mfg": "Mogofin", + "mfh": "Matal", + "mfi": "Wandala", + "mfj": "Mefele", + "mfk": "North Mofu", + "mfl": "Putai", + "mfm": "Marghi South", + "mfn": "Cross River Mbembe", + "mfo": "Mbe", + "mfp": "Makassar Malay", + "mfq": "Moba", + "mfr": "Marrithiyel", + "mfs": "Mexican Sign Language", + "mft": "Mokerang", + "mfu": "Mbwela", + "mfv": "Mandjak", + "mfw": "Mulaha", + "mfx": "Melo", + "mfy": "Mayo", + "mfz": "Mabaan", + "mg": "Malagasy", + "mga": "Middle Irish (900-1200)", + "mgb": "Mararit", + "mgc": "Morokodo", + "mgd": "Moru", + "mge": "Mango", + "mgf": "Maklew", + "mgg": "Mpumpong", + "mgh": "Makhuwa-Meetto", + "mgi": "Lijili", + "mgj": "Abureni", + "mgk": "Mawes", + "mgl": "Maleu-Kilenge", + "mgm": "Mambae", + "mgn": "Mbangi", + "mgo": "Meta'", + "mgp": "Eastern Magar", + "mgq": "Malila", + "mgr": "Mambwe-Lungu", + "mgs": "Manda (Tanzania)", + "mgt": "Mongol", + "mgu": "Mailu", + "mgv": "Matengo", + "mgw": "Matumbi", + "mgy": "Mbunga", + "mgz": "Mbugwe", + "mh": "Marshallese", + "mha": "Manda (India)", + "mhb": "Mahongwe", + "mhc": "Mocho", + "mhd": "Mbugu", + "mhe": "Besisi; Mah Meri", + "mhf": "Mamaa", + "mhg": "Margu", + "mhi": "Ma'di", + "mhj": "Mogholi", + "mhk": "Mungaka", + "mhl": "Mauwake", + "mhm": "Makhuwa-Moniga", + "mhn": "Mócheno", + "mho": "Mashi (Zambia)", + "mhp": "Balinese Malay", + "mhq": "Mandan", + "mhr": "Eastern Mari", + "mhs": "Buru (Indonesia)", + "mht": "Mandahuaca", + "mhu": "Digaro-Mishmi; Darang Deng", + "mhw": "Mbukushu", + "mhx": "Maru; Lhaovo", + "mhy": "Ma'anyan", + "mhz": "Mor (Mor Islands)", + "mi": "Maori", + "mia": "Miami", + "mib": "Atatláhuca Mixtec", + "mic": "Mi'kmaq; Micmac", + "mid": "Mandaic", + "mie": "Ocotepec Mixtec", + "mif": "Mofu-Gudur", + "mig": "San Miguel El Grande Mixtec", + "mih": "Chayuco Mixtec", + "mii": "Chigmecatitlán Mixtec", + "mij": "Abar; Mungbam", + "mik": "Mikasuki", + "mil": "Peñoles Mixtec", + "mim": "Alacatlatzala Mixtec", + "min": "Minangkabau", + "mio": "Pinotepa Nacional Mixtec", + "mip": "Apasco-Apoala Mixtec", + "miq": "Mískito", + "mir": "Isthmus Mixe", + "mit": "Southern Puebla Mixtec", + "miu": "Cacaloxtepec Mixtec", + "miw": "Akoye", + "mix": "Mixtepec Mixtec", + "miy": "Ayutla Mixtec", + "miz": "Coatzospan Mixtec", + "mjb": "Makalero", + "mjc": "San Juan Colorado Mixtec", + "mjd": "Northwest Maidu", + "mje": "Muskum", + "mjg": "Tu", + "mjh": "Mwera (Nyasa)", + "mji": "Kim Mun", + "mjj": "Mawak", + "mjk": "Matukar", + "mjl": "Mandeali", + "mjm": "Medebur", + "mjn": "Ma (Papua New Guinea)", + "mjo": "Malankuravan", + "mjp": "Malapandaram", + "mjq": "Malaryan", + "mjr": "Malavedan", + "mjs": "Miship", + "mjt": "Sauria Paharia", + "mju": "Manna-Dora", + "mjv": "Mannan", + "mjw": "Karbi", + "mjx": "Mahali", + "mjy": "Mahican", + "mjz": "Majhi", + "mk": "Macedonian", + "mka": "Mbre", + "mkb": "Mal Paharia", + "mkc": "Siliput", + "mke": "Mawchi", + "mkf": "Miya", + "mkg": "Mak (China)", + "mkh": "Mon-Khmer languages", + "mki": "Dhatki", + "mkj": "Mokilese", + "mkk": "Byep", + "mkl": "Mokole", + "mkm": "Moklen", + 
"mkn": "Kupang Malay", + "mko": "Mingang Doso", + "mkp": "Moikodi", + "mkq": "Bay Miwok", + "mkr": "Malas", + "mks": "Silacayoapan Mixtec", + "mkt": "Vamale", + "mku": "Konyanka Maninka", + "mkv": "Mafea", + "mkw": "Kituba (Congo)", + "mkx": "Kinamiging Manobo", + "mky": "East Makian", + "mkz": "Makasae", + "ml": "Malayalam", + "mla": "Malo", + "mlb": "Mbule", + "mlc": "Cao Lan", + "mle": "Manambu", + "mlf": "Mal", + "mlh": "Mape", + "mli": "Malimpung", + "mlj": "Miltu", + "mlk": "Ilwana; Kiwilwana", + "mll": "Malua Bay", + "mlm": "Mulam", + "mln": "Malango", + "mlo": "Mlomp", + "mlp": "Bargam", + "mlq": "Western Maninkakan", + "mlr": "Vame", + "mls": "Masalit", + "mlu": "To'abaita", + "mlv": "Motlav; Mwotlap", + "mlw": "Moloko", + "mlx": "Malfaxal; Naha'ai", + "mlz": "Malaynon", + "mma": "Mama", + "mmb": "Momina", + "mmc": "Michoacán Mazahua", + "mmd": "Maonan", + "mme": "Mae", + "mmf": "Mundat", + "mmg": "North Ambrym", + "mmh": "Mehináku", + "mmi": "Musar", + "mmj": "Majhwar", + "mmk": "Mukha-Dora", + "mml": "Man Met", + "mmm": "Maii", + "mmn": "Mamanwa", + "mmo": "Mangga Buang", + "mmp": "Siawi", + "mmq": "Musak", + "mmr": "Western Xiangxi Miao", + "mmt": "Malalamai", + "mmu": "Mmaala", + "mmv": "Miriti", + "mmw": "Emae", + "mmx": "Madak", + "mmy": "Migaama", + "mmz": "Mabaale", + "mn": "Mongolian", + "mna": "Mbula", + "mnb": "Muna", + "mnc": "Manchu", + "mnd": "Mondé", + "mne": "Naba", + "mnf": "Mundani", + "mng": "Eastern Mnong", + "mnh": "Mono (Democratic Republic of Congo)", + "mni": "Manipuri", + "mnj": "Munji", + "mnk": "Mandinka", + "mnl": "Tiale", + "mnm": "Mapena", + "mnn": "Southern Mnong", + "mno": "Manobo languages", + "mnp": "Min Bei Chinese", + "mnq": "Minriq", + "mnr": "Mono (USA)", + "mns": "Mansi", + "mnu": "Mer", + "mnv": "Rennell-Bellona", + "mnw": "Mon", + "mnx": "Manikion", + "mny": "Manyawa", + "mnz": "Moni", + "moa": "Mwan", + "moc": "Mocoví", + "mod": "Mobilian", + "moe": "Innu; Montagnais", + "mog": "Mongondow", + "moh": "Mohawk", + "moi": "Mboi", + "moj": "Monzombo", + "mok": "Morori", + "mom": "Mangue", + "moo": "Monom", + "mop": "Mopán Maya", + "moq": "Mor (Bomberai Peninsula)", + "mor": "Moro", + "mos": "Mossi", + "mot": "Barí", + "mou": "Mogum", + "mov": "Mohave", + "mow": "Moi (Congo)", + "mox": "Molima", + "moy": "Shekkacho", + "moz": "Mukulu; Gergiko", + "mpa": "Mpoto", + "mpb": "Malak Malak; Mullukmulluk", + "mpc": "Mangarrayi", + "mpd": "Machinere", + "mpe": "Majang", + "mpg": "Marba", + "mph": "Maung", + "mpi": "Mpade", + "mpj": "Martu Wangka; Wangkajunga", + "mpk": "Mbara (Chad)", + "mpl": "Middle Watut", + "mpm": "Yosondúa Mixtec", + "mpn": "Mindiri", + "mpo": "Miu", + "mpp": "Migabac", + "mpq": "Matís", + "mpr": "Vangunu", + "mps": "Dadibi", + "mpt": "Mian", + "mpu": "Makuráp", + "mpv": "Mungkip", + "mpw": "Mapidian", + "mpx": "Misima-Panaeati", + "mpy": "Mapia", + "mpz": "Mpi", + "mqa": "Maba (Indonesia)", + "mqb": "Mbuko", + "mqc": "Mangole", + "mqe": "Matepi", + "mqf": "Momuna", + "mqg": "Kota Bangun Kutai Malay", + "mqh": "Tlazoyaltepec Mixtec", + "mqi": "Mariri", + "mqj": "Mamasa", + "mqk": "Rajah Kabunsuwan Manobo", + "mql": "Mbelime", + "mqm": "South Marquesan", + "mqn": "Moronene", + "mqo": "Modole", + "mqp": "Manipa", + "mqq": "Minokok", + "mqr": "Mander", + "mqs": "West Makian", + "mqt": "Mok", + "mqu": "Mandari", + "mqv": "Mosimo", + "mqw": "Murupi", + "mqx": "Mamuju", + "mqy": "Manggarai", + "mqz": "Pano", + "mr": "Marathi", + "mra": "Mlabri", + "mrb": "Marino", + "mrc": "Maricopa", + "mrd": "Western Magar", + "mre": "Martha's Vineyard 
Sign Language", + "mrf": "Elseng", + "mrg": "Mising", + "mrh": "Mara Chin", + "mrj": "Western Mari", + "mrk": "Hmwaveke", + "mrl": "Mortlockese", + "mrm": "Merlav; Mwerlap", + "mrn": "Cheke Holo", + "mro": "Mru", + "mrp": "Morouas", + "mrq": "North Marquesan", + "mrr": "Maria (India)", + "mrs": "Maragus", + "mrt": "Marghi Central", + "mru": "Mono (Cameroon)", + "mrv": "Mangareva", + "mrw": "Maranao", + "mrx": "Maremgi; Dineor", + "mry": "Mandaya", + "mrz": "Marind", + "ms": "Malay (macrolanguage)", + "msb": "Masbatenyo", + "msc": "Sankaran Maninka", + "msd": "Yucatec Maya Sign Language", + "mse": "Musey", + "msf": "Mekwei", + "msg": "Moraid", + "msh": "Masikoro Malagasy", + "msi": "Sabah Malay", + "msj": "Ma (Democratic Republic of Congo)", + "msk": "Mansaka", + "msl": "Molof; Poule", + "msm": "Agusan Manobo", + "msn": "Vurës", + "mso": "Mombum", + "msp": "Maritsauá", + "msq": "Caac", + "msr": "Mongolian Sign Language", + "mss": "West Masela", + "msu": "Musom", + "msv": "Maslam", + "msw": "Mansoanka", + "msx": "Moresada", + "msy": "Aruamu", + "msz": "Momare", + "mt": "Maltese", + "mta": "Cotabato Manobo", + "mtb": "Anyin Morofo", + "mtc": "Munit", + "mtd": "Mualang", + "mte": "Mono (Solomon Islands)", + "mtf": "Murik (Papua New Guinea)", + "mtg": "Una", + "mth": "Munggui", + "mti": "Maiwa (Papua New Guinea)", + "mtj": "Moskona", + "mtk": "Mbe'", + "mtl": "Montol", + "mtm": "Mator", + "mtn": "Matagalpa", + "mto": "Totontepec Mixe", + "mtp": "Wichí Lhamtés Nocten", + "mtq": "Muong", + "mtr": "Mewari", + "mts": "Yora", + "mtt": "Mota", + "mtu": "Tututepec Mixtec", + "mtv": "Asaro'o", + "mtw": "Southern Binukidnon", + "mtx": "Tidaá Mixtec", + "mty": "Nabi", + "mua": "Mundang", + "mub": "Mubi", + "muc": "Ajumbu", + "mud": "Mednyj Aleut", + "mue": "Media Lengua", + "mug": "Musgu", + "muh": "Mündü", + "mui": "Musi", + "muj": "Mabire", + "muk": "Mugom", + "mum": "Maiwala", + "mun": "Munda languages", + "muo": "Nyong", + "mup": "Malvi", + "muq": "Eastern Xiangxi Miao", + "mur": "Murle", + "mus": "Creek", + "mut": "Western Muria", + "muu": "Yaaku", + "muv": "Muthuvan", + "mux": "Bo-Ung", + "muy": "Muyang", + "muz": "Mursi", + "mva": "Manam", + "mvb": "Mattole", + "mvd": "Mamboru", + "mve": "Marwari (Pakistan)", + "mvf": "Peripheral Mongolian", + "mvg": "Yucuañe Mixtec", + "mvh": "Mulgi", + "mvi": "Miyako", + "mvk": "Mekmek", + "mvl": "Mbara (Australia)", + "mvn": "Minaveha", + "mvo": "Marovo", + "mvp": "Duri", + "mvq": "Moere", + "mvr": "Marau", + "mvs": "Massep", + "mvt": "Mpotovoro", + "mvu": "Marfa", + "mvv": "Tagal Murut", + "mvw": "Machinga", + "mvx": "Meoswar", + "mvy": "Indus Kohistani", + "mvz": "Mesqan", + "mwa": "Mwatebu", + "mwb": "Juwal", + "mwc": "Are", + "mwe": "Mwera (Chimwera)", + "mwf": "Murrinh-Patha", + "mwg": "Aiklep", + "mwh": "Mouk-Aria", + "mwi": "Labo; Ninde", + "mwk": "Kita Maninkakan", + "mwl": "Mirandese", + "mwm": "Sar", + "mwn": "Nyamwanga", + "mwo": "Central Maewo", + "mwp": "Kala Lagaw Ya", + "mwq": "Mün Chin", + "mwr": "Marwari", + "mws": "Mwimbi-Muthambi", + "mwt": "Moken", + "mwu": "Mittu", + "mwv": "Mentawai", + "mww": "Hmong Daw", + "mwz": "Moingi", + "mxa": "Northwest Oaxaca Mixtec", + "mxb": "Tezoatlán Mixtec", + "mxc": "Manyika", + "mxd": "Modang", + "mxe": "Mele-Fila", + "mxf": "Malgbe", + "mxg": "Mbangala", + "mxh": "Mvuba", + "mxi": "Mozarabic", + "mxj": "Miju-Mishmi; Geman Deng", + "mxk": "Monumbo", + "mxl": "Maxi Gbe", + "mxm": "Meramera", + "mxn": "Moi (Indonesia)", + "mxo": "Mbowe", + "mxp": "Tlahuitoltepec Mixe", + "mxq": "Juquila Mixe", + "mxr": "Murik 
(Malaysia)", + "mxs": "Huitepec Mixtec", + "mxt": "Jamiltepec Mixtec", + "mxu": "Mada (Cameroon)", + "mxv": "Metlatónoc Mixtec", + "mxw": "Namo", + "mxx": "Mahou; Mawukakan", + "mxy": "Southeastern Nochixtlán Mixtec", + "mxz": "Central Masela", + "my": "Burmese", + "myb": "Mbay", + "myc": "Mayeka", + "mye": "Myene", + "myf": "Bambassi", + "myg": "Manta", + "myh": "Makah", + "myj": "Mangayat", + "myk": "Mamara Senoufo", + "myl": "Moma", + "mym": "Me'en", + "myn": "Mayan languages", + "myo": "Anfillo", + "myp": "Pirahã", + "myr": "Muniche", + "mys": "Mesmes", + "myu": "Mundurukú", + "myv": "Erzya", + "myw": "Muyuw", + "myx": "Masaaba", + "myy": "Macuna", + "myz": "Classical Mandaic", + "mza": "Santa María Zacatepec Mixtec", + "mzb": "Tumzabt", + "mzc": "Madagascar Sign Language", + "mzd": "Malimba", + "mze": "Morawa", + "mzg": "Monastic Sign Language", + "mzh": "Wichí Lhamtés Güisnay", + "mzi": "Ixcatlán Mazatec", + "mzj": "Manya", + "mzk": "Nigeria Mambila", + "mzl": "Mazatlán Mixe", + "mzm": "Mumuye", + "mzn": "Mazanderani", + "mzo": "Matipuhy", + "mzp": "Movima", + "mzq": "Mori Atas", + "mzr": "Marúbo", + "mzs": "Macanese", + "mzt": "Mintil", + "mzu": "Inapang", + "mzv": "Manza", + "mzw": "Deg", + "mzx": "Mawayana", + "mzy": "Mozambican Sign Language", + "mzz": "Maiadomu", + "na": "Nauru", + "naa": "Namla", + "nab": "Southern Nambikuára", + "nac": "Narak", + "nae": "Naka'ela", + "naf": "Nabak", + "nag": "Naga Pidgin", + "nah": "Nahuatl languages", + "nai": "North American Indian languages", + "naj": "Nalu", + "nak": "Nakanai", + "nal": "Nalik", + "nam": "Ngan'gityemerri", + "nan": "Min Nan Chinese", + "nao": "Naaba", + "nap": "Neapolitan", + "naq": "Khoekhoe; Nama (Namibia)", + "nar": "Iguta", + "nas": "Naasioi", + "nat": "Ca̱hungwa̱rya̱; Hungworo", + "naw": "Nawuri", + "nax": "Nakwi", + "nay": "Ngarrindjeri", + "naz": "Coatepec Nahuatl", + "nb": "Norwegian Bokmål", + "nba": "Nyemba", + "nbb": "Ndoe", + "nbc": "Chang Naga", + "nbd": "Ngbinda", + "nbe": "Konyak Naga", + "nbg": "Nagarchal", + "nbh": "Ngamo", + "nbi": "Mao Naga", + "nbj": "Ngarinyman", + "nbk": "Nake", + "nbm": "Ngbaka Ma'bo", + "nbn": "Kuri", + "nbo": "Nkukoli", + "nbp": "Nnam", + "nbq": "Nggem", + "nbr": "Numana", + "nbs": "Namibian Sign Language", + "nbt": "Na", + "nbu": "Rongmei Naga", + "nbv": "Ngamambo", + "nbw": "Southern Ngbandi", + "nby": "Ningera", + "nca": "Iyo", + "ncb": "Central Nicobarese", + "ncc": "Ponam", + "ncd": "Nachering", + "nce": "Yale", + "ncf": "Notsi", + "ncg": "Nisga'a", + "nch": "Central Huasteca Nahuatl", + "nci": "Classical Nahuatl", + "ncj": "Northern Puebla Nahuatl", + "nck": "Na-kara", + "ncl": "Michoacán Nahuatl", + "ncm": "Nambo", + "ncn": "Nauna", + "nco": "Sibe", + "ncq": "Northern Katang", + "ncr": "Ncane", + "ncs": "Nicaraguan Sign Language", + "nct": "Chothe Naga", + "ncu": "Chumburung", + "ncx": "Central Puebla Nahuatl", + "ncz": "Natchez", + "nd": "North Ndebele", + "nda": "Ndasa", + "ndb": "Kenswei Nsei", + "ndc": "Ndau", + "ndd": "Nde-Nsele-Nta", + "ndf": "Nadruvian", + "ndg": "Ndengereko", + "ndh": "Ndali", + "ndi": "Samba Leko", + "ndj": "Ndamba", + "ndk": "Ndaka", + "ndl": "Ndolo", + "ndm": "Ndam", + "ndn": "Ngundi", + "ndp": "Ndo", + "ndq": "Ndombe", + "ndr": "Ndoola", + "nds": "Low German; Low Saxon", + "ndt": "Ndunga", + "ndu": "Dugun", + "ndv": "Ndut", + "ndw": "Ndobo", + "ndx": "Nduga", + "ndy": "Lutos", + "ndz": "Ndogo", + "ne": "Nepali (macrolanguage)", + "nea": "Eastern Ngad'a", + "neb": "Toura (Côte d'Ivoire)", + "nec": "Nedebang", + "ned": "Nde-Gbite", + "nee": 
"Nêlêmwa-Nixumwak", + "nef": "Nefamese", + "neg": "Negidal", + "neh": "Nyenkha", + "nei": "Neo-Hittite", + "nej": "Neko", + "nek": "Neku", + "nem": "Nemi", + "nen": "Nengone", + "neo": "Ná-Meo", + "neq": "North Central Mixe", + "ner": "Yahadian", + "nes": "Bhoti Kinnauri", + "net": "Nete", + "neu": "Neo", + "nev": "Nyaheun", + "new": "Newari; Nepal Bhasa", + "nex": "Neme", + "ney": "Neyo", + "nez": "Nez Perce", + "nfa": "Dhao", + "nfd": "Ahwai", + "nfl": "Ayiwo; Äiwoo", + "nfr": "Nafaanra", + "nfu": "Mfumte", + "ng": "Ndonga", + "nga": "Ngbaka", + "ngb": "Northern Ngbandi", + "ngc": "Ngombe (Democratic Republic of Congo)", + "ngd": "Ngando (Central African Republic)", + "nge": "Ngemba", + "ngf": "Trans-New Guinea languages", + "ngg": "Ngbaka Manza", + "ngh": "Nǁng", + "ngi": "Ngizim", + "ngj": "Ngie", + "ngk": "Dalabon", + "ngl": "Lomwe", + "ngm": "Ngatik Men's Creole", + "ngn": "Ngwo", + "ngp": "Ngulu", + "ngq": "Ngurimi; Ngoreme", + "ngr": "Engdewu", + "ngs": "Gvoko", + "ngt": "Kriang; Ngeq", + "ngu": "Guerrero Nahuatl", + "ngv": "Nagumi", + "ngw": "Ngwaba", + "ngx": "Nggwahyi", + "ngy": "Tibea", + "ngz": "Ngungwel", + "nha": "Nhanda", + "nhb": "Beng", + "nhc": "Tabasco Nahuatl", + "nhd": "Chiripá; Ava Guaraní", + "nhe": "Eastern Huasteca Nahuatl", + "nhf": "Nhuwala", + "nhg": "Tetelcingo Nahuatl", + "nhh": "Nahari", + "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl", + "nhk": "Isthmus-Cosoleacaque Nahuatl", + "nhm": "Morelos Nahuatl", + "nhn": "Central Nahuatl", + "nho": "Takuu", + "nhp": "Isthmus-Pajapan Nahuatl", + "nhq": "Huaxcaleca Nahuatl", + "nhr": "Naro", + "nht": "Ometepec Nahuatl", + "nhu": "Noone", + "nhv": "Temascaltepec Nahuatl", + "nhw": "Western Huasteca Nahuatl", + "nhx": "Isthmus-Mecayapan Nahuatl", + "nhy": "Northern Oaxaca Nahuatl", + "nhz": "Santa María La Alta Nahuatl", + "nia": "Nias", + "nib": "Nakame", + "nic": "Niger-Kordofanian languages", + "nid": "Ngandi", + "nie": "Niellim", + "nif": "Nek", + "nig": "Ngalakgan", + "nih": "Nyiha (Tanzania)", + "nii": "Nii", + "nij": "Ngaju", + "nik": "Southern Nicobarese", + "nil": "Nila", + "nim": "Nilamba", + "nin": "Ninzo", + "nio": "Nganasan", + "niq": "Nandi", + "nir": "Nimboran", + "nis": "Nimi", + "nit": "Southeastern Kolami", + "niu": "Niuean", + "niv": "Gilyak", + "niw": "Nimo", + "nix": "Hema", + "niy": "Ngiti", + "niz": "Ningil", + "nja": "Nzanyi", + "njb": "Nocte Naga", + "njd": "Ndonde Hamba", + "njh": "Lotha Naga", + "nji": "Gudanji", + "njj": "Njen", + "njl": "Njalgulgule", + "njm": "Angami Naga", + "njn": "Liangmai Naga", + "njo": "Ao Naga", + "njr": "Njerep", + "njs": "Nisa", + "njt": "Ndyuka-Trio Pidgin", + "nju": "Ngadjunmaya", + "njx": "Kunyi", + "njy": "Njyem", + "njz": "Nyishi", + "nka": "Nkoya", + "nkb": "Khoibu Naga", + "nkc": "Nkongho", + "nkd": "Koireng", + "nke": "Duke", + "nkf": "Inpui Naga", + "nkg": "Nekgini", + "nkh": "Khezha Naga", + "nki": "Thangal Naga", + "nkj": "Nakai", + "nkk": "Nokuku", + "nkm": "Namat", + "nkn": "Nkangala", + "nko": "Nkonya", + "nkp": "Niuatoputapu", + "nkq": "Nkami", + "nkr": "Nukuoro", + "nks": "North Asmat", + "nkt": "Nyika (Tanzania)", + "nku": "Bouna Kulango", + "nkv": "Nyika (Malawi and Zambia)", + "nkw": "Nkutu", + "nkx": "Nkoroo", + "nkz": "Nkari", + "nl": "Dutch; Flemish", + "nla": "Ngombale", + "nlc": "Nalca", + "nle": "East Nyala", + "nlg": "Gela", + "nli": "Grangali", + "nlj": "Nyali", + "nlk": "Ninia Yali", + "nll": "Nihali", + "nlm": "Mankiyali", + "nlo": "Ngul", + "nlq": "Lao Naga", + "nlu": "Nchumbulu", + "nlv": "Orizaba Nahuatl", + "nlw": "Walangama", + 
"nlx": "Nahali", + "nly": "Nyamal", + "nlz": "Nalögo", + "nma": "Maram Naga", + "nmb": "Big Nambas; V'ënen Taut", + "nmc": "Ngam", + "nmd": "Ndumu", + "nme": "Mzieme Naga", + "nmf": "Tangkhul Naga (India)", + "nmg": "Kwasio", + "nmh": "Monsang Naga", + "nmi": "Nyam", + "nmj": "Ngombe (Central African Republic)", + "nmk": "Namakura", + "nml": "Ndemli", + "nmm": "Manangba", + "nmn": "ǃXóõ", + "nmo": "Moyon Naga", + "nmp": "Nimanbur", + "nmq": "Nambya", + "nmr": "Nimbari", + "nms": "Letemboi", + "nmt": "Namonuito", + "nmu": "Northeast Maidu", + "nmv": "Ngamini", + "nmw": "Nimoa; Rifao", + "nmx": "Nama (Papua New Guinea)", + "nmy": "Namuyi", + "nmz": "Nawdm", + "nn": "Norwegian Nynorsk", + "nna": "Nyangumarta", + "nnb": "Nande", + "nnc": "Nancere", + "nnd": "West Ambae", + "nne": "Ngandyera", + "nnf": "Ngaing", + "nng": "Maring Naga", + "nnh": "Ngiemboon", + "nni": "North Nuaulu", + "nnj": "Nyangatom", + "nnk": "Nankina", + "nnl": "Northern Rengma Naga", + "nnm": "Namia", + "nnn": "Ngete", + "nnp": "Wancho Naga", + "nnq": "Ngindo", + "nnr": "Narungga", + "nnt": "Nanticoke", + "nnu": "Dwang", + "nnv": "Nugunu (Australia)", + "nnw": "Southern Nuni", + "nny": "Nyangga", + "nnz": "Nda'nda'", + "no": "Norwegian", + "noa": "Woun Meu", + "noc": "Nuk", + "nod": "Northern Thai", + "noe": "Nimadi", + "nof": "Nomane", + "nog": "Nogai", + "noh": "Nomu", + "noi": "Noiri", + "noj": "Nonuya", + "nok": "Nooksack", + "nol": "Nomlaki", + "nom": "Nocamán", + "non": "Old Norse", + "nop": "Numanggang", + "noq": "Ngongo", + "nos": "Eastern Nisu", + "not": "Nomatsiguenga", + "nou": "Ewage-Notu", + "nov": "Novial", + "now": "Nyambo", + "noy": "Noy", + "noz": "Nayi", + "npa": "Nar Phu", + "npb": "Nupbikha", + "npg": "Ponyo-Gongwang Naga", + "nph": "Phom Naga", + "npi": "Nepali (individual language)", + "npl": "Southeastern Puebla Nahuatl", + "npn": "Mondropolon", + "npo": "Pochuri Naga", + "nps": "Nipsan", + "npu": "Puimei Naga", + "npx": "Noipx", + "npy": "Napu", + "nqg": "Southern Nago", + "nqk": "Kura Ede Nago", + "nql": "Ngendelengo", + "nqm": "Ndom", + "nqn": "Nen", + "nqo": "N'Ko; N’Ko", + "nqq": "Kyan-Karyaw Naga", + "nqt": "Nteng", + "nqy": "Akyaung Ari Naga", + "nr": "South Ndebele", + "nra": "Ngom", + "nrb": "Nara", + "nrc": "Noric", + "nre": "Southern Rengma Naga", + "nrf": "Jèrriais; Guernésiais", + "nrg": "Narango", + "nri": "Chokri Naga", + "nrk": "Ngarla", + "nrl": "Ngarluma", + "nrm": "Narom", + "nrn": "Norn", + "nrp": "North Picene", + "nrr": "Norra; Nora", + "nrt": "Northern Kalapuya", + "nru": "Narua", + "nrx": "Ngurmbur", + "nrz": "Lala", + "nsa": "Sangtam Naga", + "nsb": "Lower Nossob", + "nsc": "Nshi", + "nsd": "Southern Nisu", + "nse": "Nsenga", + "nsf": "Northwestern Nisu", + "nsg": "Ngasa", + "nsh": "Ngoshie", + "nsi": "Nigerian Sign Language", + "nsk": "Naskapi", + "nsl": "Norwegian Sign Language", + "nsm": "Sumi Naga", + "nsn": "Nehan", + "nso": "Pedi; Northern Sotho; Sepedi", + "nsp": "Nepalese Sign Language", + "nsq": "Northern Sierra Miwok", + "nsr": "Maritime Sign Language", + "nss": "Nali", + "nst": "Tase Naga", + "nsu": "Sierra Negra Nahuatl", + "nsv": "Southwestern Nisu", + "nsw": "Navut", + "nsx": "Nsongo", + "nsy": "Nasal", + "nsz": "Nisenan", + "ntd": "Northern Tidung", + "nte": "Nathembo", + "ntg": "Ngantangarra", + "nti": "Natioro", + "ntj": "Ngaanyatjarra", + "ntk": "Ikoma-Nata-Isenye", + "ntm": "Nateni", + "nto": "Ntomba", + "ntp": "Northern Tepehuan", + "ntr": "Delo", + "ntu": "Natügu", + "ntw": "Nottoway", + "ntx": "Tangkhul Naga (Myanmar)", + "nty": "Mantsi", + "ntz": 
"Natanzi", + "nua": "Yuanga", + "nub": "Nubian languages", + "nuc": "Nukuini", + "nud": "Ngala", + "nue": "Ngundu", + "nuf": "Nusu", + "nug": "Nungali", + "nuh": "Ndunda", + "nui": "Ngumbi", + "nuj": "Nyole", + "nuk": "Nuu-chah-nulth; Nuuchahnulth", + "nul": "Nusa Laut", + "num": "Niuafo'ou", + "nun": "Anong", + "nuo": "Nguôn", + "nup": "Nupe-Nupe-Tako", + "nuq": "Nukumanu", + "nur": "Nukuria", + "nus": "Nuer", + "nut": "Nung (Viet Nam)", + "nuu": "Ngbundu", + "nuv": "Northern Nuni", + "nuw": "Nguluwan", + "nux": "Mehek", + "nuy": "Nunggubuyu", + "nuz": "Tlamacazapa Nahuatl", + "nv": "Navajo; Navaho", + "nvh": "Nasarian", + "nvm": "Namiae", + "nvo": "Nyokon", + "nwa": "Nawathinehena", + "nwb": "Nyabwa", + "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari", + "nwe": "Ngwe", + "nwg": "Ngayawung", + "nwi": "Southwest Tanna", + "nwm": "Nyamusa-Molo", + "nwo": "Nauo", + "nwr": "Nawaru", + "nww": "Ndwewe", + "nwx": "Middle Newar", + "nwy": "Nottoway-Meherrin", + "nxa": "Nauete", + "nxd": "Ngando (Democratic Republic of Congo)", + "nxe": "Nage", + "nxg": "Ngad'a", + "nxi": "Nindi", + "nxk": "Koki Naga", + "nxl": "South Nuaulu", + "nxm": "Numidian", + "nxn": "Ngawun", + "nxo": "Ndambomo", + "nxq": "Naxi", + "nxr": "Ninggerum", + "nxx": "Nafri", + "ny": "Nyanja; Chewa; Chichewa", + "nyb": "Nyangbo", + "nyc": "Nyanga-li", + "nyd": "Nyore; Olunyole", + "nye": "Nyengo", + "nyf": "Giryama; Kigiryama", + "nyg": "Nyindu", + "nyh": "Nyikina", + "nyi": "Ama (Sudan)", + "nyj": "Nyanga", + "nyk": "Nyaneka", + "nyl": "Nyeu", + "nym": "Nyamwezi", + "nyn": "Nyankole", + "nyo": "Nyoro", + "nyp": "Nyang'i", + "nyq": "Nayini", + "nyr": "Nyiha (Malawi)", + "nys": "Nyungar", + "nyt": "Nyawaygi", + "nyu": "Nyungwe", + "nyv": "Nyulnyul", + "nyw": "Nyaw", + "nyx": "Nganyaywana", + "nyy": "Nyakyusa-Ngonde", + "nza": "Tigon Mbembe", + "nzb": "Njebi", + "nzd": "Nzadi", + "nzi": "Nzima", + "nzk": "Nzakara", + "nzm": "Zeme Naga", + "nzs": "New Zealand Sign Language", + "nzu": "Teke-Nzikou", + "nzy": "Nzakambay", + "nzz": "Nanga Dama Dogon", + "oaa": "Orok", + "oac": "Oroch", + "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)", + "oav": "Old Avar", + "obi": "Obispeño", + "obk": "Southern Bontok", + "obl": "Oblo", + "obm": "Moabite", + "obo": "Obo Manobo", + "obr": "Old Burmese", + "obt": "Old Breton", + "obu": "Obulom", + "oc": "Occitan (post 1500)", + "oca": "Ocaina", + "och": "Old Chinese", + "ocm": "Old Cham", + "oco": "Old Cornish", + "ocu": "Atzingo Matlatzinca", + "oda": "Odut", + "odk": "Od", + "odt": "Old Dutch", + "odu": "Odual", + "ofo": "Ofo", + "ofs": "Old Frisian", + "ofu": "Efutop", + "ogb": "Ogbia", + "ogc": "Ogbah", + "oge": "Old Georgian", + "ogg": "Ogbogolo", + "ogo": "Khana", + "ogu": "Ogbronuagum", + "oht": "Old Hittite", + "ohu": "Old Hungarian", + "oia": "Oirata", + "oie": "Okolie", + "oin": "Inebu One", + "oj": "Ojibwa", + "ojb": "Northwestern Ojibwa", + "ojc": "Central Ojibwa", + "ojg": "Eastern Ojibwa", + "ojp": "Old Japanese", + "ojs": "Severn Ojibwa", + "ojv": "Ontong Java", + "ojw": "Western Ojibwa", + "oka": "Okanagan", + "okb": "Okobo", + "okc": "Kobo", + "okd": "Okodia", + "oke": "Okpe (Southwestern Edo)", + "okg": "Koko Babangk", + "okh": "Koresh-e Rostam", + "oki": "Okiek", + "okj": "Oko-Juwoi", + "okk": "Kwamtim One", + "okl": "Old Kentish Sign Language", + "okm": "Middle Korean (10th-16th cent.)", + "okn": "Oki-No-Erabu", + "oko": "Old Korean (3rd-9th cent.)", + "okr": "Kirike", + "oks": "Oko-Eni-Osayen", + "oku": "Oku", + "okv": "Orokaiva", + "okx": "Okpe 
(Northwestern Edo)", + "okz": "Old Khmer", + "ola": "Walungge", + "old": "Mochi", + "ole": "Olekha", + "olk": "Olkol", + "olm": "Oloma", + "olo": "Livvi", + "olr": "Olrat", + "olt": "Old Lithuanian", + "olu": "Kuvale", + "om": "Oromo", + "oma": "Omaha-Ponca", + "omb": "East Ambae", + "omc": "Mochica", + "omg": "Omagua", + "omi": "Omi", + "omk": "Omok", + "oml": "Ombo", + "omn": "Minoan", + "omo": "Utarmbung", + "omp": "Old Manipuri", + "omq": "Oto-Manguean languages", + "omr": "Old Marathi", + "omt": "Omotik", + "omu": "Omurano", + "omv": "Omotic languages", + "omw": "South Tairora", + "omx": "Old Mon", + "omy": "Old Malay", + "ona": "Ona", + "onb": "Lingao", + "one": "Oneida", + "ong": "Olo", + "oni": "Onin", + "onj": "Onjob", + "onk": "Kabore One", + "onn": "Onobasulu", + "ono": "Onondaga", + "onp": "Sartang", + "onr": "Northern One", + "ons": "Ono", + "ont": "Ontenu", + "onu": "Unua", + "onw": "Old Nubian", + "onx": "Onin Based Pidgin", + "ood": "Tohono O'odham", + "oog": "Ong", + "oon": "Önge", + "oor": "Oorlams", + "oos": "Old Ossetic", + "opa": "Okpamheri", + "opk": "Kopkaka", + "opm": "Oksapmin", + "opo": "Opao", + "opt": "Opata", + "opy": "Ofayé", + "or": "Oriya (macrolanguage); Odia (macrolanguage)", + "ora": "Oroha", + "orc": "Orma", + "ore": "Orejón", + "org": "Oring", + "orh": "Oroqen", + "orn": "Orang Kanaq", + "oro": "Orokolo", + "orr": "Oruma", + "ors": "Orang Seletar", + "ort": "Adivasi Oriya", + "oru": "Ormuri", + "orv": "Old Russian", + "orw": "Oro Win", + "orx": "Oro", + "ory": "Odia (individual language); Oriya (individual language)", + "orz": "Ormu", + "os": "Ossetian; Ossetic", + "osa": "Osage", + "osc": "Oscan", + "osi": "Osing", + "osn": "Old Sundanese", + "oso": "Ososo", + "osp": "Old Spanish", + "ost": "Osatu", + "osu": "Southern One", + "osx": "Old Saxon", + "ota": "Ottoman Turkish (1500-1928)", + "otb": "Old Tibetan", + "otd": "Ot Danum", + "ote": "Mezquital Otomi", + "oti": "Oti", + "otk": "Old Turkish", + "otl": "Tilapa Otomi", + "otm": "Eastern Highland Otomi", + "otn": "Tenango Otomi", + "oto": "Otomian languages", + "otq": "Querétaro Otomi", + "otr": "Otoro", + "ots": "Estado de México Otomi", + "ott": "Temoaya Otomi", + "otu": "Otuke", + "otw": "Ottawa", + "otx": "Texcatepec Otomi", + "oty": "Old Tamil", + "otz": "Ixtenco Otomi", + "oua": "Tagargrent", + "oub": "Glio-Oubi", + "oue": "Oune", + "oui": "Old Uighur", + "oum": "Ouma", + "ovd": "Elfdalian; Övdalian", + "owi": "Owiniga", + "owl": "Old Welsh", + "oyb": "Oy", + "oyd": "Oyda", + "oym": "Wayampi", + "oyy": "Oya'oya", + "ozm": "Koonzime", + "pa": "Panjabi; Punjabi", + "paa": "Papuan languages", + "pab": "Parecís", + "pac": "Pacoh", + "pad": "Paumarí", + "pae": "Pagibete", + "paf": "Paranawát", + "pag": "Pangasinan", + "pah": "Tenharim", + "pai": "Pe", + "pak": "Parakanã", + "pal": "Pahlavi", + "pam": "Pampanga; Kapampangan", + "pao": "Northern Paiute", + "pap": "Papiamento", + "paq": "Parya", + "par": "Panamint; Timbisha", + "pas": "Papasena", + "pau": "Palauan", + "pav": "Pakaásnovos", + "paw": "Pawnee", + "pax": "Pankararé", + "pay": "Pech", + "paz": "Pankararú", + "pbb": "Páez", + "pbc": "Patamona", + "pbe": "Mezontla Popoloca", + "pbf": "Coyotepec Popoloca", + "pbg": "Paraujano", + "pbh": "E'ñapa Woromaipu", + "pbi": "Parkwa", + "pbl": "Mak (Nigeria)", + "pbm": "Puebla Mazatec", + "pbn": "Kpasam", + "pbo": "Papel", + "pbp": "Badyara", + "pbr": "Pangwa", + "pbs": "Central Pame", + "pbt": "Southern Pashto", + "pbu": "Northern Pashto", + "pbv": "Pnar", + "pby": "Pyu (Papua New Guinea)", + "pca": 
"Santa Inés Ahuatempan Popoloca", + "pcb": "Pear", + "pcc": "Bouyei", + "pcd": "Picard", + "pce": "Ruching Palaung", + "pcf": "Paliyan", + "pcg": "Paniya", + "pch": "Pardhan", + "pci": "Duruwa", + "pcj": "Parenga", + "pck": "Paite Chin", + "pcl": "Pardhi", + "pcm": "Nigerian Pidgin", + "pcn": "Piti", + "pcp": "Pacahuara", + "pcw": "Pyapun", + "pda": "Anam", + "pdc": "Pennsylvania German", + "pdi": "Pa Di", + "pdn": "Podena; Fedan", + "pdo": "Padoe", + "pdt": "Plautdietsch", + "pdu": "Kayan", + "pea": "Peranakan Indonesian", + "peb": "Eastern Pomo", + "ped": "Mala (Papua New Guinea)", + "pee": "Taje", + "pef": "Northeastern Pomo", + "peg": "Pengo", + "peh": "Bonan", + "pei": "Chichimeca-Jonaz", + "pej": "Northern Pomo", + "pek": "Penchal", + "pel": "Pekal", + "pem": "Phende", + "peo": "Old Persian (ca. 600-400 B.C.)", + "pep": "Kunja", + "peq": "Southern Pomo", + "pes": "Iranian Persian", + "pev": "Pémono", + "pex": "Petats", + "pey": "Petjo", + "pez": "Eastern Penan", + "pfa": "Pááfang", + "pfe": "Pere", + "pfl": "Pfaelzisch", + "pga": "Sudanese Creole Arabic", + "pgd": "Gāndhārī", + "pgg": "Pangwali", + "pgi": "Pagi", + "pgk": "Rerep", + "pgl": "Primitive Irish", + "pgn": "Paelignian", + "pgs": "Pangseng", + "pgu": "Pagu", + "pgz": "Papua New Guinean Sign Language", + "pha": "Pa-Hng", + "phd": "Phudagi", + "phg": "Phuong", + "phh": "Phukha", + "phi": "Philippine languages", + "phj": "Pahari", + "phk": "Phake", + "phl": "Phalura; Palula", + "phm": "Phimbi", + "phn": "Phoenician", + "pho": "Phunoi", + "phq": "Phana'", + "phr": "Pahari-Potwari", + "pht": "Phu Thai", + "phu": "Phuan", + "phv": "Pahlavani", + "phw": "Phangduwali", + "pi": "Pali", + "pia": "Pima Bajo", + "pib": "Yine", + "pic": "Pinji", + "pid": "Piaroa", + "pie": "Piro", + "pif": "Pingelapese", + "pig": "Pisabo", + "pih": "Pitcairn-Norfolk", + "pij": "Pijao", + "pil": "Yom", + "pim": "Powhatan", + "pin": "Piame", + "pio": "Piapoco", + "pip": "Pero", + "pir": "Piratapuyo", + "pis": "Pijin", + "pit": "Pitta Pitta", + "piu": "Pintupi-Luritja", + "piv": "Pileni; Vaeakau-Taumako", + "piw": "Pimbwe", + "pix": "Piu", + "piy": "Piya-Kwonci", + "piz": "Pije", + "pjt": "Pitjantjatjara", + "pka": "Ardhamāgadhī Prākrit", + "pkb": "Pokomo; Kipfokomo", + "pkc": "Paekche", + "pkg": "Pak-Tong", + "pkh": "Pankhu", + "pkn": "Pakanha", + "pko": "Pökoot", + "pkp": "Pukapuka", + "pkr": "Attapady Kurumba", + "pks": "Pakistan Sign Language", + "pkt": "Maleng", + "pku": "Paku", + "pl": "Polish", + "pla": "Miani", + "plb": "Polonombauk", + "plc": "Central Palawano", + "pld": "Polari", + "ple": "Palu'e", + "plf": "Central Malayo-Polynesian languages", + "plg": "Pilagá", + "plh": "Paulohi", + "plj": "Polci", + "plk": "Kohistani Shina", + "pll": "Shwe Palaung", + "pln": "Palenquero", + "plo": "Oluta Popoluca", + "plq": "Palaic", + "plr": "Palaka Senoufo", + "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca", + "plt": "Plateau Malagasy", + "plu": "Palikúr", + "plv": "Southwest Palawano", + "plw": "Brooke's Point Palawano", + "ply": "Bolyu", + "plz": "Paluan", + "pma": "Paama", + "pmb": "Pambia", + "pmd": "Pallanganmiddang", + "pme": "Pwaamei", + "pmf": "Pamona", + "pmh": "Māhārāṣṭri Prākrit", + "pmi": "Northern Pumi", + "pmj": "Southern Pumi", + "pmk": "Pamlico", + "pml": "Lingua Franca", + "pmm": "Pomo", + "pmn": "Pam", + "pmo": "Pom", + "pmq": "Northern Pame", + "pmr": "Paynamar", + "pms": "Piemontese", + "pmt": "Tuamotuan", + "pmw": "Plains Miwok", + "pmx": "Poumei Naga", + "pmy": "Papuan Malay", + "pmz": "Southern Pame", + "pna": 
"Punan Bah-Biau", + "pnb": "Western Panjabi", + "pnc": "Pannei", + "pnd": "Mpinda", + "pne": "Western Penan", + "png": "Pangu; Pongu", + "pnh": "Penrhyn", + "pni": "Aoheng", + "pnj": "Pinjarup", + "pnk": "Paunaka", + "pnl": "Paleni", + "pnm": "Punan Batu 1", + "pnn": "Pinai-Hagahai", + "pno": "Panobo", + "pnp": "Pancana", + "pnq": "Pana (Burkina Faso)", + "pnr": "Panim", + "pns": "Ponosakan", + "pnt": "Pontic", + "pnu": "Jiongnai Bunu", + "pnv": "Pinigura", + "pnw": "Banyjima; Panytyima", + "pnx": "Phong-Kniang", + "pny": "Pinyin", + "pnz": "Pana (Central African Republic)", + "poc": "Poqomam", + "poe": "San Juan Atzingo Popoloca", + "pof": "Poke", + "pog": "Potiguára", + "poh": "Poqomchi'", + "poi": "Highland Popoluca", + "pok": "Pokangá", + "pom": "Southeastern Pomo", + "pon": "Pohnpeian", + "poo": "Central Pomo", + "pop": "Pwapwâ", + "poq": "Texistepec Popoluca", + "pos": "Sayula Popoluca", + "pot": "Potawatomi", + "pov": "Upper Guinea Crioulo", + "pow": "San Felipe Otlaltepec Popoloca", + "pox": "Polabian", + "poy": "Pogolo", + "poz": "Malayo-Polynesian languages", + "ppe": "Papi", + "ppi": "Paipai", + "ppk": "Uma", + "ppl": "Pipil; Nicarao", + "ppm": "Papuma", + "ppn": "Papapana", + "ppo": "Folopa", + "ppp": "Pelende", + "ppq": "Pei", + "pps": "San Luís Temalacayuca Popoloca", + "ppt": "Pare", + "ppu": "Papora", + "pqa": "Pa'a", + "pqe": "Eastern Malayo-Polynesian languages", + "pqm": "Malecite-Passamaquoddy", + "pqw": "Western Malayo-Polynesian languages", + "pra": "Prakrit languages", + "prc": "Parachi", + "prd": "Parsi-Dari", + "pre": "Principense", + "prf": "Paranan", + "prg": "Prussian", + "prh": "Porohanon", + "pri": "Paicî", + "prk": "Parauk", + "prl": "Peruvian Sign Language", + "prm": "Kibiri", + "prn": "Prasuni", + "pro": "Old Provençal (to 1500); Old Occitan (to 1500)", + "prp": "Parsi", + "prq": "Ashéninka Perené", + "prr": "Puri", + "prs": "Dari; Afghan Persian", + "prt": "Phai", + "pru": "Puragi", + "prw": "Parawen", + "prx": "Purik", + "prz": "Providencia Sign Language", + "ps": "Pushto; Pashto", + "psa": "Asue Awyu", + "psc": "Iranian Sign Language; Persian Sign Language", + "psd": "Plains Indian Sign Language", + "pse": "Central Malay", + "psg": "Penang Sign Language", + "psh": "Southwest Pashai; Southwest Pashayi", + "psi": "Southeast Pashai; Southeast Pashayi", + "psl": "Puerto Rican Sign Language", + "psm": "Pauserna", + "psn": "Panasuan", + "pso": "Polish Sign Language", + "psp": "Philippine Sign Language", + "psq": "Pasi", + "psr": "Portuguese Sign Language", + "pss": "Kaulong", + "pst": "Central Pashto", + "psu": "Sauraseni Prākrit", + "psw": "Port Sandwich", + "psy": "Piscataway", + "pt": "Portuguese", + "pta": "Pai Tavytera", + "pth": "Pataxó Hã-Ha-Hãe", + "pti": "Pindiini; Wangkatha", + "ptn": "Patani", + "pto": "Zo'é", + "ptp": "Patep", + "ptq": "Pattapu", + "ptr": "Piamatsina", + "ptt": "Enrekang", + "ptu": "Bambam", + "ptv": "Port Vato", + "ptw": "Pentlatch", + "pty": "Pathiya", + "pua": "Western Highland Purepecha", + "pub": "Purum", + "puc": "Punan Merap", + "pud": "Punan Aput", + "pue": "Puelche", + "puf": "Punan Merah", + "pug": "Phuie", + "pui": "Puinave", + "puj": "Punan Tubu", + "pum": "Puma", + "puo": "Puoc", + "pup": "Pulabu", + "puq": "Puquina", + "pur": "Puruborá", + "put": "Putoh", + "puu": "Punu", + "puw": "Puluwatese", + "pux": "Puare", + "puy": "Purisimeño", + "pwa": "Pawaia", + "pwb": "Panawa", + "pwg": "Gapapaiwa", + "pwi": "Patwin", + "pwm": "Molbog", + "pwn": "Paiwan", + "pwo": "Pwo Western Karen", + "pwr": "Powari", + "pww": "Pwo 
Northern Karen", + "pxm": "Quetzaltepec Mixe", + "pye": "Pye Krumen", + "pym": "Fyam", + "pyn": "Poyanáwa", + "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay", + "pyu": "Puyuma", + "pyx": "Pyu (Myanmar)", + "pyy": "Pyen", + "pzh": "Pazeh", + "pzn": "Jejara Naga; Para Naga", + "qu": "Quechua", + "qua": "Quapaw", + "qub": "Huallaga Huánuco Quechua", + "quc": "K'iche'; Quiché", + "qud": "Calderón Highland Quichua", + "quf": "Lambayeque Quechua", + "qug": "Chimborazo Highland Quichua", + "quh": "South Bolivian Quechua", + "qui": "Quileute", + "quk": "Chachapoyas Quechua", + "qul": "North Bolivian Quechua", + "qum": "Sipacapense", + "qun": "Quinault", + "qup": "Southern Pastaza Quechua", + "quq": "Quinqui", + "qur": "Yanahuanca Pasco Quechua", + "qus": "Santiago del Estero Quichua", + "quv": "Sacapulteco", + "quw": "Tena Lowland Quichua", + "qux": "Yauyos Quechua", + "quy": "Ayacucho Quechua", + "quz": "Cusco Quechua", + "qva": "Ambo-Pasco Quechua", + "qvc": "Cajamarca Quechua", + "qve": "Eastern Apurímac Quechua", + "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua", + "qvi": "Imbabura Highland Quichua", + "qvj": "Loja Highland Quichua", + "qvl": "Cajatambo North Lima Quechua", + "qvm": "Margos-Yarowilca-Lauricocha Quechua", + "qvn": "North Junín Quechua", + "qvo": "Napo Lowland Quechua", + "qvp": "Pacaraos Quechua", + "qvs": "San Martín Quechua", + "qvw": "Huaylla Wanca Quechua", + "qvy": "Queyu", + "qvz": "Northern Pastaza Quichua", + "qwa": "Corongo Ancash Quechua", + "qwc": "Classical Quechua", + "qwe": "Quechuan (family)", + "qwh": "Huaylas Ancash Quechua", + "qwm": "Kuman (Russia)", + "qws": "Sihuas Ancash Quechua", + "qwt": "Kwalhioqua-Tlatskanai", + "qxa": "Chiquián Ancash Quechua", + "qxc": "Chincha Quechua", + "qxh": "Panao Huánuco Quechua", + "qxl": "Salasaca Highland Quichua", + "qxn": "Northern Conchucos Ancash Quechua", + "qxo": "Southern Conchucos Ancash Quechua", + "qxp": "Puno Quechua", + "qxq": "Qashqa'i", + "qxr": "Cañar Highland Quichua", + "qxs": "Southern Qiang", + "qxt": "Santa Ana de Tusi Pasco Quechua", + "qxu": "Arequipa-La Unión Quechua", + "qxw": "Jauja Wanca Quechua", + "qya": "Quenya", + "qyp": "Quiripi", + "raa": "Dungmali", + "rab": "Camling", + "rac": "Rasawa", + "rad": "Rade", + "raf": "Western Meohang", + "rag": "Logooli; Lulogooli", + "rah": "Rabha", + "rai": "Ramoaaina", + "raj": "Rajasthani", + "rak": "Tulu-Bohuai", + "ral": "Ralte", + "ram": "Canela", + "ran": "Riantana", + "rao": "Rao", + "rap": "Rapanui", + "raq": "Saam", + "rar": "Rarotongan; Cook Islands Maori", + "ras": "Tegali", + "rat": "Razajerdi", + "rau": "Raute", + "rav": "Sampang", + "raw": "Rawang", + "rax": "Rang", + "ray": "Rapa", + "raz": "Rahambuu", + "rbb": "Rumai Palaung", + "rbk": "Northern Bontok", + "rbl": "Miraya Bikol", + "rbp": "Barababaraba", + "rcf": "Réunion Creole French", + "rdb": "Rudbari", + "rea": "Rerau", + "reb": "Rembong", + "ree": "Rejang Kayan", + "reg": "Kara (Tanzania)", + "rei": "Reli", + "rej": "Rejang", + "rel": "Rendille", + "rem": "Remo", + "ren": "Rengao", + "rer": "Rer Bare", + "res": "Reshe", + "ret": "Retta", + "rey": "Reyesano", + "rga": "Roria", + "rge": "Romano-Greek", + "rgk": "Rangkas", + "rgn": "Romagnol", + "rgr": "Resígaro", + "rgs": "Southern Roglai", + "rgu": "Ringgou", + "rhg": "Rohingya", + "rhp": "Yahang", + "ria": "Riang (India)", + "rib": "Bribri Sign Language", + "rif": "Tarifit", + "ril": "Riang Lang; Riang (Myanmar)", + "rim": "Nyaturu", + "rin": "Nungu", + "rir": "Ribun", + "rit": "Ritharrngu", + "riu": "Riung", + "rjg": 
"Rajong", + "rji": "Raji", + "rjs": "Rajbanshi", + "rka": "Kraol", + "rkb": "Rikbaktsa", + "rkh": "Rakahanga-Manihiki", + "rki": "Rakhine", + "rkm": "Marka", + "rkt": "Rangpuri; Kamta", + "rkw": "Arakwal", + "rm": "Romansh", + "rma": "Rama", + "rmb": "Rembarrnga", + "rmc": "Carpathian Romani", + "rmd": "Traveller Danish", + "rme": "Angloromani", + "rmf": "Kalo Finnish Romani", + "rmg": "Traveller Norwegian", + "rmh": "Murkim", + "rmi": "Lomavren", + "rmk": "Romkun", + "rml": "Baltic Romani", + "rmm": "Roma", + "rmn": "Balkan Romani", + "rmo": "Sinte Romani", + "rmp": "Rempi", + "rmq": "Caló", + "rms": "Romanian Sign Language", + "rmt": "Domari", + "rmu": "Tavringer Romani", + "rmv": "Romanova", + "rmw": "Welsh Romani", + "rmx": "Romam", + "rmy": "Vlax Romani", + "rmz": "Marma", + "rn": "Rundi", + "rnb": "Brunca Sign Language", + "rnd": "Ruund", + "rng": "Ronga", + "rnl": "Ranglong", + "rnn": "Roon", + "rnp": "Rongpo", + "rnr": "Nari Nari", + "rnw": "Rungwa", + "ro": "Romanian; Moldavian; Moldovan", + "roa": "Romance languages", + "rob": "Tae'", + "roc": "Cacgia Roglai", + "rod": "Rogo", + "roe": "Ronji", + "rof": "Rombo", + "rog": "Northern Roglai", + "rol": "Romblomanon", + "rom": "Romany", + "roo": "Rotokas", + "rop": "Kriol", + "ror": "Rongga", + "rou": "Runga", + "row": "Dela-Oenale", + "rpn": "Repanbitip", + "rpt": "Rapting", + "rri": "Ririo", + "rro": "Waima", + "rrt": "Arritinngithigh", + "rsb": "Romano-Serbian", + "rsk": "Ruthenian; Rusyn", + "rsl": "Russian Sign Language", + "rsm": "Miriwoong Sign Language", + "rsn": "Rwandan Sign Language", + "rtc": "Rungtu Chin", + "rth": "Ratahan", + "rtm": "Rotuman", + "rts": "Yurats", + "rtw": "Rathawi", + "ru": "Russian", + "rub": "Gungu", + "ruc": "Ruuli", + "rue": "Rusyn", + "ruf": "Luguru", + "rug": "Roviana", + "ruh": "Ruga", + "rui": "Rufiji", + "ruk": "Che", + "ruo": "Istro Romanian", + "rup": "Macedo-Romanian; Aromanian; Arumanian", + "ruq": "Megleno Romanian", + "rut": "Rutul", + "ruu": "Lanas Lobu", + "ruy": "Mala (Nigeria)", + "ruz": "Ruma", + "rw": "Kinyarwanda", + "rwa": "Rawo", + "rwk": "Rwa", + "rwl": "Ruwila", + "rwm": "Amba (Uganda)", + "rwo": "Rawa", + "rwr": "Marwari (India)", + "rxd": "Ngardi", + "rxw": "Karuwali; Garuwali", + "ryn": "Northern Amami-Oshima", + "rys": "Yaeyama", + "ryu": "Central Okinawan", + "rzh": "Rāziḥī", + "sa": "Sanskrit", + "saa": "Saba", + "sab": "Buglere", + "sac": "Meskwaki", + "sad": "Sandawe", + "sae": "Sabanê", + "saf": "Safaliba", + "sah": "Yakut", + "sai": "South American Indian languages", + "saj": "Sahu", + "sak": "Sake", + "sal": "Salishan languages", + "sam": "Samaritan Aramaic", + "sao": "Sause", + "saq": "Samburu", + "sar": "Saraveca", + "sas": "Sasak", + "sat": "Santali", + "sau": "Saleman", + "sav": "Saafi-Saafi", + "saw": "Sawi", + "sax": "Sa", + "say": "Saya", + "saz": "Saurashtra", + "sba": "Ngambay", + "sbb": "Simbo", + "sbc": "Kele (Papua New Guinea)", + "sbd": "Southern Samo", + "sbe": "Saliba", + "sbf": "Chabu; Shabo", + "sbg": "Seget", + "sbh": "Sori-Harengan", + "sbi": "Seti", + "sbj": "Surbakhal", + "sbk": "Safwa", + "sbl": "Botolan Sambal", + "sbm": "Sagala", + "sbn": "Sindhi Bhil", + "sbo": "Sabüm", + "sbp": "Sangu (Tanzania)", + "sbq": "Sileibi", + "sbr": "Sembakung Murut", + "sbs": "Subiya", + "sbt": "Kimki", + "sbu": "Stod Bhoti", + "sbv": "Sabine", + "sbw": "Simba", + "sbx": "Seberuang", + "sby": "Soli", + "sbz": "Sara Kaba", + "sc": "Sardinian", + "scb": "Chut", + "sce": "Dongxiang", + "scf": "San Miguel Creole French", + "scg": "Sanggau", + "sch": "Sakachep", + 
"sci": "Sri Lankan Creole Malay", + "sck": "Sadri", + "scl": "Shina", + "scn": "Sicilian", + "sco": "Scots", + "scp": "Hyolmo; Helambu Sherpa", + "scq": "Sa'och", + "scs": "North Slavey", + "sct": "Southern Katang", + "scu": "Shumcho", + "scv": "Sheni", + "scw": "Sha", + "scx": "Sicel", + "sd": "Sindhi", + "sda": "Toraja-Sa'dan", + "sdb": "Shabak", + "sdc": "Sassarese Sardinian", + "sde": "Surubu", + "sdf": "Sarli", + "sdg": "Savi", + "sdh": "Southern Kurdish", + "sdj": "Suundi", + "sdk": "Sos Kundi", + "sdl": "Saudi Arabian Sign Language", + "sdn": "Gallurese Sardinian", + "sdo": "Bukar-Sadung Bidayuh", + "sdp": "Sherdukpen", + "sdq": "Semandang", + "sdr": "Oraon Sadri", + "sds": "Sened", + "sdt": "Shuadit", + "sdu": "Sarudu", + "sdv": "Eastern Sudanic languages", + "sdx": "Sibu Melanau", + "sdz": "Sallands", + "se": "Northern Sami", + "sea": "Semai", + "seb": "Shempire Senoufo", + "sec": "Sechelt", + "sed": "Sedang", + "see": "Seneca", + "sef": "Cebaara Senoufo", + "seg": "Segeju", + "seh": "Sena", + "sei": "Seri", + "sej": "Sene", + "sek": "Sekani", + "sel": "Selkup", + "sem": "Semitic languages", + "sen": "Nanerigé Sénoufo", + "seo": "Suarmin", + "sep": "Sìcìté Sénoufo", + "seq": "Senara Sénoufo", + "ser": "Serrano", + "ses": "Koyraboro Senni Songhai", + "set": "Sentani", + "seu": "Serui-Laut", + "sev": "Nyarafolo Senoufo", + "sew": "Sewa Bay", + "sey": "Secoya", + "sez": "Senthang Chin", + "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language", + "sfe": "Eastern Subanen", + "sfm": "Small Flowery Miao", + "sfs": "South African Sign Language", + "sfw": "Sehwi", + "sg": "Sango", + "sga": "Old Irish (to 900)", + "sgb": "Mag-antsi Ayta", + "sgc": "Kipsigis", + "sgd": "Surigaonon", + "sge": "Segai", + "sgg": "Swiss-German Sign Language", + "sgh": "Shughni", + "sgi": "Suga", + "sgj": "Surgujia", + "sgk": "Sangkong", + "sgm": "Singa", + "sgn": "Sign languages", + "sgp": "Singpho", + "sgr": "Sangisari", + "sgs": "Samogitian", + "sgt": "Brokpake", + "sgu": "Salas", + "sgw": "Sebat Bet Gurage", + "sgx": "Sierra Leone Sign Language", + "sgy": "Sanglechi", + "sgz": "Sursurunga", + "sh": "Serbo-Croatian", + "sha": "Shall-Zwall", + "shb": "Ninam", + "shc": "Sonde", + "shd": "Kundal Shahi", + "she": "Sheko", + "shg": "Shua", + "shh": "Shoshoni", + "shi": "Tachelhit", + "shj": "Shatt", + "shk": "Shilluk", + "shl": "Shendu", + "shm": "Shahrudi", + "shn": "Shan", + "sho": "Shanga", + "shp": "Shipibo-Conibo", + "shq": "Sala", + "shr": "Shi", + "shs": "Shuswap", + "sht": "Shasta", + "shu": "Chadian Arabic", + "shv": "Shehri", + "shw": "Shwai", + "shx": "She", + "shy": "Tachawit", + "shz": "Syenara Senoufo", + "si": "Sinhala; Sinhalese", + "sia": "Akkala Sami", + "sib": "Sebop", + "sid": "Sidamo", + "sie": "Simaa", + "sif": "Siamou", + "sig": "Paasaal", + "sih": "Zire; Sîshëë", + "sii": "Shom Peng", + "sij": "Numbami", + "sik": "Sikiana", + "sil": "Tumulung Sisaala", + "sim": "Mende (Papua New Guinea)", + "sio": "Siouan languages", + "sip": "Sikkimese", + "siq": "Sonia", + "sir": "Siri", + "sis": "Siuslaw", + "sit": "Sino-Tibetan languages", + "siu": "Sinagen", + "siv": "Sumariup", + "siw": "Siwai", + "six": "Sumau", + "siy": "Sivandi", + "siz": "Siwi", + "sja": "Epena", + "sjb": "Sajau Basap", + "sjd": "Kildin Sami", + "sje": "Pite Sami", + "sjg": "Assangori", + "sjk": "Kemi Sami", + "sjl": "Sajalong; Miji", + "sjm": "Mapun", + "sjn": "Sindarin", + "sjo": "Xibe", + "sjp": "Surjapuri", + "sjr": "Siar-Lak", + "sjs": "Senhaja De Srair", + "sjt": "Ter Sami", + "sju": "Ume Sami", + 
"sjw": "Shawnee", + "sk": "Slovak", + "ska": "Skagit", + "skb": "Saek", + "skc": "Ma Manda", + "skd": "Southern Sierra Miwok", + "ske": "Seke (Vanuatu)", + "skf": "Sakirabiá", + "skg": "Sakalava Malagasy", + "skh": "Sikule", + "ski": "Sika", + "skj": "Seke (Nepal)", + "skm": "Kutong", + "skn": "Kolibugan Subanon", + "sko": "Seko Tengah", + "skp": "Sekapan", + "skq": "Sininkere", + "skr": "Saraiki; Seraiki", + "sks": "Maia", + "skt": "Sakata", + "sku": "Sakao", + "skv": "Skou", + "skw": "Skepi Creole Dutch", + "skx": "Seko Padang", + "sky": "Sikaiana", + "skz": "Sekar", + "sl": "Slovenian", + "sla": "Slavic languages", + "slc": "Sáliba", + "sld": "Sissala", + "sle": "Sholaga", + "slf": "Swiss-Italian Sign Language", + "slg": "Selungai Murut", + "slh": "Southern Puget Sound Salish", + "sli": "Lower Silesian", + "slj": "Salumá", + "sll": "Salt-Yui", + "slm": "Pangutaran Sama", + "sln": "Salinan", + "slp": "Lamaholot", + "slq": "Salchuq", + "slr": "Salar", + "sls": "Singapore Sign Language", + "slt": "Sila", + "slu": "Selaru", + "slw": "Sialum", + "slx": "Salampasu", + "sly": "Selayar", + "slz": "Ma'ya", + "sm": "Samoan", + "sma": "Southern Sami", + "smb": "Simbari", + "smc": "Som", + "smf": "Auwe", + "smg": "Simbali", + "smh": "Samei", + "smi": "Sami languages", + "smj": "Lule Sami", + "smk": "Bolinao", + "sml": "Central Sama", + "smm": "Musasa", + "smn": "Inari Sami", + "smp": "Samaritan", + "smq": "Samo", + "smr": "Simeulue", + "sms": "Skolt Sami", + "smt": "Simte", + "smu": "Somray", + "smv": "Samvedi", + "smw": "Sumbawa", + "smx": "Samba", + "smy": "Semnani", + "smz": "Simeku", + "sn": "Shona", + "snc": "Sinaugoro", + "sne": "Bau Bidayuh", + "snf": "Noon", + "sng": "Sanga (Democratic Republic of Congo)", + "sni": "Sensi", + "snj": "Riverain Sango", + "snk": "Soninke", + "snl": "Sangil", + "snm": "Southern Ma'di", + "snn": "Siona", + "sno": "Snohomish", + "snp": "Siane", + "snq": "Sangu (Gabon)", + "snr": "Sihan", + "sns": "South West Bay; Nahavaq", + "snu": "Senggi; Viid", + "snv": "Sa'ban", + "snw": "Selee", + "snx": "Sam", + "sny": "Saniyo-Hiyewe", + "snz": "Kou", + "so": "Somali", + "soa": "Thai Song", + "sob": "Sobei", + "soc": "So (Democratic Republic of Congo)", + "sod": "Songoora", + "soe": "Songomeno", + "sog": "Sogdian", + "soh": "Aka", + "soi": "Sonha", + "soj": "Soi", + "sok": "Sokoro", + "sol": "Solos", + "son": "Songhai languages", + "soo": "Songo", + "sop": "Songe", + "soq": "Kanasi", + "sor": "Somrai", + "sos": "Seeku", + "sou": "Southern Thai", + "sov": "Sonsorol", + "sow": "Sowanda", + "sox": "Swo", + "soy": "Miyobe", + "soz": "Temi", + "spb": "Sepa (Indonesia)", + "spc": "Sapé", + "spd": "Saep", + "spe": "Sepa (Papua New Guinea)", + "spg": "Sian", + "spi": "Saponi", + "spk": "Sengo", + "spl": "Selepet", + "spm": "Akukem", + "spn": "Sanapaná", + "spo": "Spokane", + "spp": "Supyire Senoufo", + "spq": "Loreto-Ucayali Spanish", + "spr": "Saparua", + "sps": "Saposa", + "spt": "Spiti Bhoti", + "spu": "Sapuan", + "spv": "Sambalpuri; Kosli", + "spx": "South Picene", + "spy": "Sabaot", + "sq": "Albanian", + "sqa": "Shama-Sambuga", + "sqh": "Shau", + "sqj": "Albanian languages", + "sqk": "Albanian Sign Language", + "sqm": "Suma", + "sqn": "Susquehannock", + "sqo": "Sorkhei", + "sqq": "Sou", + "sqr": "Siculo Arabic", + "sqs": "Sri Lankan Sign Language", + "sqt": "Soqotri", + "squ": "Squamish", + "sqx": "Kufr Qassem Sign Language (KQSL)", + "sr": "Serbian", + "sra": "Saruga", + "srb": "Sora", + "src": "Logudorese Sardinian", + "sre": "Sara", + "srf": "Nafi", + "srg": "Sulod", + 
"srh": "Sarikoli", + "sri": "Siriano", + "srk": "Serudung Murut", + "srl": "Isirawa", + "srm": "Saramaccan", + "srn": "Sranan Tongo", + "sro": "Campidanese Sardinian", + "srq": "Sirionó", + "srr": "Serer", + "srs": "Sarsi", + "srt": "Sauri", + "sru": "Suruí", + "srv": "Southern Sorsoganon", + "srw": "Serua", + "srx": "Sirmauri", + "sry": "Sera", + "srz": "Shahmirzadi", + "ss": "Swati", + "ssa": "Nilo-Saharan languages", + "ssb": "Southern Sama", + "ssc": "Suba-Simbiti", + "ssd": "Siroi", + "sse": "Balangingi; Bangingih Sama", + "ssf": "Thao", + "ssg": "Seimat", + "ssh": "Shihhi Arabic", + "ssi": "Sansi", + "ssj": "Sausi", + "ssk": "Sunam", + "ssl": "Western Sisaala", + "ssm": "Semnam", + "ssn": "Waata", + "sso": "Sissano", + "ssp": "Spanish Sign Language", + "ssq": "So'a", + "ssr": "Swiss-French Sign Language", + "sss": "Sô", + "sst": "Sinasina", + "ssu": "Susuami", + "ssv": "Shark Bay", + "ssx": "Samberigi", + "ssy": "Saho", + "ssz": "Sengseng", + "st": "Southern Sotho", + "sta": "Settla", + "stb": "Northern Subanen", + "std": "Sentinel", + "ste": "Liana-Seti", + "stf": "Seta", + "stg": "Trieng", + "sth": "Shelta", + "sti": "Bulo Stieng", + "stj": "Matya Samo", + "stk": "Arammba", + "stl": "Stellingwerfs", + "stm": "Setaman", + "stn": "Owa", + "sto": "Stoney", + "stp": "Southeastern Tepehuan", + "stq": "Saterfriesisch", + "str": "Straits Salish", + "sts": "Shumashti", + "stt": "Budeh Stieng", + "stu": "Samtao", + "stv": "Silt'e", + "stw": "Satawalese", + "sty": "Siberian Tatar", + "su": "Sundanese", + "sua": "Sulka", + "sub": "Suku", + "suc": "Western Subanon", + "sue": "Suena", + "sug": "Suganga", + "sui": "Suki", + "suj": "Shubi", + "suk": "Sukuma", + "suo": "Bouni", + "suq": "Tirmaga-Chai Suri; Suri", + "sur": "Mwaghavul", + "sus": "Susu", + "sut": "Subtiaba", + "suv": "Puroik", + "suw": "Sumbwa", + "sux": "Sumerian", + "suy": "Suyá", + "suz": "Sunwar", + "sv": "Swedish", + "sva": "Svan", + "svb": "Ulau-Suain", + "svc": "Vincentian Creole English", + "sve": "Serili", + "svk": "Slovakian Sign Language", + "svm": "Slavomolisano", + "svs": "Savosavo", + "svx": "Skalvian", + "sw": "Swahili (macrolanguage)", + "swb": "Maore Comorian", + "swc": "Congo Swahili", + "swf": "Sere", + "swg": "Swabian", + "swh": "Swahili (individual language); Kiswahili", + "swi": "Sui", + "swj": "Sira", + "swk": "Malawi Sena", + "swl": "Swedish Sign Language", + "swm": "Samosa", + "swn": "Sawknah", + "swo": "Shanenawa", + "swp": "Suau", + "swq": "Sharwa", + "swr": "Saweru", + "sws": "Seluwasan", + "swt": "Sawila", + "swu": "Suwawa", + "swv": "Shekhawati", + "sww": "Sowa", + "swx": "Suruahá", + "swy": "Sarua", + "sxb": "Suba", + "sxc": "Sicanian", + "sxe": "Sighu", + "sxg": "Shuhi; Shixing", + "sxk": "Southern Kalapuya", + "sxl": "Selian", + "sxm": "Samre", + "sxn": "Sangir", + "sxo": "Sorothaptic", + "sxr": "Saaroa", + "sxs": "Sasaru", + "sxu": "Upper Saxon", + "sxw": "Saxwe Gbe", + "sya": "Siang", + "syb": "Central Subanen", + "syc": "Classical Syriac", + "syd": "Samoyedic languages", + "syi": "Seki", + "syk": "Sukur", + "syl": "Sylheti", + "sym": "Maya Samo", + "syn": "Senaya", + "syo": "Suoy", + "syr": "Syriac", + "sys": "Sinyar", + "syw": "Kagate", + "syx": "Samay", + "syy": "Al-Sayyid Bedouin Sign Language", + "sza": "Semelai", + "szb": "Ngalum", + "szc": "Semaq Beri", + "szd": "Seru", + "sze": "Seze", + "szg": "Sengele", + "szl": "Silesian", + "szn": "Sula", + "szp": "Suabo", + "szs": "Solomon Islands Sign Language", + "szv": "Isu (Fako Division)", + "szw": "Sawai", + "szy": "Sakizaya", + "ta": "Tamil", + 
"taa": "Lower Tanana", + "tab": "Tabassaran", + "tac": "Lowland Tarahumara", + "tad": "Tause", + "tae": "Tariana", + "taf": "Tapirapé", + "tag": "Tagoi", + "tai": "Tai languages", + "taj": "Eastern Tamang", + "tak": "Tala", + "tal": "Tal", + "tan": "Tangale", + "tao": "Yami", + "tap": "Taabwa", + "taq": "Tamasheq", + "tar": "Central Tarahumara", + "tas": "Tay Boi", + "tau": "Upper Tanana", + "tav": "Tatuyo", + "taw": "Tai", + "tax": "Tamki", + "tay": "Atayal", + "taz": "Tocho", + "tba": "Aikanã", + "tbc": "Takia", + "tbd": "Kaki Ae", + "tbe": "Tanimbili", + "tbf": "Mandara", + "tbg": "North Tairora", + "tbh": "Dharawal; Thurawal", + "tbi": "Gaam", + "tbj": "Tiang", + "tbk": "Calamian Tagbanwa", + "tbl": "Tboli", + "tbm": "Tagbu", + "tbn": "Barro Negro Tunebo", + "tbo": "Tawala", + "tbp": "Taworta; Diebroud", + "tbq": "Tibeto-Burman languages", + "tbr": "Tumtum", + "tbs": "Tanguat", + "tbt": "Tembo (Kitembo)", + "tbu": "Tubar", + "tbv": "Tobo", + "tbw": "Tagbanwa", + "tbx": "Kapin", + "tby": "Tabaru", + "tbz": "Ditammari", + "tca": "Ticuna", + "tcb": "Tanacross", + "tcc": "Datooga", + "tcd": "Tafi", + "tce": "Southern Tutchone", + "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec", + "tcg": "Tamagario", + "tch": "Turks And Caicos Creole English", + "tci": "Wára", + "tck": "Tchitchege", + "tcl": "Taman (Myanmar)", + "tcm": "Tanahmerah", + "tcn": "Tichurong", + "tco": "Taungyo", + "tcp": "Tawr Chin", + "tcq": "Kaiy", + "tcs": "Torres Strait Creole; Yumplatok", + "tct": "T'en", + "tcu": "Southeastern Tarahumara", + "tcw": "Tecpatlán Totonac", + "tcx": "Toda", + "tcy": "Tulu", + "tcz": "Thado Chin", + "tda": "Tagdal", + "tdb": "Panchpargania", + "tdc": "Emberá-Tadó", + "tdd": "Tai Nüa", + "tde": "Tiranige Diga Dogon", + "tdf": "Talieng", + "tdg": "Western Tamang", + "tdh": "Thulung", + "tdi": "Tomadino", + "tdj": "Tajio", + "tdk": "Tambas", + "tdl": "Sur", + "tdm": "Taruma", + "tdn": "Tondano", + "tdo": "Teme", + "tdq": "Tita", + "tdr": "Todrah", + "tds": "Doutai", + "tdt": "Tetun Dili", + "tdv": "Toro", + "tdx": "Tandroy-Mahafaly Malagasy", + "tdy": "Tadyawan", + "te": "Telugu", + "tea": "Temiar", + "teb": "Tetete", + "tec": "Terik", + "ted": "Tepo Krumen", + "tee": "Huehuetla Tepehua", + "tef": "Teressa", + "teg": "Teke-Tege", + "teh": "Tehuelche", + "tei": "Torricelli", + "tek": "Ibali Teke", + "tem": "Timne", + "ten": "Tama (Colombia)", + "teo": "Teso", + "tep": "Tepecano", + "teq": "Temein", + "ter": "Tereno", + "tes": "Tengger", + "tet": "Tetum", + "teu": "Soo", + "tev": "Teor", + "tew": "Tewa (USA)", + "tex": "Tennet", + "tey": "Tulishi", + "tez": "Tetserret", + "tfi": "Tofin Gbe", + "tfn": "Tanaina", + "tfo": "Tefaro", + "tfr": "Teribe", + "tft": "Ternate", + "tg": "Tajik", + "tga": "Sagalla", + "tgb": "Tobilung", + "tgc": "Tigak", + "tgd": "Ciwogai", + "tge": "Eastern Gorkha Tamang", + "tgf": "Chalikha", + "tgh": "Tobagonian Creole English", + "tgi": "Lawunuia", + "tgj": "Tagin", + "tgn": "Tandaganon", + "tgo": "Sudest", + "tgp": "Tangoa", + "tgq": "Tring", + "tgr": "Tareng", + "tgs": "Nume", + "tgt": "Central Tagbanwa", + "tgu": "Tanggu", + "tgv": "Tingui-Boto", + "tgw": "Tagwana Senoufo", + "tgx": "Tagish", + "tgy": "Togoyo", + "tgz": "Tagalaka", + "th": "Thai", + "thd": "Kuuk Thaayorre; Thayore", + "the": "Chitwania Tharu", + "thf": "Thangmi", + "thh": "Northern Tarahumara", + "thi": "Tai Long", + "thk": "Tharaka; Kitharaka", + "thl": "Dangaura Tharu", + "thm": "Aheu", + "thn": "Thachanadan", + "thp": "Thompson", + "thq": "Kochila Tharu", + "thr": "Rana Tharu", + "ths": "Thakali", 
+ "tht": "Tahltan", + "thu": "Thuri", + "thv": "Tahaggart Tamahaq", + "thy": "Tha", + "thz": "Tayart Tamajeq", + "ti": "Tigrinya", + "tia": "Tidikelt Tamazight", + "tic": "Tira", + "tif": "Tifal", + "tig": "Tigre", + "tih": "Timugon Murut", + "tii": "Tiene", + "tij": "Tilung", + "tik": "Tikar", + "til": "Tillamook", + "tim": "Timbe", + "tin": "Tindi", + "tio": "Teop", + "tip": "Trimuris", + "tiq": "Tiéfo", + "tis": "Masadiit Itneg", + "tit": "Tinigua", + "tiu": "Adasen", + "tiv": "Tiv", + "tiw": "Tiwi", + "tix": "Southern Tiwa", + "tiy": "Tiruray", + "tiz": "Tai Hongjin", + "tja": "Tajuasohn", + "tjg": "Tunjung", + "tji": "Northern Tujia", + "tjj": "Tjungundji", + "tjl": "Tai Laing", + "tjm": "Timucua", + "tjn": "Tonjon", + "tjo": "Temacine Tamazight", + "tjp": "Tjupany", + "tjs": "Southern Tujia", + "tju": "Tjurruru", + "tjw": "Djabwurrung", + "tk": "Turkmen", + "tka": "Truká", + "tkb": "Buksa", + "tkd": "Tukudede", + "tke": "Takwane", + "tkf": "Tukumanféd", + "tkg": "Tesaka Malagasy", + "tkl": "Tokelau", + "tkm": "Takelma", + "tkn": "Toku-No-Shima", + "tkp": "Tikopia", + "tkq": "Tee", + "tkr": "Tsakhur", + "tks": "Takestani", + "tkt": "Kathoriya Tharu", + "tku": "Upper Necaxa Totonac", + "tkv": "Mur Pano", + "tkw": "Teanu", + "tkx": "Tangko", + "tkz": "Takua", + "tl": "Tagalog", + "tla": "Southwestern Tepehuan", + "tlb": "Tobelo", + "tlc": "Yecuatla Totonac", + "tld": "Talaud", + "tlf": "Telefol", + "tlg": "Tofanma", + "tlh": "Klingon; tlhIngan Hol", + "tli": "Tlingit", + "tlj": "Talinga-Bwisi", + "tlk": "Taloki", + "tll": "Tetela", + "tlm": "Tolomako", + "tln": "Talondo'", + "tlo": "Talodi", + "tlp": "Filomena Mata-Coahuitlán Totonac", + "tlq": "Tai Loi", + "tlr": "Talise", + "tls": "Tambotalo", + "tlt": "Sou Nama; Teluti", + "tlu": "Tulehu", + "tlv": "Taliabu", + "tlx": "Khehek", + "tly": "Talysh", + "tma": "Tama (Chad)", + "tmb": "Katbol; Avava", + "tmc": "Tumak", + "tmd": "Haruai", + "tme": "Tremembé", + "tmf": "Toba-Maskoy", + "tmg": "Ternateño", + "tmh": "Tamashek", + "tmi": "Tutuba", + "tmj": "Samarokena", + "tmk": "Northwestern Tamang", + "tml": "Tamnim Citak", + "tmm": "Tai Thanh", + "tmn": "Taman (Indonesia)", + "tmo": "Temoq", + "tmq": "Tumleo", + "tmr": "Jewish Babylonian Aramaic (ca. 
200-1200 CE)", + "tms": "Tima", + "tmt": "Tasmate", + "tmu": "Iau", + "tmv": "Tembo (Motembo)", + "tmw": "Temuan", + "tmy": "Tami", + "tmz": "Tamanaku", + "tn": "Tswana", + "tna": "Tacana", + "tnb": "Western Tunebo", + "tnc": "Tanimuca-Retuarã", + "tnd": "Angosturas Tunebo", + "tng": "Tobanga", + "tnh": "Maiani", + "tni": "Tandia", + "tnk": "Kwamera", + "tnl": "Lenakel", + "tnm": "Tabla", + "tnn": "North Tanna", + "tno": "Toromono", + "tnp": "Whitesands", + "tnq": "Taino", + "tnr": "Ménik", + "tns": "Tenis", + "tnt": "Tontemboan", + "tnu": "Tay Khang", + "tnv": "Tangchangya", + "tnw": "Tonsawang", + "tnx": "Tanema", + "tny": "Tongwe", + "tnz": "Ten'edn", + "to": "Tonga (Tonga Islands)", + "tob": "Toba", + "toc": "Coyutla Totonac", + "tod": "Toma", + "tof": "Gizrra", + "tog": "Tonga (Nyasa)", + "toh": "Gitonga", + "toi": "Tonga (Zambia)", + "toj": "Tojolabal", + "tok": "Toki Pona", + "tol": "Tolowa", + "tom": "Tombulu", + "too": "Xicotepec De Juárez Totonac", + "top": "Papantla Totonac", + "toq": "Toposa", + "tor": "Togbo-Vara Banda", + "tos": "Highland Totonac", + "tou": "Tho", + "tov": "Upper Taromi", + "tow": "Jemez", + "tox": "Tobian", + "toy": "Topoiyo", + "toz": "To", + "tpa": "Taupota", + "tpc": "Azoyú Me'phaa; Azoyú Tlapanec", + "tpe": "Tippera", + "tpf": "Tarpia", + "tpg": "Kula", + "tpi": "Tok Pisin", + "tpj": "Tapieté", + "tpk": "Tupinikin", + "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec", + "tpm": "Tampulma", + "tpn": "Tupinambá", + "tpo": "Tai Pao", + "tpp": "Pisaflores Tepehua", + "tpq": "Tukpa", + "tpr": "Tuparí", + "tpt": "Tlachichilco Tepehua", + "tpu": "Tampuan", + "tpv": "Tanapag", + "tpw": "Tupí", + "tpx": "Acatepec Me'phaa; Acatepec Tlapanec", + "tpy": "Trumai", + "tpz": "Tinputz", + "tqb": "Tembé", + "tql": "Lehali", + "tqm": "Turumsa", + "tqn": "Tenino", + "tqo": "Toaripi", + "tqp": "Tomoip", + "tqq": "Tunni", + "tqr": "Torona", + "tqt": "Western Totonac", + "tqu": "Touo", + "tqw": "Tonkawa", + "tr": "Turkish", + "tra": "Tirahi", + "trb": "Terebu", + "trc": "Copala Triqui", + "trd": "Turi", + "tre": "East Tarangan", + "trf": "Trinidadian Creole English", + "trg": "Lishán Didán", + "trh": "Turaka", + "tri": "Trió", + "trj": "Toram", + "trk": "Turkic languages", + "trl": "Traveller Scottish", + "trm": "Tregami", + "trn": "Trinitario", + "tro": "Tarao Naga", + "trp": "Kok Borok", + "trq": "San Martín Itunyoso Triqui", + "trr": "Taushiro", + "trs": "Chicahuaxtla Triqui", + "trt": "Tunggare", + "tru": "Turoyo; Surayt", + "trv": "Sediq; Seediq; Taroko", + "trw": "Torwali", + "trx": "Tringgus-Sembaan Bidayuh", + "try": "Turung", + "trz": "Torá", + "ts": "Tsonga", + "tsa": "Tsaangi", + "tsb": "Tsamai", + "tsc": "Tswa", + "tsd": "Tsakonian", + "tse": "Tunisian Sign Language", + "tsg": "Tausug", + "tsh": "Tsuvan", + "tsi": "Tsimshian", + "tsj": "Tshangla", + "tsk": "Tseku", + "tsl": "Ts'ün-Lao", + "tsm": "Turkish Sign Language; Türk İşaret Dili", + "tsp": "Northern Toussian", + "tsq": "Thai Sign Language", + "tsr": "Akei", + "tss": "Taiwan Sign Language", + "tst": "Tondi Songway Kiini", + "tsu": "Tsou", + "tsv": "Tsogo", + "tsw": "Tsishingini", + "tsx": "Mubami", + "tsy": "Tebul Sign Language", + "tsz": "Purepecha", + "tt": "Tatar", + "tta": "Tutelo", + "ttb": "Gaa", + "ttc": "Tektiteko", + "ttd": "Tauade", + "tte": "Bwanabwana", + "ttf": "Tuotomb", + "ttg": "Tutong", + "tth": "Upper Ta'oih", + "tti": "Tobati", + "ttj": "Tooro", + "ttk": "Totoro", + "ttl": "Totela", + "ttm": "Northern Tutchone", + "ttn": "Towei", + "tto": "Lower Ta'oih", + "ttp": "Tombelala", + "ttq": 
"Tawallammat Tamajaq", + "ttr": "Tera", + "tts": "Northeastern Thai", + "ttt": "Muslim Tat", + "ttu": "Torau", + "ttv": "Titan", + "ttw": "Long Wat", + "tty": "Sikaritai", + "ttz": "Tsum", + "tua": "Wiarumus", + "tub": "Tübatulabal", + "tuc": "Mutu", + "tud": "Tuxá", + "tue": "Tuyuca", + "tuf": "Central Tunebo", + "tug": "Tunia", + "tuh": "Taulil", + "tui": "Tupuri", + "tuj": "Tugutil", + "tul": "Tula", + "tum": "Tumbuka", + "tun": "Tunica", + "tuo": "Tucano", + "tup": "Tupi languages", + "tuq": "Tedaga", + "tus": "Tuscarora", + "tut": "Altaic languages", + "tuu": "Tututni", + "tuv": "Turkana", + "tuw": "Tungus languages", + "tux": "Tuxináwa", + "tuy": "Tugen", + "tuz": "Turka", + "tva": "Vaghua", + "tvd": "Tsuvadi", + "tve": "Te'un", + "tvk": "Southeast Ambrym", + "tvl": "Tuvalu", + "tvm": "Tela-Masbuar", + "tvn": "Tavoyan", + "tvo": "Tidore", + "tvs": "Taveta", + "tvt": "Tutsa Naga", + "tvu": "Tunen", + "tvw": "Sedoa", + "tvx": "Taivoan", + "tvy": "Timor Pidgin", + "tw": "Twi", + "twa": "Twana", + "twb": "Western Tawbuid", + "twc": "Teshenawa", + "twd": "Twents", + "twe": "Tewa (Indonesia)", + "twf": "Northern Tiwa", + "twg": "Tereweng", + "twh": "Tai Dón", + "twl": "Tawara", + "twm": "Tawang Monpa", + "twn": "Twendi", + "two": "Tswapong", + "twp": "Ere", + "twq": "Tasawaq", + "twr": "Southwestern Tarahumara", + "twt": "Turiwára", + "twu": "Termanu", + "tww": "Tuwari", + "twx": "Tewe", + "twy": "Tawoyan", + "txa": "Tombonuo", + "txb": "Tokharian B", + "txc": "Tsetsaut", + "txe": "Totoli", + "txg": "Tangut", + "txh": "Thracian", + "txi": "Ikpeng", + "txj": "Tarjumo", + "txm": "Tomini", + "txn": "West Tarangan", + "txo": "Toto", + "txq": "Tii", + "txr": "Tartessian", + "txs": "Tonsea", + "txt": "Citak", + "txu": "Kayapó", + "txx": "Tatana", + "txy": "Tanosy Malagasy", + "ty": "Tahitian", + "tya": "Tauya", + "tye": "Kyanga", + "tyh": "O'du", + "tyi": "Teke-Tsaayi", + "tyj": "Tai Do; Tai Yo", + "tyl": "Thu Lao", + "tyn": "Kombai", + "typ": "Thaypan", + "tyr": "Tai Daeng", + "tys": "Tày Sa Pa", + "tyt": "Tày Tac", + "tyu": "Kua", + "tyv": "Tuvinian", + "tyx": "Teke-Tyee", + "tyy": "Tiyaa", + "tyz": "Tày", + "tza": "Tanzanian Sign Language", + "tzh": "Tzeltal", + "tzj": "Tz'utujil", + "tzl": "Talossan", + "tzm": "Central Atlas Tamazight", + "tzn": "Tugun", + "tzo": "Tzotzil", + "tzx": "Tabriak", + "uam": "Uamué", + "uan": "Kuan", + "uar": "Tairuma", + "uba": "Ubang", + "ubi": "Ubi", + "ubl": "Buhi'non Bikol", + "ubr": "Ubir", + "ubu": "Umbu-Ungu", + "uby": "Ubykh", + "uda": "Uda", + "ude": "Udihe", + "udg": "Muduga", + "udi": "Udi", + "udj": "Ujir", + "udl": "Wuzlam", + "udm": "Udmurt", + "udu": "Uduk", + "ues": "Kioko", + "ufi": "Ufim", + "ug": "Uighur; Uyghur", + "uga": "Ugaritic", + "ugb": "Kuku-Ugbanh", + "uge": "Ughele", + "ugh": "Kubachi", + "ugn": "Ugandan Sign Language", + "ugo": "Ugong", + "ugy": "Uruguayan Sign Language", + "uha": "Uhami", + "uhn": "Damal", + "uis": "Uisai", + "uiv": "Iyive", + "uji": "Tanjijili", + "uk": "Ukrainian", + "uka": "Kaburi", + "ukg": "Ukuriguma", + "ukh": "Ukhwejo", + "uki": "Kui (India)", + "ukk": "Muak Sa-aak", + "ukl": "Ukrainian Sign Language", + "ukp": "Ukpe-Bayobiri", + "ukq": "Ukwa", + "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language", + "uku": "Ukue", + "ukv": "Kuku", + "ukw": "Ukwuani-Aboh-Ndoni", + "uky": "Kuuk-Yak", + "ula": "Fungwa", + "ulb": "Ulukwumi", + "ulc": "Ulch", + "ule": "Lule", + "ulf": "Usku; Afra", + "uli": "Ulithian", + "ulk": "Meriam Mir", + "ull": "Ullatan", + "ulm": "Ulumanda'", + "uln": "Unserdeutsch", + "ulu": "Uma' 
Lung", + "ulw": "Ulwa", + "uma": "Umatilla", + "umb": "Umbundu", + "umc": "Marrucinian", + "umd": "Umbindhamu", + "umg": "Morrobalama; Umbuygamu", + "umi": "Ukit", + "umm": "Umon", + "umn": "Makyan Naga", + "umo": "Umotína", + "ump": "Umpila", + "umr": "Umbugarla", + "ums": "Pendau", + "umu": "Munsee", + "una": "North Watut", + "und": "Undetermined", + "une": "Uneme", + "ung": "Ngarinyin", + "uni": "Uni", + "unk": "Enawené-Nawé", + "unm": "Unami", + "unn": "Kurnai", + "unr": "Mundari", + "unu": "Unubahe", + "unx": "Munda", + "unz": "Unde Kaili", + "uon": "Kulon", + "upi": "Umeda", + "upv": "Uripiv-Wala-Rano-Atchin", + "ur": "Urdu", + "ura": "Urarina", + "urb": "Urubú-Kaapor; Kaapor", + "urc": "Urningangg", + "ure": "Uru", + "urf": "Uradhi", + "urg": "Urigina", + "urh": "Urhobo", + "uri": "Urim", + "urj": "Uralic languages", + "urk": "Urak Lawoi'", + "url": "Urali", + "urm": "Urapmin", + "urn": "Uruangnirin", + "uro": "Ura (Papua New Guinea)", + "urp": "Uru-Pa-In", + "urr": "Lehalurup; Löyöp", + "urt": "Urat", + "uru": "Urumi", + "urv": "Uruava", + "urw": "Sop", + "urx": "Urimo", + "ury": "Orya", + "urz": "Uru-Eu-Wau-Wau", + "usa": "Usarufa", + "ush": "Ushojo", + "usi": "Usui", + "usk": "Usaghade", + "usp": "Uspanteco", + "uss": "us-Saare", + "usu": "Uya", + "uta": "Otank", + "ute": "Ute-Southern Paiute", + "uth": "ut-Hun", + "utp": "Amba (Solomon Islands)", + "utr": "Etulo", + "utu": "Utu", + "uum": "Urum", + "uur": "Ura (Vanuatu)", + "uuu": "U", + "uve": "West Uvean; Fagauvea", + "uvh": "Uri", + "uvl": "Lote", + "uwa": "Kuku-Uwanh", + "uya": "Doko-Uyanga", + "uz": "Uzbek", + "uzn": "Northern Uzbek", + "uzs": "Southern Uzbek", + "vaa": "Vaagri Booli", + "vae": "Vale", + "vaf": "Vafsi", + "vag": "Vagla", + "vah": "Varhadi-Nagpuri", + "vai": "Vai", + "vaj": "Sekele; Northwestern ǃKung; Vasekele", + "val": "Vehes", + "vam": "Vanimo", + "van": "Valman", + "vao": "Vao", + "vap": "Vaiphei", + "var": "Huarijio", + "vas": "Vasavi", + "vau": "Vanuma", + "vav": "Varli", + "vay": "Wayu", + "vbb": "Southeast Babar", + "vbk": "Southwestern Bontok", + "ve": "Venda", + "vec": "Venetian", + "ved": "Veddah", + "vel": "Veluws", + "vem": "Vemgo-Mabas", + "veo": "Ventureño", + "vep": "Veps", + "ver": "Mom Jango", + "vgr": "Vaghri", + "vgt": "Vlaamse Gebarentaal; Flemish Sign Language", + "vi": "Vietnamese", + "vic": "Virgin Islands Creole English", + "vid": "Vidunda", + "vif": "Vili", + "vig": "Viemo", + "vil": "Vilela", + "vin": "Vinza", + "vis": "Vishavan", + "vit": "Viti", + "viv": "Iduna", + "vka": "Kariyarra", + "vkj": "Kujarge", + "vkk": "Kaur", + "vkl": "Kulisusu", + "vkm": "Kamakan", + "vkn": "Koro Nulu", + "vko": "Kodeoha", + "vkp": "Korlai Creole Portuguese", + "vkt": "Tenggarong Kutai Malay", + "vku": "Kurrama", + "vkz": "Koro Zuba", + "vlp": "Valpei", + "vls": "Vlaams", + "vma": "Martuyhunira", + "vmb": "Barbaram", + "vmc": "Juxtlahuaca Mixtec", + "vmd": "Mudu Koraga", + "vme": "East Masela", + "vmf": "Mainfränkisch", + "vmg": "Lungalunga", + "vmh": "Maraghei", + "vmi": "Miwa", + "vmj": "Ixtayutla Mixtec", + "vmk": "Makhuwa-Shirima", + "vml": "Malgana", + "vmm": "Mitlatongo Mixtec", + "vmp": "Soyaltepec Mazatec", + "vmq": "Soyaltepec Mixtec", + "vmr": "Marenje", + "vms": "Moksela", + "vmu": "Muluridyi", + "vmv": "Valley Maidu", + "vmw": "Makhuwa", + "vmx": "Tamazola Mixtec", + "vmy": "Ayautla Mazatec", + "vmz": "Mazatlán Mazatec", + "vnk": "Vano; Lovono", + "vnm": "Vinmavis; Neve'ei", + "vnp": "Vunapu", + "vo": "Volapük", + "vor": "Voro", + "vot": "Votic", + "vra": "Vera'a", + "vro": "Võro", + 
"vrs": "Varisi", + "vrt": "Burmbar; Banam Bay", + "vsi": "Moldova Sign Language", + "vsl": "Venezuelan Sign Language", + "vsv": "Valencian Sign Language; Llengua de signes valenciana", + "vto": "Vitou", + "vum": "Vumbu", + "vun": "Vunjo", + "vut": "Vute", + "vwa": "Awa (China)", + "wa": "Walloon", + "waa": "Walla Walla", + "wab": "Wab", + "wac": "Wasco-Wishram", + "wad": "Wamesa; Wondama", + "wae": "Walser", + "waf": "Wakoná", + "wag": "Wa'ema", + "wah": "Watubela", + "wai": "Wares", + "waj": "Waffa", + "wak": "Wakashan languages", + "wal": "Wolaytta; Wolaitta", + "wam": "Wampanoag", + "wan": "Wan", + "wao": "Wappo", + "wap": "Wapishana", + "waq": "Wagiman", + "war": "Waray (Philippines)", + "was": "Washo", + "wat": "Kaninuwa", + "wau": "Waurá", + "wav": "Waka", + "waw": "Waiwai", + "wax": "Watam; Marangis", + "way": "Wayana", + "waz": "Wampur", + "wba": "Warao", + "wbb": "Wabo", + "wbe": "Waritai", + "wbf": "Wara", + "wbh": "Wanda", + "wbi": "Vwanji", + "wbj": "Alagwa", + "wbk": "Waigali", + "wbl": "Wakhi", + "wbm": "Wa", + "wbp": "Warlpiri", + "wbq": "Waddar", + "wbr": "Wagdi", + "wbs": "West Bengal Sign Language", + "wbt": "Warnman", + "wbv": "Wajarri", + "wbw": "Woi", + "wca": "Yanomámi", + "wci": "Waci Gbe", + "wdd": "Wandji", + "wdg": "Wadaginam", + "wdj": "Wadjiginy", + "wdk": "Wadikali", + "wdt": "Wendat", + "wdu": "Wadjigu", + "wdy": "Wadjabangayi", + "wea": "Wewaw", + "wec": "Wè Western", + "wed": "Wedau", + "weg": "Wergaia", + "weh": "Weh", + "wei": "Kiunum", + "wem": "Weme Gbe", + "wen": "Sorbian languages", + "weo": "Wemale", + "wep": "Westphalien", + "wer": "Weri", + "wes": "Cameroon Pidgin", + "wet": "Perai", + "weu": "Rawngtu Chin", + "wew": "Wejewa", + "wfg": "Yafi; Zorop", + "wga": "Wagaya", + "wgb": "Wagawaga", + "wgg": "Wangkangurru; Wangganguru", + "wgi": "Wahgi", + "wgo": "Waigeo", + "wgu": "Wirangu", + "wgy": "Warrgamay", + "wha": "Sou Upaa; Manusela", + "whg": "North Wahgi", + "whk": "Wahau Kenyah", + "whu": "Wahau Kayan", + "wib": "Southern Toussian", + "wic": "Wichita", + "wie": "Wik-Epa", + "wif": "Wik-Keyangan", + "wig": "Wik Ngathan", + "wih": "Wik-Me'anha", + "wii": "Minidien", + "wij": "Wik-Iiyanh", + "wik": "Wikalkan", + "wil": "Wilawila", + "wim": "Wik-Mungkan", + "win": "Ho-Chunk", + "wir": "Wiraféd", + "wiu": "Wiru", + "wiv": "Vitu", + "wiy": "Wiyot", + "wja": "Waja", + "wji": "Warji", + "wka": "Kw'adza", + "wkb": "Kumbaran", + "wkd": "Wakde; Mo", + "wkl": "Kalanadi", + "wkr": "Keerray-Woorroong", + "wku": "Kunduvadi", + "wkw": "Wakawaka", + "wky": "Wangkayutyuru", + "wla": "Walio", + "wlc": "Mwali Comorian", + "wle": "Wolane", + "wlg": "Kunbarlang", + "wlh": "Welaun", + "wli": "Waioli", + "wlk": "Wailaki", + "wll": "Wali (Sudan)", + "wlm": "Middle Welsh", + "wlo": "Wolio", + "wlr": "Wailapa", + "wls": "Wallisian", + "wlu": "Wuliwuli", + "wlv": "Wichí Lhamtés Vejoz", + "wlw": "Walak", + "wlx": "Wali (Ghana)", + "wly": "Waling", + "wma": "Mawa (Nigeria)", + "wmb": "Wambaya", + "wmc": "Wamas", + "wmd": "Mamaindé", + "wme": "Wambule", + "wmg": "Western Minyag", + "wmh": "Waima'a", + "wmi": "Wamin", + "wmm": "Maiwa (Indonesia)", + "wmn": "Waamwang", + "wmo": "Wom (Papua New Guinea)", + "wms": "Wambon", + "wmt": "Walmajarri", + "wmw": "Mwani", + "wmx": "Womo", + "wnb": "Wanambre", + "wnc": "Wantoat", + "wnd": "Wandarang", + "wne": "Waneci", + "wng": "Wanggom", + "wni": "Ndzwani Comorian", + "wnk": "Wanukaka", + "wnm": "Wanggamala", + "wnn": "Wunumara", + "wno": "Wano", + "wnp": "Wanap", + "wnu": "Usan", + "wnw": "Wintu", + "wny": "Wanyi; Waanyi", + "wo": 
"Wolof", + "woa": "Kuwema; Tyaraity", + "wob": "Wè Northern", + "woc": "Wogeo", + "wod": "Wolani", + "woe": "Woleaian", + "wof": "Gambian Wolof", + "wog": "Wogamusin", + "woi": "Kamang", + "wok": "Longto", + "wom": "Wom (Nigeria)", + "won": "Wongo", + "woo": "Manombai", + "wor": "Woria", + "wos": "Hanga Hundi", + "wow": "Wawonii", + "woy": "Weyto", + "wpc": "Maco", + "wrb": "Waluwarra; Warluwara", + "wrg": "Warungu; Gudjal", + "wrh": "Wiradjuri", + "wri": "Wariyangga", + "wrk": "Garrwa", + "wrl": "Warlmanpa", + "wrm": "Warumungu", + "wrn": "Warnang", + "wro": "Worrorra", + "wrp": "Waropen", + "wrr": "Wardaman", + "wrs": "Waris", + "wru": "Waru", + "wrv": "Waruna", + "wrw": "Gugu Warra", + "wrx": "Wae Rana", + "wry": "Merwari", + "wrz": "Waray (Australia)", + "wsa": "Warembori", + "wsg": "Adilabad Gondi", + "wsi": "Wusi", + "wsk": "Waskia", + "wsr": "Owenia", + "wss": "Wasa", + "wsu": "Wasu", + "wsv": "Wotapuri-Katarqalai", + "wtf": "Watiwa", + "wth": "Wathawurrung", + "wti": "Berta", + "wtk": "Watakataui", + "wtm": "Mewati", + "wtw": "Wotu", + "wua": "Wikngenchera", + "wub": "Wunambal", + "wud": "Wudu", + "wuh": "Wutunhua", + "wul": "Silimo", + "wum": "Wumbvu", + "wun": "Bungu", + "wur": "Wurrugu", + "wut": "Wutung", + "wuu": "Wu Chinese", + "wuv": "Wuvulu-Aua", + "wux": "Wulna", + "wuy": "Wauyai", + "wwa": "Waama", + "wwb": "Wakabunga", + "wwo": "Wetamut; Dorig", + "wwr": "Warrwa", + "www": "Wawa", + "wxa": "Waxianghua", + "wxw": "Wardandi", + "wyb": "Wangaaybuwan-Ngiyambaa", + "wyi": "Woiwurrung", + "wym": "Wymysorys", + "wyn": "Wyandot", + "wyr": "Wayoró", + "wyy": "Western Fijian", + "xaa": "Andalusian Arabic", + "xab": "Sambe", + "xac": "Kachari", + "xad": "Adai", + "xae": "Aequian", + "xag": "Aghwan", + "xai": "Kaimbé", + "xaj": "Ararandewára", + "xak": "Máku", + "xal": "Kalmyk; Oirat", + "xam": "ǀXam", + "xan": "Xamtanga", + "xao": "Khao", + "xap": "Apalachee", + "xaq": "Aquitanian", + "xar": "Karami", + "xas": "Kamas", + "xat": "Katawixi", + "xau": "Kauwera", + "xav": "Xavánte", + "xaw": "Kawaiisu", + "xay": "Kayan Mahakam", + "xbb": "Lower Burdekin", + "xbc": "Bactrian", + "xbd": "Bindal", + "xbe": "Bigambal", + "xbg": "Bunganditj", + "xbi": "Kombio", + "xbj": "Birrpayi", + "xbm": "Middle Breton", + "xbn": "Kenaboi", + "xbo": "Bolgarian", + "xbp": "Bibbulman", + "xbr": "Kambera", + "xbw": "Kambiwá", + "xby": "Batjala; Batyala", + "xcb": "Cumbric", + "xcc": "Camunic", + "xce": "Celtiberian", + "xcg": "Cisalpine Gaulish", + "xch": "Chemakum; Chimakum", + "xcl": "Classical Armenian", + "xcm": "Comecrudo", + "xcn": "Cotoname", + "xco": "Chorasmian", + "xcr": "Carian", + "xct": "Classical Tibetan", + "xcu": "Curonian", + "xcv": "Chuvantsy", + "xcw": "Coahuilteco", + "xcy": "Cayuse", + "xda": "Darkinyung", + "xdc": "Dacian", + "xdk": "Dharuk", + "xdm": "Edomite", + "xdo": "Kwandu", + "xdq": "Kaitag", + "xdy": "Malayic Dayak", + "xeb": "Eblan", + "xed": "Hdi", + "xeg": "ǁXegwi", + "xel": "Kelo", + "xem": "Kembayan", + "xep": "Epi-Olmec", + "xer": "Xerénte", + "xes": "Kesawai", + "xet": "Xetá", + "xeu": "Keoru-Ahia", + "xfa": "Faliscan", + "xga": "Galatian", + "xgb": "Gbin", + "xgd": "Gudang", + "xgf": "Gabrielino-Fernandeño", + "xgg": "Goreng", + "xgi": "Garingbal", + "xgl": "Galindan", + "xgm": "Dharumbal; Guwinmal", + "xgn": "Mongolian languages", + "xgr": "Garza", + "xgu": "Unggumi", + "xgw": "Guwa", + "xh": "Xhosa", + "xha": "Harami", + "xhc": "Hunnic", + "xhd": "Hadrami", + "xhe": "Khetrani", + "xhm": "Middle Khmer (1400 to 1850 CE)", + "xhr": "Hernican", + "xht": "Hattic", + "xhu": 
"Hurrian", + "xhv": "Khua", + "xib": "Iberian", + "xii": "Xiri", + "xil": "Illyrian", + "xin": "Xinca", + "xir": "Xiriâna", + "xis": "Kisan", + "xiv": "Indus Valley Language", + "xiy": "Xipaya", + "xjb": "Minjungbal", + "xjt": "Jaitmatang", + "xka": "Kalkoti", + "xkb": "Northern Nago", + "xkc": "Kho'ini", + "xkd": "Mendalam Kayan", + "xke": "Kereho", + "xkf": "Khengkha", + "xkg": "Kagoro", + "xki": "Kenyan Sign Language", + "xkj": "Kajali", + "xkk": "Kachok; Kaco'", + "xkl": "Mainstream Kenyah", + "xkn": "Kayan River Kayan", + "xko": "Kiorr", + "xkp": "Kabatei", + "xkq": "Koroni", + "xkr": "Xakriabá", + "xks": "Kumbewaha", + "xkt": "Kantosi", + "xku": "Kaamba", + "xkv": "Kgalagadi", + "xkw": "Kembra", + "xkx": "Karore", + "xky": "Uma' Lasan", + "xkz": "Kurtokha", + "xla": "Kamula", + "xlb": "Loup B", + "xlc": "Lycian", + "xld": "Lydian", + "xle": "Lemnian", + "xlg": "Ligurian (Ancient)", + "xli": "Liburnian", + "xln": "Alanic", + "xlo": "Loup A", + "xlp": "Lepontic", + "xls": "Lusitanian", + "xlu": "Cuneiform Luwian", + "xly": "Elymian", + "xma": "Mushungulu", + "xmb": "Mbonga", + "xmc": "Makhuwa-Marrevone", + "xmd": "Mbudum", + "xme": "Median", + "xmf": "Mingrelian", + "xmg": "Mengaka", + "xmh": "Kugu-Muminh", + "xmj": "Majera", + "xmk": "Ancient Macedonian", + "xml": "Malaysian Sign Language", + "xmm": "Manado Malay", + "xmn": "Manichaean Middle Persian", + "xmo": "Morerebi", + "xmp": "Kuku-Mu'inh", + "xmq": "Kuku-Mangk", + "xmr": "Meroitic", + "xms": "Moroccan Sign Language", + "xmt": "Matbat", + "xmu": "Kamu", + "xmv": "Antankarana Malagasy; Tankarana Malagasy", + "xmw": "Tsimihety Malagasy", + "xmx": "Salawati; Maden", + "xmy": "Mayaguduna", + "xmz": "Mori Bawah", + "xna": "Ancient North Arabian", + "xnb": "Kanakanabu", + "xnd": "Na-Dene languages", + "xng": "Middle Mongolian", + "xnh": "Kuanhua", + "xni": "Ngarigu", + "xnj": "Ngoni (Tanzania)", + "xnk": "Nganakarti", + "xnm": "Ngumbarl", + "xnn": "Northern Kankanay", + "xno": "Anglo-Norman", + "xnq": "Ngoni (Mozambique)", + "xnr": "Kangri", + "xns": "Kanashi", + "xnt": "Narragansett", + "xnu": "Nukunul", + "xny": "Nyiyaparli", + "xnz": "Kenzi; Mattoki", + "xoc": "O'chi'chi'", + "xod": "Kokoda", + "xog": "Soga", + "xoi": "Kominimung", + "xok": "Xokleng", + "xom": "Komo (Sudan)", + "xon": "Konkomba", + "xoo": "Xukurú", + "xop": "Kopar", + "xor": "Korubo", + "xow": "Kowaki", + "xpa": "Pirriya", + "xpb": "Northeastern Tasmanian; Pyemmairrener", + "xpc": "Pecheneg", + "xpd": "Oyster Bay Tasmanian", + "xpe": "Liberia Kpelle", + "xpf": "Southeast Tasmanian; Nuenonne", + "xpg": "Phrygian", + "xph": "North Midlands Tasmanian; Tyerrenoterpanner", + "xpi": "Pictish", + "xpj": "Mpalitjanh", + "xpk": "Kulina Pano", + "xpl": "Port Sorell Tasmanian", + "xpm": "Pumpokol", + "xpn": "Kapinawá", + "xpo": "Pochutec", + "xpp": "Puyo-Paekche", + "xpq": "Mohegan-Pequot", + "xpr": "Parthian", + "xps": "Pisidian", + "xpt": "Punthamara", + "xpu": "Punic", + "xpv": "Northern Tasmanian; Tommeginne", + "xpw": "Northwestern Tasmanian; Peerapper", + "xpx": "Southwestern Tasmanian; Toogee", + "xpy": "Puyo", + "xpz": "Bruny Island Tasmanian", + "xqa": "Karakhanid", + "xqt": "Qatabanian", + "xra": "Krahô", + "xrb": "Eastern Karaboro", + "xrd": "Gundungurra", + "xre": "Kreye", + "xrg": "Minang", + "xri": "Krikati-Timbira", + "xrm": "Armazic", + "xrn": "Arin", + "xrr": "Raetic", + "xrt": "Aranama-Tamique", + "xru": "Marriammu", + "xrw": "Karawa", + "xsa": "Sabaean", + "xsb": "Sambal", + "xsc": "Scythian", + "xsd": "Sidetic", + "xse": "Sempan", + "xsh": "Shamang", + 
"xsi": "Sio", + "xsj": "Subi", + "xsl": "South Slavey", + "xsm": "Kasem", + "xsn": "Sanga (Nigeria)", + "xso": "Solano", + "xsp": "Silopi", + "xsq": "Makhuwa-Saka", + "xsr": "Sherpa", + "xss": "Assan", + "xsu": "Sanumá", + "xsv": "Sudovian", + "xsy": "Saisiyat", + "xta": "Alcozauca Mixtec", + "xtb": "Chazumba Mixtec", + "xtc": "Katcha-Kadugli-Miri", + "xtd": "Diuxi-Tilantongo Mixtec", + "xte": "Ketengban", + "xtg": "Transalpine Gaulish", + "xth": "Yitha Yitha", + "xti": "Sinicahua Mixtec", + "xtj": "San Juan Teita Mixtec", + "xtl": "Tijaltepec Mixtec", + "xtm": "Magdalena Peñasco Mixtec", + "xtn": "Northern Tlaxiaco Mixtec", + "xto": "Tokharian A", + "xtp": "San Miguel Piedras Mixtec", + "xtq": "Tumshuqese", + "xtr": "Early Tripuri", + "xts": "Sindihui Mixtec", + "xtt": "Tacahua Mixtec", + "xtu": "Cuyamecalco Mixtec", + "xtv": "Thawa", + "xtw": "Tawandê", + "xty": "Yoloxochitl Mixtec", + "xua": "Alu Kurumba", + "xub": "Betta Kurumba", + "xud": "Umiida", + "xug": "Kunigami", + "xuj": "Jennu Kurumba", + "xul": "Ngunawal; Nunukul", + "xum": "Umbrian", + "xun": "Unggaranggu", + "xuo": "Kuo", + "xup": "Upper Umpqua", + "xur": "Urartian", + "xut": "Kuthant", + "xuu": "Kxoe; Khwedam", + "xve": "Venetic", + "xvi": "Kamviri", + "xvn": "Vandalic", + "xvo": "Volscian", + "xvs": "Vestinian", + "xwa": "Kwaza", + "xwc": "Woccon", + "xwd": "Wadi Wadi", + "xwe": "Xwela Gbe", + "xwg": "Kwegu", + "xwj": "Wajuk", + "xwk": "Wangkumara", + "xwl": "Western Xwla Gbe", + "xwo": "Written Oirat", + "xwr": "Kwerba Mamberamo", + "xwt": "Wotjobaluk", + "xww": "Wemba Wemba", + "xxb": "Boro (Ghana)", + "xxk": "Ke'o", + "xxm": "Minkin", + "xxr": "Koropó", + "xxt": "Tambora", + "xya": "Yaygir", + "xyb": "Yandjibara", + "xyj": "Mayi-Yapi", + "xyk": "Mayi-Kulan", + "xyl": "Yalakalore", + "xyt": "Mayi-Thakurti", + "xyy": "Yorta Yorta", + "xzh": "Zhang-Zhung", + "xzm": "Zemgalian", + "xzp": "Ancient Zapotec", + "yaa": "Yaminahua", + "yab": "Yuhup", + "yac": "Pass Valley Yali", + "yad": "Yagua", + "yae": "Pumé", + "yaf": "Yaka (Democratic Republic of Congo)", + "yag": "Yámana", + "yah": "Yazgulyam", + "yai": "Yagnobi", + "yaj": "Banda-Yangere", + "yak": "Yakama", + "yal": "Yalunka", + "yam": "Yamba", + "yan": "Mayangna", + "yao": "Yao", + "yap": "Yapese", + "yaq": "Yaqui", + "yar": "Yabarana", + "yas": "Nugunu (Cameroon)", + "yat": "Yambeta", + "yau": "Yuwana", + "yav": "Yangben", + "yaw": "Yawalapití", + "yax": "Yauma", + "yay": "Agwagwune", + "yaz": "Lokaa", + "yba": "Yala", + "ybb": "Yemba", + "ybe": "West Yugur", + "ybh": "Yakha", + "ybi": "Yamphu", + "ybj": "Hasha", + "ybk": "Bokha", + "ybl": "Yukuben", + "ybm": "Yaben", + "ybn": "Yabaâna", + "ybo": "Yabong", + "ybx": "Yawiyo", + "yby": "Yaweyuha", + "ych": "Chesu", + "ycl": "Lolopo", + "ycn": "Yucuna", + "ycp": "Chepya", + "yda": "Yanda", + "ydd": "Eastern Yiddish", + "yde": "Yangum Dey", + "ydg": "Yidgha", + "ydk": "Yoidik", + "yea": "Ravula", + "yec": "Yeniche", + "yee": "Yimas", + "yei": "Yeni", + "yej": "Yevanic", + "yel": "Yela", + "yer": "Tarok", + "yes": "Nyankpa", + "yet": "Yetfa", + "yeu": "Yerukula", + "yev": "Yapunda", + "yey": "Yeyi", + "yga": "Malyangapa", + "ygi": "Yiningayi", + "ygl": "Yangum Gel", + "ygm": "Yagomi", + "ygp": "Gepo", + "ygr": "Yagaria", + "ygs": "Yolŋu Sign Language", + "ygu": "Yugul", + "ygw": "Yagwoia", + "yha": "Baha Buyang", + "yhd": "Judeo-Iraqi Arabic", + "yhl": "Hlepho Phowa", + "yhs": "Yan-nhaŋu Sign Language", + "yi": "Yiddish", + "yia": "Yinggarda", + "yif": "Ache", + "yig": "Wusa Nasu", + "yih": "Western Yiddish", + "yii": 
"Yidiny", + "yij": "Yindjibarndi", + "yik": "Dongshanba Lalo", + "yil": "Yindjilandji", + "yim": "Yimchungru Naga", + "yin": "Riang Lai; Yinchia", + "yip": "Pholo", + "yiq": "Miqie", + "yir": "North Awyu", + "yis": "Yis", + "yit": "Eastern Lalu", + "yiu": "Awu", + "yiv": "Northern Nisu", + "yix": "Axi Yi", + "yiz": "Azhe", + "yka": "Yakan", + "ykg": "Northern Yukaghir", + "yki": "Yoke", + "ykk": "Yakaikeke", + "ykl": "Khlula", + "ykm": "Kap", + "ykn": "Kua-nsi", + "yko": "Yasa", + "ykr": "Yekora", + "ykt": "Kathu", + "yku": "Kuamasi", + "yky": "Yakoma", + "yla": "Yaul", + "ylb": "Yaleba", + "yle": "Yele", + "ylg": "Yelogu", + "yli": "Angguruk Yali", + "yll": "Yil", + "ylm": "Limi", + "yln": "Langnian Buyang", + "ylo": "Naluo Yi", + "ylr": "Yalarnnga", + "ylu": "Aribwaung", + "yly": "Nyâlayu; Nyelâyu", + "ymb": "Yambes", + "ymc": "Southern Muji", + "ymd": "Muda", + "yme": "Yameo", + "ymg": "Yamongeri", + "ymh": "Mili", + "ymi": "Moji", + "ymk": "Makwe", + "yml": "Iamalele", + "ymm": "Maay", + "ymn": "Yamna; Sunum", + "ymo": "Yangum Mon", + "ymp": "Yamap", + "ymq": "Qila Muji", + "ymr": "Malasar", + "yms": "Mysian", + "ymx": "Northern Muji", + "ymz": "Muzi", + "yna": "Aluo", + "ynd": "Yandruwandha", + "yne": "Lang'e", + "yng": "Yango", + "ynk": "Naukan Yupik", + "ynl": "Yangulam", + "ynn": "Yana", + "yno": "Yong", + "ynq": "Yendang", + "yns": "Yansi", + "ynu": "Yahuna", + "yo": "Yoruba", + "yob": "Yoba", + "yog": "Yogad", + "yoi": "Yonaguni", + "yok": "Yokuts", + "yol": "Yola", + "yom": "Yombe", + "yon": "Yongkom", + "yot": "Yotti", + "yox": "Yoron", + "yoy": "Yoy", + "ypa": "Phala", + "ypb": "Labo Phowa", + "ypg": "Phola", + "yph": "Phupha", + "ypk": "Yupik languages", + "ypm": "Phuma", + "ypn": "Ani Phowa", + "ypo": "Alo Phola", + "ypp": "Phupa", + "ypz": "Phuza", + "yra": "Yerakai", + "yrb": "Yareba", + "yre": "Yaouré", + "yrk": "Nenets", + "yrl": "Nhengatu", + "yrm": "Yirrk-Mel", + "yrn": "Yerong", + "yro": "Yaroamë", + "yrs": "Yarsun", + "yrw": "Yarawata", + "yry": "Yarluyandi", + "ysc": "Yassic", + "ysd": "Samatao", + "ysg": "Sonaga", + "ysl": "Yugoslavian Sign Language", + "ysm": "Myanmar Sign Language", + "ysn": "Sani", + "yso": "Nisi (China)", + "ysp": "Southern Lolopo", + "ysr": "Sirenik Yupik", + "yss": "Yessan-Mayo", + "ysy": "Sanie", + "yta": "Talu", + "ytl": "Tanglang", + "ytp": "Thopho", + "ytw": "Yout Wam", + "yty": "Yatay", + "yua": "Yucateco; Yucatec Maya", + "yub": "Yugambal", + "yuc": "Yuchi", + "yud": "Judeo-Tripolitanian Arabic", + "yue": "Yue Chinese; Cantonese", + "yuf": "Havasupai-Walapai-Yavapai", + "yug": "Yug", + "yui": "Yurutí", + "yuj": "Karkar-Yuri", + "yuk": "Yuki", + "yul": "Yulu", + "yum": "Quechan", + "yun": "Bena (Nigeria)", + "yup": "Yukpa", + "yuq": "Yuqui", + "yur": "Yurok", + "yut": "Yopno", + "yuw": "Yau (Morobe Province)", + "yux": "Southern Yukaghir", + "yuy": "East Yugur", + "yuz": "Yuracare", + "yva": "Yawa", + "yvt": "Yavitero", + "ywa": "Kalou", + "ywg": "Yinhawangka", + "ywl": "Western Lalu", + "ywn": "Yawanawa", + "ywq": "Wuding-Luquan Yi", + "ywr": "Yawuru", + "ywt": "Xishanba Lalo; Central Lalo", + "ywu": "Wumeng Nasu", + "yww": "Yawarawarga", + "yxa": "Mayawali", + "yxg": "Yagara", + "yxl": "Yardliyawarra", + "yxm": "Yinwum", + "yxu": "Yuyu", + "yxy": "Yabula Yabula", + "yyr": "Yir Yoront", + "yyu": "Yau (Sandaun Province)", + "yyz": "Ayizi", + "yzg": "E'ma Buyang", + "yzk": "Zokhuo", + "za": "Zhuang; Chuang", + "zaa": "Sierra de Juárez Zapotec", + "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec", + "zac": "Ocotlán 
Zapotec", + "zad": "Cajonos Zapotec", + "zae": "Yareni Zapotec", + "zaf": "Ayoquesco Zapotec", + "zag": "Zaghawa", + "zah": "Zangwal", + "zai": "Isthmus Zapotec", + "zaj": "Zaramo", + "zak": "Zanaki", + "zal": "Zauzou", + "zam": "Miahuatlán Zapotec", + "zao": "Ozolotepec Zapotec", + "zap": "Zapotec", + "zaq": "Aloápam Zapotec", + "zar": "Rincón Zapotec", + "zas": "Santo Domingo Albarradas Zapotec", + "zat": "Tabaa Zapotec", + "zau": "Zangskari", + "zav": "Yatzachi Zapotec", + "zaw": "Mitla Zapotec", + "zax": "Xadani Zapotec", + "zay": "Zayse-Zergulla; Zaysete", + "zaz": "Zari", + "zba": "Balaibalan", + "zbc": "Central Berawan", + "zbe": "East Berawan", + "zbl": "Blissymbols; Bliss; Blissymbolics", + "zbt": "Batui", + "zbu": "Bu (Bauchi State)", + "zbw": "West Berawan", + "zca": "Coatecas Altas Zapotec", + "zcd": "Las Delicias Zapotec", + "zch": "Central Hongshuihe Zhuang", + "zdj": "Ngazidja Comorian", + "zea": "Zeeuws", + "zeg": "Zenag", + "zeh": "Eastern Hongshuihe Zhuang", + "zen": "Zenaga", + "zga": "Kinga", + "zgb": "Guibei Zhuang", + "zgh": "Standard Moroccan Tamazight", + "zgm": "Minz Zhuang", + "zgn": "Guibian Zhuang", + "zgr": "Magori", + "zh": "Chinese", + "zhb": "Zhaba", + "zhd": "Dai Zhuang", + "zhi": "Zhire", + "zhn": "Nong Zhuang", + "zhw": "Zhoa", + "zhx": "Chinese (family)", + "zia": "Zia", + "zib": "Zimbabwe Sign Language", + "zik": "Zimakani", + "zil": "Zialo", + "zim": "Mesme", + "zin": "Zinza", + "ziw": "Zigula", + "ziz": "Zizilivakan", + "zka": "Kaimbulawa", + "zkb": "Koibal", + "zkd": "Kadu", + "zkg": "Koguryo", + "zkh": "Khorezmian", + "zkk": "Karankawa", + "zkn": "Kanan", + "zko": "Kott", + "zkp": "São Paulo Kaingáng", + "zkr": "Zakhring", + "zkt": "Kitan", + "zku": "Kaurna", + "zkv": "Krevinian", + "zkz": "Khazar", + "zla": "Zula", + "zle": "East Slavic languages", + "zlj": "Liujiang Zhuang", + "zlm": "Malay (individual language)", + "zln": "Lianshan Zhuang", + "zlq": "Liuqian Zhuang", + "zls": "South Slavic languages", + "zlw": "West Slavic languages", + "zma": "Manda (Australia)", + "zmb": "Zimba", + "zmc": "Margany", + "zmd": "Maridan", + "zme": "Mangerr", + "zmf": "Mfinu", + "zmg": "Marti Ke", + "zmh": "Makolkol", + "zmi": "Negeri Sembilan Malay", + "zmj": "Maridjabin", + "zmk": "Mandandanyi", + "zml": "Matngala", + "zmm": "Marimanindji; Marramaninyshi", + "zmn": "Mbangwe", + "zmo": "Molo", + "zmp": "Mpuono", + "zmq": "Mituku", + "zmr": "Maranunggu", + "zms": "Mbesa", + "zmt": "Maringarr", + "zmu": "Muruwari", + "zmv": "Mbariman-Gudhinma", + "zmw": "Mbo (Democratic Republic of Congo)", + "zmx": "Bomitaba", + "zmy": "Mariyedi", + "zmz": "Mbandja", + "zna": "Zan Gula", + "znd": "Zande languages", + "zne": "Zande (individual language)", + "zng": "Mang", + "znk": "Manangkari", + "zns": "Mangas", + "zoc": "Copainalá Zoque", + "zoh": "Chimalapa Zoque", + "zom": "Zou", + "zoo": "Asunción Mixtepec Zapotec", + "zoq": "Tabasco Zoque", + "zor": "Rayón Zoque", + "zos": "Francisco León Zoque", + "zpa": "Lachiguiri Zapotec", + "zpb": "Yautepec Zapotec", + "zpc": "Choapan Zapotec", + "zpd": "Southeastern Ixtlán Zapotec", + "zpe": "Petapa Zapotec", + "zpf": "San Pedro Quiatoni Zapotec", + "zpg": "Guevea De Humboldt Zapotec", + "zph": "Totomachapan Zapotec", + "zpi": "Santa María Quiegolani Zapotec", + "zpj": "Quiavicuzas Zapotec", + "zpk": "Tlacolulita Zapotec", + "zpl": "Lachixío Zapotec", + "zpm": "Mixtepec Zapotec", + "zpn": "Santa Inés Yatzechi Zapotec", + "zpo": "Amatlán Zapotec", + "zpp": "El Alto Zapotec", + "zpq": "Zoogocho Zapotec", + "zpr": "Santiago Xanica Zapotec", 
+ "zps": "Coatlán Zapotec", + "zpt": "San Vicente Coatlán Zapotec", + "zpu": "Yalálag Zapotec", + "zpv": "Chichicapan Zapotec", + "zpw": "Zaniza Zapotec", + "zpx": "San Baltazar Loxicha Zapotec", + "zpy": "Mazaltepec Zapotec", + "zpz": "Texmelucan Zapotec", + "zqe": "Qiubei Zhuang", + "zra": "Kara (Korea)", + "zrg": "Mirgan", + "zrn": "Zerenkel", + "zro": "Záparo", + "zrp": "Zarphatic", + "zrs": "Mairasi", + "zsa": "Sarasira", + "zsk": "Kaskean", + "zsl": "Zambian Sign Language", + "zsm": "Standard Malay", + "zsr": "Southern Rincon Zapotec", + "zsu": "Sukurum", + "zte": "Elotepec Zapotec", + "ztg": "Xanaguía Zapotec", + "ztl": "Lapaguía-Guivini Zapotec", + "ztm": "San Agustín Mixtepec Zapotec", + "ztn": "Santa Catarina Albarradas Zapotec", + "ztp": "Loxicha Zapotec", + "ztq": "Quioquitani-Quierí Zapotec", + "zts": "Tilquiapan Zapotec", + "ztt": "Tejalapan Zapotec", + "ztu": "Güilá Zapotec", + "ztx": "Zaachila Zapotec", + "zty": "Yatee Zapotec", + "zu": "Zulu", + "zua": "Zeem", + "zuh": "Tokano", + "zum": "Kumzari", + "zun": "Zuni", + "zuy": "Zumaya", + "zwa": "Zay", + "zyb": "Yongbei Zhuang", + "zyg": "Yang Zhuang", + "zyj": "Youjiang Zhuang", + "zyn": "Yongnan Zhuang", + "zyp": "Zyphe Chin", + "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki", + "zzj": "Zuojiang Zhuang" +} \ No newline at end of file diff --git a/testbed/huggingface__datasets/src/datasets/utils/resources/multilingualities.json b/testbed/huggingface__datasets/src/datasets/utils/resources/multilingualities.json new file mode 100644 index 0000000000000000000000000000000000000000..a35c79f03dfcf7c8a116b7fc8ee1b383ab5022fa --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/resources/multilingualities.json @@ -0,0 +1,6 @@ +{ + "monolingual": "contains a single language", + "multilingual": "contains multiple languages", + "translation": "contains translated or aligned text", + "other": "other type of language distribution" +} diff --git a/testbed/huggingface__datasets/src/datasets/utils/resources/readme_structure.yaml b/testbed/huggingface__datasets/src/datasets/utils/resources/readme_structure.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b781e11e8258a446874ebf96104f642d0c190cf --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/resources/readme_structure.yaml @@ -0,0 +1,116 @@ +name: "" # Filename comes here +allow_empty: false +allow_empty_text: true +subsections: + - name: "Dataset Card for X" # First-level markdown heading + allow_empty: false + allow_empty_text: true + subsections: + - name: "Table of Contents" + allow_empty: false + allow_empty_text: false + subsections: null # meaning it should not be checked. 
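+      # Schema for each node below: `name` is the expected heading text; `allow_empty` and
+      # `allow_empty_text` control whether the section and its free text may be empty;
+      # `subsections` nests child headings (null meaning the subtree is not checked, per the note above).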
+      - name: "Dataset Description"
+        allow_empty: false
+        allow_empty_text: false
+        subsections:
+          - name: "Dataset Summary"
+            allow_empty: false
+            allow_empty_text: false
+            subsections: null
+          - name: "Supported Tasks and Leaderboards"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+          - name: Languages
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+      - name: "Dataset Structure"
+        allow_empty: false
+        allow_empty_text: true
+        subsections:
+          - name: "Data Instances"
+            allow_empty: false
+            allow_empty_text: true
+            subsections: null
+          - name: "Data Fields"
+            allow_empty: false
+            allow_empty_text: true
+            subsections: null
+          - name: "Data Splits"
+            allow_empty: false
+            allow_empty_text: true
+            subsections: null
+      - name: "Dataset Creation"
+        allow_empty: false
+        allow_empty_text: true
+        subsections:
+          - name: "Curation Rationale"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+          - name: "Source Data"
+            allow_empty: false
+            allow_empty_text: true
+            subsections:
+              - name: "Initial Data Collection and Normalization"
+                allow_empty: true
+                allow_empty_text: true
+                subsections: null
+              - name: "Who are the source language producers?"
+                allow_empty: true
+                allow_empty_text: true
+                subsections: null
+          - name: "Annotations"
+            allow_empty: false
+            allow_empty_text: true
+            subsections:
+              - name: "Annotation process"
+                allow_empty: true
+                allow_empty_text: true
+                subsections: null
+              - name: "Who are the annotators?"
+                allow_empty: true
+                allow_empty_text: true
+                subsections: null
+          - name: "Personal and Sensitive Information"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+      - name: "Considerations for Using the Data"
+        allow_empty: true
+        allow_empty_text: true
+        subsections:
+          - name: "Social Impact of Dataset"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+          - name: "Discussion of Biases"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+          - name: "Other Known Limitations"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+      - name: "Additional Information"
+        allow_empty: true
+        allow_empty_text: true
+        subsections:
+          - name: "Dataset Curators"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+          - name: "Licensing Information"
+            allow_empty: true
+            allow_empty_text: true
+            subsections: null
+          - name: "Citation Information"
+            allow_empty: false
+            allow_empty_text: true
+            subsections: null
+          - name: "Contributions"
+            allow_empty: false
+            allow_empty_text: false
+            subsections: null
diff --git a/testbed/huggingface__datasets/src/datasets/utils/resources/size_categories.json b/testbed/huggingface__datasets/src/datasets/utils/resources/size_categories.json
new file mode 100644
index 0000000000000000000000000000000000000000..983ce0c10dbb2e2245f90ae47e9de4c1025d5bb1
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/resources/size_categories.json
@@ -0,0 +1,14 @@
+[
+    "unknown",
+    "n<1K",
+    "1K<n<10K",
+    "10K<n<100K",
+    "100K<n<1M",
+    "1M<n<10M",
+    "10M<n<100M",
+    "100M<n<1B",
+    "1B<n<10B",
+    "10B<n<100B",
+    "100B<n<1T",
+    "n>1T"
+]
diff --git a/testbed/huggingface__datasets/src/datasets/utils/sharding.py b/testbed/huggingface__datasets/src/datasets/utils/sharding.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee3133b80ea927a076eebc7eedc2e7b25013ffa
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/sharding.py
@@ -0,0 +1,96 @@
+from typing import List
+
+import numpy as np
+
+
+def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
+    """Return the number of possible shards according to the input gen_kwargs"""
+    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
+    # until we decide how to define sharding without ambiguity for users
+    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
+    if len(set(lists_lengths.values())) > 1:
+        raise RuntimeError(
+            (
+                "Sharding is ambiguous for this dataset: "
+                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
+            )
+        )
+    max_length = max(lists_lengths.values(), default=0)
+    return max(1, max_length)
+
+
+def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
+    """
+    Get the range of shard indices per job.
+    If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
+    The shards indices order is preserved: e.g. all the first shards are given the first job.
+    Moreover all the jobs are given approximately the same number of shards.
+
+    Example:
+
+    ```python
+    >>> _distribute_shards(2, max_num_jobs=4)
+    [range(0, 1), range(1, 2)]
+    >>> _distribute_shards(10, max_num_jobs=3)
+    [range(0, 4), range(4, 7), range(7, 10)]
+    ```
+    """
+    shards_indices_per_group = []
+    for group_idx in range(max_num_jobs):
+        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
+        if num_shards_to_add == 0:
+            break
+        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
+        shard_indices = range(start, start + num_shards_to_add)
+        shards_indices_per_group.append(shard_indices)
+    return shards_indices_per_group
+
+
+def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
+    """Split the gen_kwargs into `max_num_jobs` gen_kwargs"""
+    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
+    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
+    if num_shards == 1:
+        return [dict(gen_kwargs)]
+    else:
+        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
+        return [
+            {
+                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
+                if isinstance(value, list)
+                else value
+                for key, value in gen_kwargs.items()
+            }
+            for group_idx in range(len(shard_indices_per_group))
+        ]
+
+
+def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
+    return {
+        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
+        if isinstance(gen_kwargs_list[0][key], list)
+        else gen_kwargs_list[0][key]
+        for key in gen_kwargs_list[0]
+    }
+
+
+def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
+    """Return a shuffled copy of the input gen_kwargs"""
+    # We must shuffle all the lists, and lists of the same size must have the same shuffling.
+    # This way entangled lists of (shard, shard_metadata) are still in the right order.
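+    # For example (hypothetical values): gen_kwargs = {"files": [f0, f1], "meta": [m0, m1]}
+    # has two lists of length 2, so both are reordered with the same permutation, e.g. to
+    # {"files": [f1, f0], "meta": [m1, m0]}, and never to {"files": [f1, f0], "meta": [m0, m1]}.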
+
+    # First, let's generate the shuffled indices per list size
+    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
+    indices_per_size = {}
+    for size in list_sizes:
+        indices_per_size[size] = list(range(size))
+        rng.shuffle(indices_per_size[size])
+    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
+    shuffled_kwargs = dict(gen_kwargs)
+    for key, value in shuffled_kwargs.items():
+        if isinstance(value, list):
+            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
+    return shuffled_kwargs
diff --git a/testbed/huggingface__datasets/src/datasets/utils/stratify.py b/testbed/huggingface__datasets/src/datasets/utils/stratify.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0967aa1abb790f741af5ff920c67e615d1b01da
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/stratify.py
@@ -0,0 +1,107 @@
+import numpy as np
+
+
+def approximate_mode(class_counts, n_draws, rng):
+    """Computes the approximate mode of a multivariate hypergeometric distribution.
+    This is an approximation to the mode of the multivariate
+    hypergeometric given by class_counts and n_draws.
+    It shouldn't be off by more than one.
+    It is the most likely outcome of drawing n_draws many
+    samples from the population given by class_counts.
+    Args
+    ----------
+    class_counts : ndarray of int
+        Population per class.
+    n_draws : int
+        Number of draws (samples to draw) from the overall population.
+    rng : random state
+        Used to break ties.
+    Returns
+    -------
+    sampled_classes : ndarray of int
+        Number of samples drawn from each class.
+        np.sum(sampled_classes) == n_draws
+
+    """
+    # this computes a bad approximation to the mode of the
+    # multivariate hypergeometric given by class_counts and n_draws
+    continuous = n_draws * class_counts / class_counts.sum()
+    # floored means we don't overshoot n_samples, but probably undershoot
+    floored = np.floor(continuous)
+    # we add samples according to how much "left over" probability
+    # they had, until we arrive at n_samples
+    need_to_add = int(n_draws - floored.sum())
+    if need_to_add > 0:
+        remainder = continuous - floored
+        values = np.sort(np.unique(remainder))[::-1]
+        # add according to remainder, but break ties
+        # randomly to avoid biases
+        for value in values:
+            (inds,) = np.where(remainder == value)
+            # if we need_to_add less than what's in inds
+            # we draw randomly from them.
+            # if we need to add more, we add them all and
+            # go to the next value
+            add_now = min(len(inds), need_to_add)
+            inds = rng.choice(inds, size=add_now, replace=False)
+            floored[inds] += 1
+            need_to_add -= add_now
+            if need_to_add == 0:
+                break
+    return floored.astype(np.int64)
+
+
+def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
+    """
+
+    Provides train/test indices to split data in train/test sets.
+    Its implementation is adapted from the StratifiedShuffleSplit implementation
+    of the scikit-learn library.
+
+    Args
+    ----------
+
+    y : ndarray
+        Class labels used for stratification.
+
+    n_train : int,
+        represents the absolute number of train samples.
+
+    n_test : int,
+        represents the absolute number of test samples.
+
+    rng : random state
+        Controls the randomness of the training and testing indices produced.
+
+    n_splits : int, default=10
+        Number of re-shuffling & splitting iterations.
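+
+    Yields
+    -------
+    (train, test) : tuple of ndarray
+        One pair of shuffled train/test index arrays per split, n_splits pairs in total.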
+ """ + classes, y_indices = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + class_counts = np.bincount(y_indices) + if np.min(class_counts) < 2: + raise ValueError("Minimum class count error") + if n_train < n_classes: + raise ValueError( + "The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes) + ) + if n_test < n_classes: + raise ValueError( + "The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes) + ) + class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]) + for _ in range(n_splits): + n_i = approximate_mode(class_counts, n_train, rng) + class_counts_remaining = class_counts - n_i + t_i = approximate_mode(class_counts_remaining, n_test, rng) + + train = [] + test = [] + + for i in range(n_classes): + permutation = rng.permutation(class_counts[i]) + perm_indices_class_i = class_indices[i].take(permutation, mode="clip") + train.extend(perm_indices_class_i[: n_i[i]]) + test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]]) + train = rng.permutation(train) + test = rng.permutation(test) + + yield train, test diff --git a/testbed/huggingface__datasets/src/datasets/utils/tf_utils.py b/testbed/huggingface__datasets/src/datasets/utils/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b69f5c85b2c38bb47506a4b2fedb5a69e1d37c00 --- /dev/null +++ b/testbed/huggingface__datasets/src/datasets/utils/tf_utils.py @@ -0,0 +1,582 @@ +# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TF-specific utils import.""" + +import os +import warnings +from functools import partial +from math import ceil +from uuid import uuid4 + +import numpy as np +import pyarrow as pa +from multiprocess import get_context + + +try: + from multiprocess.shared_memory import SharedMemory +except ImportError: + SharedMemory = None # Version checks should prevent this being called on older Python versions + +from .. 
import config + + +def minimal_tf_collate_fn(features): + if isinstance(features, dict): # case batch_size=None: nothing to collate + return features + elif config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") + + first = features[0] + batch = {} + for k, v in first.items(): + if isinstance(v, np.ndarray): + batch[k] = np.stack([f[k] for f in features]) + elif isinstance(v, tf.Tensor): + batch[k] = tf.stack([f[k] for f in features]) + else: + batch[k] = np.array([f[k] for f in features]) + return batch + + +def minimal_tf_collate_fn_with_renaming(features): + batch = minimal_tf_collate_fn(features) + if "label" in batch: + batch["labels"] = batch["label"] + del batch["label"] + return batch + + +def is_numeric_pa_type(pa_type): + if pa.types.is_list(pa_type): + return is_numeric_pa_type(pa_type.value_type) + return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type) + + +def is_numeric_feature(feature): + from .. import ClassLabel, Sequence, Value + from ..features.features import _ArrayXD + + if isinstance(feature, Sequence): + return is_numeric_feature(feature.feature) + elif isinstance(feature, list): + return is_numeric_feature(feature[0]) + elif isinstance(feature, _ArrayXD): + return is_numeric_pa_type(feature().storage_dtype) + elif isinstance(feature, Value): + return is_numeric_pa_type(feature()) + elif isinstance(feature, ClassLabel): + return True + else: + return False + + +def np_get_batch( + indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False +): + if not isinstance(indices, np.ndarray): + indices = indices.numpy() + + is_batched = True + # Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices + if isinstance(indices, np.integer): + batch = dataset[indices.item()] + is_batched = False + elif np.all(np.diff(indices) == 1): + batch = dataset[indices[0] : indices[-1] + 1] + elif isinstance(indices, np.ndarray): + batch = dataset[indices] + else: + raise RuntimeError("Unexpected type for indices: {}".format(type(indices))) + + if cols_to_retain is not None: + batch = { + key: value + for key, value in batch.items() + if key in cols_to_retain or key in ("label", "label_ids", "labels") + } + + if is_batched: + actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same + # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert + batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)] + batch = collate_fn(batch, **collate_fn_args) + + if return_dict: + out_batch = {} + for col, cast_dtype in columns_to_np_types.items(): + # In case the collate_fn returns something strange + array = np.array(batch[col]) + array = array.astype(cast_dtype) + out_batch[col] = array + else: + out_batch = [] + for col, cast_dtype in columns_to_np_types.items(): + # In case the collate_fn returns something strange + array = np.array(batch[col]) + array = array.astype(cast_dtype) + out_batch.append(array) + return out_batch + + +def dataset_to_tf( + dataset, + cols_to_retain, + collate_fn, + collate_fn_args, + columns_to_np_types, + output_signature, + shuffle, + batch_size, + drop_remainder, +): + """Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess + equivalent is multiprocess_dataset_to_tf. 
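+
+    The pipeline builds a tf.data.Dataset of row indices, optionally shuffles and batches
+    them, and uses a tf.py_function to fetch and collate the corresponding rows from the
+    underlying Dataset.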
+ + Args: + dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. + cols_to_retain (`List[str]`): Dataset column(s) to load in the + tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and + that do not exist in the original dataset. + collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate + lists of samples into a batch. + collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the + `collate_fn`. Can be empty. + columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. + output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to + `tf.TensorSpec` objects. + shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for + validation/evaluation. + batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that + the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. + drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided, + defaults to the same setting as shuffle. + + Returns: + `tf.data.Dataset` + """ + if config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") + + # TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything + # to the NumPy multiprocessing path. + if hasattr(tf, "random_index_shuffle"): + random_index_shuffle = tf.random_index_shuffle + elif hasattr(tf.random.experimental, "index_shuffle"): + random_index_shuffle = tf.random.experimental.index_shuffle + else: + if len(dataset) > 10_000_000: + warnings.warn( + "to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. " + "If you are iterating over a dataset with a very large number of samples, consider " + "upgrading to TF >= 2.9." 
+ ) + random_index_shuffle = None + + getter_fn = partial( + np_get_batch, + dataset=dataset, + cols_to_retain=cols_to_retain, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + columns_to_np_types=columns_to_np_types, + return_dict=False, + ) + + # This works because dictionaries always output in the same order + tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()] + + @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)]) + def fetch_function(indices): + output = tf.py_function( + getter_fn, + inp=[indices], + Tout=tout, + ) + return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())} + + tf_dataset = tf.data.Dataset.range(len(dataset)) + + if shuffle and random_index_shuffle is not None: + base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64)) + + def scan_random_index(state, index): + if tf.reduce_all(state == -1): + # This generates a new random seed once per epoch only, + # to ensure that we iterate over each sample exactly once per epoch + state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64) + shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1) + return state, shuffled_index + + tf_dataset = tf_dataset.scan(base_seed, scan_random_index) + elif shuffle: + tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality()) + + if batch_size is not None: + tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder) + + tf_dataset = tf_dataset.map(fetch_function) + + if batch_size is not None: + + def ensure_shapes(input_dict): + return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()} + + else: + # Ensure shape but remove batch dimension of output_signature[key].shape + def ensure_shapes(input_dict): + return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()} + + return tf_dataset.map(ensure_shapes) + + +class SharedMemoryContext: + # This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted + # The process that creates shared memory is always the one responsible for unlinking it in the end + def __init__(self): + self.created_shms = [] + self.opened_shms = [] + + def get_shm(self, name, size, create): + shm = SharedMemory(size=int(size), name=name, create=create) + if create: + # We only unlink the ones we created in this context + self.created_shms.append(shm) + else: + # If we didn't create it, we only close it when done, we don't unlink it + self.opened_shms.append(shm) + return shm + + def get_array(self, name, shape, dtype, create): + shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create) + return np.ndarray(shape, dtype=dtype, buffer=shm.buf) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + for shm in self.created_shms: + shm.close() + shm.unlink() + for shm in self.opened_shms: + shm.close() + + +class NumpyMultiprocessingGenerator: + def __init__( + self, + dataset, + cols_to_retain, + collate_fn, + collate_fn_args, + columns_to_np_types, + output_signature, + shuffle, + batch_size, + drop_remainder, + num_workers, + ): + self.dataset = dataset + self.cols_to_retain = cols_to_retain + self.collate_fn = collate_fn + self.collate_fn_args = collate_fn_args + self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)] + # Strings will be converted to arrays of single unicode chars, so 
that we can have a constant itemsize + self.columns_to_np_types = { + col: dtype if col not in self.string_columns else np.dtype("U1") + for col, dtype in columns_to_np_types.items() + } + self.output_signature = output_signature + self.shuffle = shuffle + self.batch_size = batch_size + self.drop_remainder = drop_remainder + self.num_workers = num_workers + # Because strings are converted to characters, we need to add one extra dimension to the shape + self.columns_to_ranks = { + col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1 + for col, spec in output_signature.items() + } + + def __iter__(self): + # Make sure we only spawn workers if they have work to do + num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size))) + # Do the shuffling in iter so that it's done at the start of each epoch + per_worker_batches, final_batch, final_batch_worker = self.distribute_batches( + self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle + ) + ctx = get_context("spawn") + names = [] + shape_arrays = [] + workers = [] + array_ready_events = [ctx.Event() for _ in range(num_workers)] + array_loaded_events = [ctx.Event() for _ in range(num_workers)] + + base_args = { + "dataset": self.dataset, + "cols_to_retain": self.cols_to_retain, + "collate_fn": self.collate_fn, + "collate_fn_args": self.collate_fn_args, + "columns_to_np_types": self.columns_to_np_types, + "columns_to_ranks": self.columns_to_ranks, + "string_columns": self.string_columns, + } + with SharedMemoryContext() as shm_ctx: + for i in range(num_workers): + worker_random_id = str(uuid4()) + worker_name = f"dw_{i}_{worker_random_id}"[:10] + names.append(worker_name) + + worker_shape_arrays = { + col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True) + for col, rank in self.columns_to_ranks.items() + } + shape_arrays.append(worker_shape_arrays) + + worker_indices = per_worker_batches[i] + if i == final_batch_worker and final_batch is not None: + final_batch_arg = final_batch + else: + final_batch_arg = None + worker_kwargs = { + "worker_name": worker_name, + "indices": worker_indices, + "extra_batch": final_batch_arg, + "array_ready_event": array_ready_events[i], + "array_loaded_event": array_loaded_events[i], + **base_args, + } + worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True) + worker.start() + workers.append(worker) + + end_signal_received = False + while not end_signal_received: + for i in range(num_workers): + if not array_ready_events[i].wait(timeout=60): + raise TimeoutError("Data loading worker timed out!") + array_ready_events[i].clear() + array_shapes = shape_arrays[i] + if any(np.any(shape < 0) for shape in array_shapes.values()): + # Child processes send negative array shapes to indicate + # that no more data is going to be sent + end_signal_received = True + break + # Matt: Because array shapes are variable we recreate the shared memory each iteration. + # I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process. + # A future optimization, at the cost of some code complexity, could be to reuse shared memory + # between iterations, but this would require knowing in advance the maximum size, or having + # a system to only create a new memory block when a new maximum size is seen. + # Another potential optimization would be to figure out which memory copies are necessary, + # or whether we can yield objects straight out of shared memory. 
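+                    # Protocol per batch (as implemented above and below): the worker fills the
+                    # shared shape array and signals array_ready_event; the parent copies the data
+                    # out, then signals array_loaded_event so the worker can reuse the same shared
+                    # memory name for its next batch.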
+ with SharedMemoryContext() as batch_shm_ctx: + # This memory context only lasts long enough to copy everything out of the batch + arrays = { + col: batch_shm_ctx.get_array( + f"{names[i]}_{col}", + shape=shape, + dtype=self.columns_to_np_types[col], + create=False, + ) + for col, shape in array_shapes.items() + } + # Copy everything out of shm because the memory + # will be unlinked by the child process at some point + arrays = {col: np.copy(arr) for col, arr in arrays.items()} + # Now we convert any unicode char arrays to strings + for string_col in self.string_columns: + arrays[string_col] = ( + arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1) + ) + yield arrays + array_loaded_events[i].set() + # Now we just do some cleanup + # Shared memory is cleaned up by the context manager, so we just make sure workers finish + for worker in workers: + worker.join() + + def __call__(self): + return self + + @staticmethod + def worker_loop( + dataset, + cols_to_retain, + collate_fn, + collate_fn_args, + columns_to_np_types, + columns_to_ranks, + string_columns, + indices, + extra_batch, + worker_name, + array_ready_event, + array_loaded_event, + ): + os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + if config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") + + tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory + + def send_batch_to_parent(indices): + batch = np_get_batch( + indices=indices, + dataset=dataset, + cols_to_retain=cols_to_retain, + collate_fn=collate_fn, + collate_fn_args=collate_fn_args, + columns_to_np_types=columns_to_np_types, + return_dict=True, + ) + + # Now begins the fun part where we start shovelling shared memory at the parent process + out_arrays = {} + with SharedMemoryContext() as batch_shm_ctx: + # The batch shared memory context exists only as long as it takes for the parent process + # to read everything, after which it cleans everything up again + for col, cast_dtype in columns_to_np_types.items(): + # Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor + array = batch[col] + if col in string_columns: + # We can't send unicode arrays over shared memory, so we convert to single chars ("U1") + # which have a fixed width of 4 bytes. The parent process will convert these back to strings. 
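+                        # For example, a (batch_size,) array of dtype "U2" becomes a (batch_size, 2)
+                        # array of "U1" characters after the view + reshape below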
+ array = array.view("U1").reshape(array.shape + (-1,)) + shape_arrays[col][:] = array.shape + out_arrays[col] = batch_shm_ctx.get_array( + f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True + ) + out_arrays[col][:] = array + + array_ready_event.set() + array_loaded_event.wait() + array_loaded_event.clear() + + with SharedMemoryContext() as shm_ctx: + shape_arrays = { + col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False) + for col, rank in columns_to_ranks.items() + } + + for batch in indices: + send_batch_to_parent(batch) + if extra_batch is not None: + send_batch_to_parent(extra_batch) + # Now we send a batsignal to the parent process that we're done + for col, array in shape_arrays.items(): + array[:] = -1 + array_ready_event.set() + + @staticmethod + def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle): + indices = np.arange(len(dataset)) + if shuffle: + np.random.shuffle(indices) + num_samples = len(indices) + # We distribute the batches so that reading from the workers in round-robin order yields the exact + # order specified in indices. This is only important when shuffle is False, but we do it regardless. + incomplete_batch_cutoff = num_samples - (num_samples % batch_size) + indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff]) + if drop_remainder or len(last_incomplete_batch) == 0: + last_incomplete_batch = None + + indices = indices.reshape(-1, batch_size) + num_batches = len(indices) + final_batches_cutoff = num_batches - (num_batches % num_workers) + indices, final_batches = np.split(indices, [final_batches_cutoff]) + indices = indices.reshape(-1, num_workers, batch_size) + + per_worker_indices = np.split(indices, indices.shape[1], axis=1) + per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices] + # Distribute the final batches to the first workers + for i in range(len(final_batches)): + # len(final_batches) can be zero, and is always less than num_workers + per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0) + # Add the last incomplete batch to the next worker, which might be the first worker + if last_incomplete_batch is not None: + incomplete_batch_worker_idx = len(final_batches) + else: + incomplete_batch_worker_idx = None + return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx + + +def multiprocess_dataset_to_tf( + dataset, + cols_to_retain, + collate_fn, + collate_fn_args, + columns_to_np_types, + output_signature, + shuffle, + batch_size, + drop_remainder, + num_workers, +): + """Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process + equivalent is dataset_to_tf. + + Args: + dataset (`Dataset`): Dataset to wrap with tf.data.Dataset. + cols_to_retain (`List[str]`): Dataset column(s) to load in the + tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and + that do not exist in the original dataset. + collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate + lists of samples into a batch. + collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the + `collate_fn`. Can be empty. + columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes. + output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to + `tf.TensorSpec` objects. 
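+            This signature is passed to `tf.data.Dataset.from_generator`, so it also determines the
+            shapes and dtypes of the batches yielded by the returned dataset.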
+        shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
+            validation/evaluation.
+        batch_size (`int`): Size of batches to load from the dataset. Unlike in `dataset_to_tf`, this cannot be
+            `None`, because the generator distributes whole batches of indices across its workers.
+        drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
+            defaults to the same setting as shuffle.
+        num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1.
+
+    Returns:
+        `tf.data.Dataset`
+    """
+    if config.TF_AVAILABLE:
+        import tensorflow as tf
+    else:
+        raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+    data_generator = NumpyMultiprocessingGenerator(
+        dataset=dataset,
+        cols_to_retain=cols_to_retain,
+        collate_fn=collate_fn,
+        collate_fn_args=collate_fn_args,
+        columns_to_np_types=columns_to_np_types,
+        output_signature=output_signature,
+        shuffle=shuffle,
+        batch_size=batch_size,
+        drop_remainder=drop_remainder,
+        num_workers=num_workers,
+    )
+
+    tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature)
+    if drop_remainder:
+        dataset_length = int(len(dataset) // batch_size)
+    else:
+        dataset_length = int(ceil(len(dataset) / batch_size))
+    return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
diff --git a/testbed/huggingface__datasets/src/datasets/utils/tqdm.py b/testbed/huggingface__datasets/src/datasets/utils/tqdm.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ea73a1c4a6e01ddc6f4936c2a4741c54f99ca7e
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/tqdm.py
@@ -0,0 +1,130 @@
+"""Utility helpers to handle progress bars in `datasets`.
+
+Example:
+    1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
+    2. To disable progress bars, either use the `disable_progress_bars()` helper or set the
+       environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.
+    3. To re-enable progress bars, use `enable_progress_bars()`.
+    4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
+
+NOTE: Environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` has priority.
+
+Example:
+    ```py
+    from datasets.utils import (
+        are_progress_bars_disabled,
+        disable_progress_bars,
+        enable_progress_bars,
+        tqdm,
+    )
+
+    # Disable progress bars globally
+    disable_progress_bars()
+
+    # Use as normal `tqdm`
+    for _ in tqdm(range(5)):
+        do_something()
+
+    # Still not showing progress bars, as `disable=False` is overwritten to `True`.
+    for _ in tqdm(range(5), disable=False):
+        do_something()
+
+    are_progress_bars_disabled()  # True
+
+    # Re-enable progress bars globally
+    enable_progress_bars()
+
+    # Progress bars will be shown!
+    for _ in tqdm(range(5)):
+        do_something()
+    ```
+"""
+import warnings
+
+from tqdm.auto import tqdm as old_tqdm
+
+from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS
+
+
+# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`
+# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.
+# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user has not set the
+# environment variable and is free to enable/disable progress bars programmatically.
+# TL;DR: env variable has priority over code.
+#
+# By default, progress bars are enabled.
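+# For example, running `HF_DATASETS_DISABLE_PROGRESS_BARS=1 python script.py` keeps progress bars
+# disabled even if the script later calls `enable_progress_bars()`.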
+_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False
+
+
+def disable_progress_bars() -> None:
+    """
+    Globally disable progress bars used in `datasets`, unless the `HF_DATASETS_DISABLE_PROGRESS_BARS`
+    environment variable has been set.
+
+    Use [`~utils.enable_progress_bars`] to re-enable them.
+    """
+    if HF_DATASETS_DISABLE_PROGRESS_BARS is False:
+        warnings.warn(
+            "Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS=0` is set and has"
+            " priority."
+        )
+        return
+    global _hf_datasets_progress_bars_disabled
+    _hf_datasets_progress_bars_disabled = True
+
+
+def enable_progress_bars() -> None:
+    """
+    Globally enable progress bars used in `datasets`, unless the `HF_DATASETS_DISABLE_PROGRESS_BARS`
+    environment variable has been set.
+
+    Use [`~utils.disable_progress_bars`] to disable them.
+    """
+    if HF_DATASETS_DISABLE_PROGRESS_BARS is True:
+        warnings.warn(
+            "Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS=1` is set and has"
+            " priority."
+        )
+        return
+    global _hf_datasets_progress_bars_disabled
+    _hf_datasets_progress_bars_disabled = False
+
+
+def are_progress_bars_disabled() -> bool:
+    """Return whether progress bars are globally disabled or not.
+
+    Progress bars used in `datasets` can be enabled or disabled globally using [`~utils.enable_progress_bars`]
+    and [`~utils.disable_progress_bars`], or by setting the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment variable.
+    """
+    global _hf_datasets_progress_bars_disabled
+    return _hf_datasets_progress_bars_disabled
+
+
+class tqdm(old_tqdm):
+    """
+    Class to override the `disable` argument in case progress bars are globally disabled.
+
+    Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
+    """
+
+    def __init__(self, *args, **kwargs):
+        if are_progress_bars_disabled():
+            kwargs["disable"] = True
+        super().__init__(*args, **kwargs)
+
+    def __delattr__(self, attr: str) -> None:
+        """Fix for https://github.com/huggingface/datasets/issues/6066"""
+        try:
+            super().__delattr__(attr)
+        except AttributeError:
+            if attr != "_lock":
+                raise
+
+
+# backward compatibility
+enable_progress_bar = enable_progress_bars
+disable_progress_bar = disable_progress_bars
+
+
+def is_progress_bar_enabled():
+    return not are_progress_bars_disabled()
diff --git a/testbed/huggingface__datasets/src/datasets/utils/typing.py b/testbed/huggingface__datasets/src/datasets/utils/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad087fc98d2a6de2d3e493120135fc9ea49e605
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/typing.py
@@ -0,0 +1,9 @@
+import os
+from typing import Dict, List, Tuple, TypeVar, Union
+
+
+T = TypeVar("T")
+
+ListLike = Union[List[T], Tuple[T, ...]]
+NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
+PathLike = Union[str, bytes, os.PathLike]
diff --git a/testbed/huggingface__datasets/src/datasets/utils/version.py b/testbed/huggingface__datasets/src/datasets/utils/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..75cf4c39d5f9b916ade8f8d88a0f0ca9e5769217
--- /dev/null
+++ b/testbed/huggingface__datasets/src/datasets/utils/version.py
@@ -0,0 +1,106 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Version utils."""
+
+import dataclasses
+import re
+from dataclasses import dataclass
+from functools import total_ordering
+from typing import Optional, Union
+
+
+_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
+
+
+@total_ordering
+@dataclass
+class Version:
+    """Dataset version `MAJOR.MINOR.PATCH`.
+
+    Args:
+        version_str (`str`):
+            The dataset version.
+        description (`str`):
+            A description of what is new in this version.
+        major (`str`):
+            The major version number; derived from `version_str`.
+        minor (`str`):
+            The minor version number; derived from `version_str`.
+        patch (`str`):
+            The patch version number; derived from `version_str`.
+
+    Example:
+
+    ```py
+    >>> VERSION = datasets.Version("1.0.0")
+    ```
+    """
+
+    version_str: str
+    description: Optional[str] = None
+    major: Optional[Union[str, int]] = None
+    minor: Optional[Union[str, int]] = None
+    patch: Optional[Union[str, int]] = None
+
+    def __post_init__(self):
+        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
+
+    def __repr__(self):
+        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
+
+    @property
+    def tuple(self):
+        return self.major, self.minor, self.patch
+
+    def _validate_operand(self, other):
+        if isinstance(other, str):
+            return Version(other)
+        elif isinstance(other, Version):
+            return other
+        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
+
+    def __eq__(self, other):
+        try:
+            other = self._validate_operand(other)
+        except (TypeError, ValueError):
+            return False
+        else:
+            return self.tuple == other.tuple
+
+    def __lt__(self, other):
+        other = self._validate_operand(other)
+        return self.tuple < other.tuple
+
+    def __hash__(self):
+        return hash(_version_tuple_to_str(self.tuple))
+
+    @classmethod
+    def from_dict(cls, dic):
+        field_names = {f.name for f in dataclasses.fields(cls)}
+        return cls(**{k: v for k, v in dic.items() if k in field_names})
+
+    def _to_yaml_string(self) -> str:
+        return self.version_str
+
+
+def _str_to_version_tuple(version_str):
+    """Return the tuple (major, minor, patch) version extracted from the str."""
+    res = _VERSION_REG.match(version_str)
+    if not res:
+        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
+    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
+
+
+def _version_tuple_to_str(version_tuple):
+    """Return the str version from the version tuple (major, minor, patch)."""
+    return ".".join(str(v) for v in version_tuple)
diff --git a/testbed/huggingface__datasets/templates/README.md b/testbed/huggingface__datasets/templates/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa6884d13246c236160c8c1f7c7ab5f3ec6ea88b
--- /dev/null
+++ b/testbed/huggingface__datasets/templates/README.md
@@ -0,0 +1,126 @@
+---
+TODO: Add YAML tags here.
Copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging +--- + +# Dataset Card for [Dataset Name] + +## Table of Contents +- [Table of Contents](#table-of-contents) +- [Dataset Description](#dataset-description) + - [Dataset Summary](#dataset-summary) + - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) + - [Languages](#languages) +- [Dataset Structure](#dataset-structure) + - [Data Instances](#data-instances) + - [Data Fields](#data-fields) + - [Data Splits](#data-splits) +- [Dataset Creation](#dataset-creation) + - [Curation Rationale](#curation-rationale) + - [Source Data](#source-data) + - [Annotations](#annotations) + - [Personal and Sensitive Information](#personal-and-sensitive-information) +- [Considerations for Using the Data](#considerations-for-using-the-data) + - [Social Impact of Dataset](#social-impact-of-dataset) + - [Discussion of Biases](#discussion-of-biases) + - [Other Known Limitations](#other-known-limitations) +- [Additional Information](#additional-information) + - [Dataset Curators](#dataset-curators) + - [Licensing Information](#licensing-information) + - [Citation Information](#citation-information) + - [Contributions](#contributions) + +## Dataset Description + +- **Homepage:** +- **Repository:** +- **Paper:** +- **Leaderboard:** +- **Point of Contact:** + +### Dataset Summary + +[More Information Needed] + +### Supported Tasks and Leaderboards + +[More Information Needed] + +### Languages + +[More Information Needed] + +## Dataset Structure + +### Data Instances + +[More Information Needed] + +### Data Fields + +[More Information Needed] + +### Data Splits + +[More Information Needed] + +## Dataset Creation + +### Curation Rationale + +[More Information Needed] + +### Source Data + +#### Initial Data Collection and Normalization + +[More Information Needed] + +#### Who are the source language producers? + +[More Information Needed] + +### Annotations + +#### Annotation process + +[More Information Needed] + +#### Who are the annotators? + +[More Information Needed] + +### Personal and Sensitive Information + +[More Information Needed] + +## Considerations for Using the Data + +### Social Impact of Dataset + +[More Information Needed] + +### Discussion of Biases + +[More Information Needed] + +### Other Known Limitations + +[More Information Needed] + +## Additional Information + +### Dataset Curators + +[More Information Needed] + +### Licensing Information + +[More Information Needed] + +### Citation Information + +[More Information Needed] + +### Contributions + +Thanks to [@github-username](https://github.com/) for adding this dataset. 
diff --git a/testbed/huggingface__datasets/templates/README_guide.md b/testbed/huggingface__datasets/templates/README_guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7cfbda616a7f88894a270cd92bcf1297450c22e
--- /dev/null
+++ b/testbed/huggingface__datasets/templates/README_guide.md
@@ -0,0 +1,200 @@
+---
+YAML tags (full spec here: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1):
+- copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging
+---
+
+# Dataset Card Creation Guide
+
+## Table of Contents
+- [Dataset Card Creation Guide](#dataset-card-creation-guide)
+  - [Table of Contents](#table-of-contents)
+  - [Dataset Description](#dataset-description)
+    - [Dataset Summary](#dataset-summary)
+    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+    - [Languages](#languages)
+  - [Dataset Structure](#dataset-structure)
+    - [Data Instances](#data-instances)
+    - [Data Fields](#data-fields)
+    - [Data Splits](#data-splits)
+  - [Dataset Creation](#dataset-creation)
+    - [Curation Rationale](#curation-rationale)
+    - [Source Data](#source-data)
+      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
+      - [Who are the source language producers?](#who-are-the-source-language-producers)
+    - [Annotations](#annotations)
+      - [Annotation process](#annotation-process)
+      - [Who are the annotators?](#who-are-the-annotators)
+    - [Personal and Sensitive Information](#personal-and-sensitive-information)
+  - [Considerations for Using the Data](#considerations-for-using-the-data)
+    - [Social Impact of Dataset](#social-impact-of-dataset)
+    - [Discussion of Biases](#discussion-of-biases)
+    - [Other Known Limitations](#other-known-limitations)
+  - [Additional Information](#additional-information)
+    - [Dataset Curators](#dataset-curators)
+    - [Licensing Information](#licensing-information)
+    - [Citation Information](#citation-information)
+    - [Contributions](#contributions)
+
+## Dataset Description
+
+- **Homepage:** [Add homepage URL here if available (unless it's a GitHub repository)]()
+- **Repository:** [If the dataset is hosted on github or has a github homepage, add URL here]()
+- **Paper:** [If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)]()
+- **Leaderboard:** [If the dataset supports an active leaderboard, add link here]()
+- **Point of Contact:** [If known, name and email of at least one person the reader can contact for questions about the dataset.]()
+
+### Dataset Summary
+
+Briefly summarize the dataset, its intended use and the supported tasks. Give an overview of how and why the dataset was created. The summary should explicitly mention the languages present in the dataset (possibly in broad terms, e.g. *translations between several pairs of European languages*), and describe the domain, topic, or genre covered.
+
+### Supported Tasks and Leaderboards
+
+For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (replace the `task-category-tag` with an appropriate `other:other-task-name`).
+
+- `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION].
Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score. *[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name).
+
+### Languages
+
+Provide a brief overview of the languages represented in the dataset. Describe relevant details about the specifics of the language, such as whether it is social media text, African American English,...
+
+When relevant, please provide [BCP-47 codes](https://tools.ietf.org/html/bcp47), which consist of a [primary language subtag](https://tools.ietf.org/html/bcp47#section-2.2.1), with a [script subtag](https://tools.ietf.org/html/bcp47#section-2.2.3) and/or [region subtag](https://tools.ietf.org/html/bcp47#section-2.2.4) if available.
+
+## Dataset Structure
+
+### Data Instances
+
+Provide a JSON-formatted example and brief description of a typical instance in the dataset. If available, provide a link to further examples.
+
+```
+{
+  'example_field': ...,
+  ...
+}
+```
+
+Provide any additional information that is not covered in the other sections about the data here. In particular describe any relationships between data points and if these relationships are made explicit.
+
+### Data Fields
+
+List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. If the dataset contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.
+
+- `example_field`: description of `example_field`
+
+Note that the descriptions can be initialized with the **Show Markdown Data Fields** output of the [Datasets Tagging app](https://huggingface.co/spaces/huggingface/datasets-tagging); you will then only need to refine the generated descriptions.
+
+### Data Splits
+
+Describe and name the splits in the dataset if there are more than one.
+
+Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.
+
+Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:
+
+|                         | train | validation | test |
+|-------------------------|------:|-----------:|-----:|
+| Input Sentences         |       |            |      |
+| Average Sentence Length |       |            |      |
+
+## Dataset Creation
+
+### Curation Rationale
+
+What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together?
+
+### Source Data
+
+This section describes the source data (e.g. news text and headlines, social media posts, translated sentences,...)
+
+#### Initial Data Collection and Normalization
+
+Describe the data collection process.
Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.
+
+If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).
+
+If the data was modified or normalized after being collected (e.g. if the data is word-tokenized), describe the process and the tools used.
+
+#### Who are the source language producers?
+
+State whether the data was produced by humans or machine generated. Describe the people or systems who originally created the data.
+
+If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender.
+
+Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.
+
+Describe other people represented or mentioned in the data. Where possible, link to references for the information.
+
+### Annotations
+
+If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs.
+
+#### Annotation process
+
+If applicable, describe the annotation process and any tools used, or state otherwise. Describe the amount of data annotated, if not all. Describe or reference annotation guidelines provided to the annotators. If available, provide interannotator statistics. Describe any annotation validation processes.
+
+#### Who are the annotators?
+
+If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated.
+
+Describe the people or systems who originally created the annotations and their selection criteria if applicable.
+
+If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender.
+
+Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.
+
+### Personal and Sensitive Information
+
+State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).
+
+State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).
+
+If efforts were made to anonymize the data, describe the anonymization process.
+
+## Considerations for Using the Data
+
+### Social Impact of Dataset
+
+Please discuss some of the ways you believe the use of this dataset will impact society.
+
+The statement should include both positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to people who are affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations.
+
+Also describe in this section if the proposed dataset contains a low-resource or under-represented language. If this is the case or if this task has any impact on underserved communities, please elaborate here.
+
+### Discussion of Biases
+
+Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact.
+
+For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic.
+
+If analyses have been run quantifying these biases, please add brief summaries and links to the studies here.
+
+### Other Known Limitations
+
+If studies of the dataset have outlined other limitations, such as annotation artifacts, please outline and cite them here.
+
+## Additional Information
+
+### Dataset Curators
+
+List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here.
+
+### Licensing Information
+
+Provide the license and link to the license webpage if available.
+
+### Citation Information
+
+Provide the [BibTeX](http://www.bibtex.org/)-formatted reference for the dataset. For example:
+```
+@article{article_id,
+  author = {Author List},
+  title = {Dataset Paper Title},
+  journal = {Publication Venue},
+  year = {2525}
+}
+```
+
+If the dataset has a [DOI](https://www.doi.org/), please provide it here.
+
+### Contributions
+
+Thanks to [@github-username](https://github.com/) for adding this dataset.
diff --git a/testbed/huggingface__datasets/templates/metric_card_template.md b/testbed/huggingface__datasets/templates/metric_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf31ec59ef7abfddee0a6c5aa66224f1857e8235
--- /dev/null
+++ b/testbed/huggingface__datasets/templates/metric_card_template.md
@@ -0,0 +1,37 @@
+# Metric Card for *Current Metric*
+
+***Metric Card Instructions:*** *Copy this file into the relevant metric folder, then fill it out and save it as README.md.
Feel free to take a look at existing metric cards if you'd like examples.*
+
+## Metric Description
+*Give a brief overview of this metric.*
+
+## How to Use
+*Give a general statement of how to use the metric*
+
+*Provide the simplest possible example for using the metric*
+
+### Inputs
+*List all input arguments in the format below*
+- **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
+
+### Output Values
+*Explain what this metric outputs (e.g. a single score, a list of scores)*
+
+*Give an example of what the metric output looks like.*
+
+*State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
+
+#### Values from Popular Papers
+*Give examples, preferably with links, to papers that have reported this metric, along with the values they have reported.*
+
+### Examples
+*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
+
+## Limitations and Bias
+*Note any known limitations or biases that the metric has, with links and references if possible.*
+
+## Citation
+*Cite the source where this metric was introduced.*
+
+## Further References
+*Add any useful further references.*
diff --git a/testbed/huggingface__datasets/templates/new_dataset_script.py b/testbed/huggingface__datasets/templates/new_dataset_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8d14181b50bce97b039988c2879b12401eb33b
--- /dev/null
+++ b/testbed/huggingface__datasets/templates/new_dataset_script.py
@@ -0,0 +1,172 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# TODO: Address all TODOs and remove all explanatory comments
+"""TODO: Add a description here."""
+
+
+import csv
+import json
+import os
+
+import datasets
+
+
+# TODO: Add BibTeX citation
+# Find for instance the citation on arxiv or on the dataset repo/website
+_CITATION = """\
+@InProceedings{huggingface:dataset,
+title = {A great new dataset},
+author={huggingface, Inc.
+},
+year={2020}
+}
+"""
+
+# TODO: Add description of the dataset here
+# You can copy an official description
+_DESCRIPTION = """\
+This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = ""
+
+# TODO: Add the license for the dataset here if you can find it
+_LICENSE = ""
+
+# TODO: Add link to the official dataset URLs here
+# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URLS = {
+    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+}
+
+
+# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+class NewDataset(datasets.GeneratorBasedBuilder):
+    """TODO: Short description of my dataset."""
+
+    VERSION = datasets.Version("1.1.0")
+
+    # This is an example of a dataset with multiple configurations.
+    # If you don't want/need to define several sub-sets in your dataset,
+    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+    # If you need to make complex sub-parts in the datasets with configurable options,
+    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+    # You will be able to load one or the other configurations in the following list with
+    # data = datasets.load_dataset('my_dataset', 'first_domain')
+    # data = datasets.load_dataset('my_dataset', 'second_domain')
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+    ]
+
+    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+    def _info(self):
+        # TODO: This method specifies the datasets.DatasetInfo object which contains the information and typings for the dataset
+        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+            features = datasets.Features(
+                {
+                    "sentence": datasets.Value("string"),
+                    "option1": datasets.Value("string"),
+                    "answer": datasets.Value("string")
+                    # These are the features of your dataset like images, labels ...
+                }
+            )
+        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+            features = datasets.Features(
+                {
+                    "sentence": datasets.Value("string"),
+                    "option2": datasets.Value("string"),
+                    "second_domain_answer": datasets.Value("string")
+                    # These are the features of your dataset like images, labels ...
+                }
+            )
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,  # Here we define them above because they are different between the two configurations
+            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+            # supervised_keys=("sentence", "label"),
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
+        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+        urls = _URLS[self.config.name]
+        data_dir = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, "train.jsonl"),
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, "dev.jsonl"),
+                    "split": "dev",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, "test.jsonl"),
+                    "split": "test"
+                },
+            ),
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath, split):
+        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
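+        # Each line of the JSONL file is one example; the keys yielded below must match the
+        # features declared in _info() for the selected configuration.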
+ with open(filepath, encoding="utf-8") as f: + for key, row in enumerate(f): + data = json.loads(row) + if self.config.name == "first_domain": + # Yields examples as (key, example) tuples + yield key, { + "sentence": data["sentence"], + "option1": data["option1"], + "answer": "" if split == "test" else data["answer"], + } + else: + yield key, { + "sentence": data["sentence"], + "option2": data["option2"], + "second_domain_answer": "" if split == "test" else data["second_domain_answer"], + } diff --git a/testbed/huggingface__datasets/tests/_test_patching.py b/testbed/huggingface__datasets/tests/_test_patching.py new file mode 100644 index 0000000000000000000000000000000000000000..dae9a419ec0c47b50793d039ba3da555fb080907 --- /dev/null +++ b/testbed/huggingface__datasets/tests/_test_patching.py @@ -0,0 +1,13 @@ +# isort: skip_file + +# This is the module that test_patching.py uses to test patch_submodule() + +import os # noqa: F401 - this is just for tests +import os as renamed_os # noqa: F401 - this is just for tests +from os import path # noqa: F401 - this is just for tests +from os import path as renamed_path # noqa: F401 - this is just for tests +from os.path import join # noqa: F401 - this is just for tests +from os.path import join as renamed_join # noqa: F401 - this is just for tests + + +open = open # noqa we just need to have a builtin inside this module to test it properly diff --git a/testbed/huggingface__datasets/tests/commands/__init__.py b/testbed/huggingface__datasets/tests/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/tests/commands/conftest.py b/testbed/huggingface__datasets/tests/commands/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..bac525c8c16975321fe533701243427dc8873f14 --- /dev/null +++ b/testbed/huggingface__datasets/tests/commands/conftest.py @@ -0,0 +1,74 @@ +import pytest + + +DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" + +DATASET_LOADING_SCRIPT_CODE = """ +import json +import os + +import datasets + + +REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/" +URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} + + +class __DummyDataset1__(datasets.GeneratorBasedBuilder): + + def _info(self): + features = datasets.Features( + { + "tokens": datasets.Sequence(datasets.Value("string")), + "ner_tags": datasets.Sequence( + datasets.features.ClassLabel( + names=[ + "O", + "B-PER", + "I-PER", + "B-ORG", + "I-ORG", + "B-LOC", + "I-LOC", + ] + ) + ), + "langs": datasets.Sequence(datasets.Value("string")), + "spans": datasets.Sequence(datasets.Value("string")), + } + ) + return datasets.DatasetInfo(features=features) + + def _split_generators(self, dl_manager): + dl_path = dl_manager.download(URLS) + return [ + datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), + datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), + ] + + def _generate_examples(self, filepath): + with open(filepath, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + yield i, json.loads(line) +""" + + +@pytest.fixture +def dataset_loading_script_name(): + return DATASET_LOADING_SCRIPT_NAME + + +@pytest.fixture +def dataset_loading_script_code(): + return DATASET_LOADING_SCRIPT_CODE + + +@pytest.fixture +def 
dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path): + script_name = dataset_loading_script_name + script_dir = tmp_path / "datasets" / script_name + script_dir.mkdir(parents=True) + script_path = script_dir / f"{script_name}.py" + with open(script_path, "w") as f: + f.write(dataset_loading_script_code) + return str(script_dir) diff --git a/testbed/huggingface__datasets/tests/features/__init__.py b/testbed/huggingface__datasets/tests/features/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/tests/features/data/test_audio_48000.opus b/testbed/huggingface__datasets/tests/features/data/test_audio_48000.opus new file mode 100644 index 0000000000000000000000000000000000000000..330f1bfa22a690528ad4bcbed2d52551047e4f29 Binary files /dev/null and b/testbed/huggingface__datasets/tests/features/data/test_audio_48000.opus differ diff --git a/testbed/huggingface__datasets/tests/features/test_audio.py b/testbed/huggingface__datasets/tests/features/test_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..255a6e4f765e43923d268d5df4ec6638fbf256f4 --- /dev/null +++ b/testbed/huggingface__datasets/tests/features/test_audio.py @@ -0,0 +1,685 @@ +import os +import tarfile + +import pyarrow as pa +import pytest + +from datasets import Dataset, concatenate_datasets, load_dataset +from datasets.features import Audio, Features, Sequence, Value + +from ..utils import ( + require_sndfile, +) + + +@pytest.fixture() +def tar_wav_path(shared_datadir, tmp_path_factory): + audio_path = str(shared_datadir / "test_audio_44100.wav") + path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar" + with tarfile.TarFile(path, "w") as f: + f.add(audio_path, arcname=os.path.basename(audio_path)) + return path + + +@pytest.fixture() +def tar_mp3_path(shared_datadir, tmp_path_factory): + audio_path = str(shared_datadir / "test_audio_44100.mp3") + path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar" + with tarfile.TarFile(path, "w") as f: + f.add(audio_path, arcname=os.path.basename(audio_path)) + return path + + +def iter_archive(archive_path): + with tarfile.open(archive_path) as tar: + for tarinfo in tar: + file_path = tarinfo.name + file_obj = tar.extractfile(tarinfo) + yield file_path, file_obj + + +def test_audio_instantiation(): + audio = Audio() + assert audio.sampling_rate is None + assert audio.mono is True + assert audio.id is None + assert audio.dtype == "dict" + assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()}) + assert audio._type == "Audio" + + +def test_audio_feature_type_to_arrow(): + features = Features({"audio": Audio()}) + assert features.arrow_schema == pa.schema({"audio": Audio().pa_type}) + features = Features({"struct_containing_an_audio": {"audio": Audio()}}) + assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})}) + features = Features({"sequence_of_audios": Sequence(Audio())}) + assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)}) + + +@pytest.mark.parametrize( + "build_example", + [ + lambda audio_path: audio_path, + lambda audio_path: open(audio_path, "rb").read(), + lambda audio_path: {"path": audio_path}, + lambda audio_path: {"path": audio_path, "bytes": None}, + lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()}, + lambda audio_path: {"path": None, 
"bytes": open(audio_path, "rb").read()}, + lambda audio_path: {"bytes": open(audio_path, "rb").read()}, + lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000}, + ], +) +def test_audio_feature_encode_example(shared_datadir, build_example): + audio_path = str(shared_datadir / "test_audio_44100.wav") + audio = Audio() + encoded_example = audio.encode_example(build_example(audio_path)) + assert isinstance(encoded_example, dict) + assert encoded_example.keys() == {"bytes", "path"} + assert encoded_example["bytes"] is not None or encoded_example["path"] is not None + decoded_example = audio.decode_example(encoded_example) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + + +@pytest.mark.parametrize( + "build_example", + [ + lambda audio_path: {"path": audio_path, "sampling_rate": 16_000}, + lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000}, + lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000}, + lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000}, + ], +) +def test_audio_feature_encode_example_pcm(shared_datadir, build_example): + audio_path = str(shared_datadir / "test_audio_16000.pcm") + audio = Audio(sampling_rate=16_000) + encoded_example = audio.encode_example(build_example(audio_path)) + assert isinstance(encoded_example, dict) + assert encoded_example.keys() == {"bytes", "path"} + assert encoded_example["bytes"] is not None or encoded_example["path"] is not None + decoded_example = audio.decode_example(encoded_example) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + + +@require_sndfile +def test_audio_decode_example(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + audio = Audio() + decoded_example = audio.decode_example(audio.encode_example(audio_path)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] == audio_path + assert decoded_example["array"].shape == (202311,) + assert decoded_example["sampling_rate"] == 44100 + + with pytest.raises(RuntimeError): + Audio(decode=False).decode_example(audio_path) + + +@require_sndfile +def test_audio_resampling(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + audio = Audio(sampling_rate=16000) + decoded_example = audio.decode_example(audio.encode_example(audio_path)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] == audio_path + assert decoded_example["array"].shape == (73401,) + assert decoded_example["sampling_rate"] == 16000 + + +@require_sndfile +def test_audio_decode_example_mp3(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.mp3") + audio = Audio() + decoded_example = audio.decode_example(audio.encode_example(audio_path)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] == audio_path + assert decoded_example["array"].shape == (110592,) + assert decoded_example["sampling_rate"] == 44100 + + +@require_sndfile +def test_audio_decode_example_opus(shared_datadir): + audio_path = str(shared_datadir / "test_audio_48000.opus") + audio = Audio() + decoded_example = audio.decode_example(audio.encode_example(audio_path)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] == audio_path + assert decoded_example["array"].shape == (48000,) + assert decoded_example["sampling_rate"] == 48000 + + 
+@pytest.mark.parametrize("sampling_rate", [16_000, 48_000]) +def test_audio_decode_example_pcm(shared_datadir, sampling_rate): + audio_path = str(shared_datadir / "test_audio_16000.pcm") + audio_input = {"path": audio_path, "sampling_rate": 16_000} + audio = Audio(sampling_rate=sampling_rate) + decoded_example = audio.decode_example(audio.encode_example(audio_input)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] is None + assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,) + assert decoded_example["sampling_rate"] == sampling_rate + + +@require_sndfile +def test_audio_resampling_mp3_different_sampling_rates(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.mp3") + audio_path2 = str(shared_datadir / "test_audio_16000.mp3") + audio = Audio(sampling_rate=48000) + + decoded_example = audio.decode_example(audio.encode_example(audio_path)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] == audio_path + assert decoded_example["array"].shape == (120373,) + assert decoded_example["sampling_rate"] == 48000 + + decoded_example = audio.decode_example(audio.encode_example(audio_path2)) + assert decoded_example.keys() == {"path", "array", "sampling_rate"} + assert decoded_example["path"] == audio_path2 + assert decoded_example["array"].shape == (122688,) + assert decoded_example["sampling_rate"] == 48000 + + +@require_sndfile +def test_dataset_with_audio_feature(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path]} + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (202311,) + assert item["audio"]["sampling_rate"] == 44100 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (202311,) + assert batch["audio"][0]["sampling_rate"] == 44100 + column = dset["audio"] + assert len(column) == 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (202311,) + assert column[0]["sampling_rate"] == 44100 + + +@require_sndfile +def test_dataset_with_audio_feature_tar_wav(tar_wav_path): + audio_filename = "test_audio_44100.wav" + data = {"audio": []} + for file_path, file_obj in iter_archive(tar_wav_path): + data["audio"].append({"path": file_path, "bytes": file_obj.read()}) + break + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_filename + assert item["audio"]["array"].shape == (202311,) + assert item["audio"]["sampling_rate"] == 44100 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_filename + assert batch["audio"][0]["array"].shape == (202311,) + assert batch["audio"][0]["sampling_rate"] == 44100 + column = dset["audio"] + 
assert len(column) == 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_filename + assert column[0]["array"].shape == (202311,) + assert column[0]["sampling_rate"] == 44100 + + +@require_sndfile +def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path): + audio_filename = "test_audio_44100.mp3" + data = {"audio": []} + for file_path, file_obj in iter_archive(tar_mp3_path): + data["audio"].append({"path": file_path, "bytes": file_obj.read()}) + break + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_filename + assert item["audio"]["array"].shape == (110592,) + assert item["audio"]["sampling_rate"] == 44100 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_filename + assert batch["audio"][0]["array"].shape == (110592,) + assert batch["audio"][0]["sampling_rate"] == 44100 + column = dset["audio"] + assert len(column) == 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_filename + assert column[0]["array"].shape == (110592,) + assert column[0]["sampling_rate"] == 44100 + + +@require_sndfile +def test_dataset_with_audio_feature_with_none(): + data = {"audio": [None]} + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"] is None + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"audio"} + assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"]) + column = dset["audio"] + assert len(column) == 1 + assert isinstance(column, list) and all(item is None for item in column) + + # nested tests + + data = {"audio": [[None]]} + features = Features({"audio": Sequence(Audio())}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert all(i is None for i in item["audio"]) + + data = {"nested": [{"audio": None}]} + features = Features({"nested": {"audio": Audio()}}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"nested"} + assert item["nested"].keys() == {"audio"} + assert item["nested"]["audio"] is None + + +@require_sndfile +def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path]} + features = Features({"audio": Audio(sampling_rate=16000)}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (73401,) + assert item["audio"]["sampling_rate"] == 16000 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (73401,) + assert batch["audio"][0]["sampling_rate"] == 16000 + column = dset["audio"] + assert len(column) == 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + 
assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (73401,) + assert column[0]["sampling_rate"] == 16000 + + +@require_sndfile +def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.mp3") + data = {"audio": [audio_path]} + features = Features({"audio": Audio(sampling_rate=16000)}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (40125,) + assert item["audio"]["sampling_rate"] == 16000 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (40125,) + assert batch["audio"][0]["sampling_rate"] == 16000 + column = dset["audio"] + assert len(column) == 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (40125,) + assert column[0]["sampling_rate"] == 16000 + + +@require_sndfile +def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path]} + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item["audio"]["sampling_rate"] == 44100 + dset = dset.cast_column("audio", Audio(sampling_rate=16000)) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (73401,) + assert item["audio"]["sampling_rate"] == 16000 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (73401,) + assert batch["audio"][0]["sampling_rate"] == 16000 + column = dset["audio"] + assert len(column) == 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (73401,) + assert column[0]["sampling_rate"] == 16000 + + +@require_sndfile +def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.mp3") + data = {"audio": [audio_path]} + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item["audio"]["sampling_rate"] == 44100 + dset = dset.cast_column("audio", Audio(sampling_rate=16000)) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (40125,) + assert item["audio"]["sampling_rate"] == 16000 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (40125,) + assert batch["audio"][0]["sampling_rate"] == 16000 + column = dset["audio"] + assert len(column) 
== 1 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (40125,) + assert column[0]["sampling_rate"] == 16000 + + +@pytest.mark.parametrize( + "build_data", + [ + lambda audio_path: {"audio": [audio_path]}, + lambda audio_path: {"audio": [open(audio_path, "rb").read()]}, + lambda audio_path: {"audio": [{"path": audio_path}]}, + lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]}, + lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]}, + lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]}, + lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]}, + ], +) +def test_dataset_cast_to_audio_features(shared_datadir, build_data): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = build_data(audio_path) + dset = Dataset.from_dict(data) + item = dset.cast(Features({"audio": Audio()}))[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + item = dset.cast_column("audio", Audio())[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + + +def test_dataset_concatenate_audio_features(shared_datadir): + # we use a different data structure between 1 and 2 to make sure they are compatible with each other + audio_path = str(shared_datadir / "test_audio_44100.wav") + data1 = {"audio": [audio_path]} + dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()})) + data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]} + dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()})) + concatenated_dataset = concatenate_datasets([dset1, dset2]) + assert len(concatenated_dataset) == len(dset1) + len(dset2) + assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape + assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape + + +def test_dataset_concatenate_nested_audio_features(shared_datadir): + # we use a different data structure between 1 and 2 to make sure they are compatible with each other + audio_path = str(shared_datadir / "test_audio_44100.wav") + features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]}) + data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]} + dset1 = Dataset.from_dict(data1, features=features) + data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]} + dset2 = Dataset.from_dict(data2, features=features) + concatenated_dataset = concatenate_datasets([dset1, dset2]) + assert len(concatenated_dataset) == len(dset1) + len(dset2) + assert ( + concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape + == dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape + ) + assert ( + concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape + == dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape + ) + + +@require_sndfile +def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path], "text": ["Hello"]} + features = Features({"audio": Audio(), "text": Value("string")}) + dset = Dataset.from_dict(data, features=features) + + expected_audio = features.encode_batch(data)["audio"][0] + for item in dset.cast_column("audio", Audio(decode=False)): + assert item.keys() 
== {"audio", "text"} + assert item == {"audio": expected_audio, "text": "Hello"} + + def process_text(example): + example["text"] = example["text"] + " World!" + return example + + processed_dset = dset.map(process_text) + for item in processed_dset.cast_column("audio", Audio(decode=False)): + assert item.keys() == {"audio", "text"} + assert item == {"audio": expected_audio, "text": "Hello World!"} + + +@require_sndfile +def test_dataset_with_audio_feature_map_is_decoded(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path], "text": ["Hello"]} + features = Features({"audio": Audio(), "text": Value("string")}) + dset = Dataset.from_dict(data, features=features) + + def process_audio_sampling_rate_by_example(example): + example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"] + return example + + decoded_dset = dset.map(process_audio_sampling_rate_by_example) + for item in decoded_dset.cast_column("audio", Audio(decode=False)): + assert item.keys() == {"audio", "text", "double_sampling_rate"} + assert item["double_sampling_rate"] == 88200 + + def process_audio_sampling_rate_by_batch(batch): + double_sampling_rates = [] + for audio in batch["audio"]: + double_sampling_rates.append(2 * audio["sampling_rate"]) + batch["double_sampling_rate"] = double_sampling_rates + return batch + + decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True) + for item in decoded_dset.cast_column("audio", Audio(decode=False)): + assert item.keys() == {"audio", "text", "double_sampling_rate"} + assert item["double_sampling_rate"] == 88200 + + +@require_sndfile +def test_formatted_dataset_with_audio_feature(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path, audio_path]} + features = Features({"audio": Audio()}) + dset = Dataset.from_dict(data, features=features) + with dset.formatted_as("numpy"): + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (202311,) + assert item["audio"]["sampling_rate"] == 44100 + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (202311,) + assert batch["audio"][0]["sampling_rate"] == 44100 + column = dset["audio"] + assert len(column) == 2 + assert column[0].keys() == {"path", "array", "sampling_rate"} + assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (202311,) + assert column[0]["sampling_rate"] == 44100 + + with dset.formatted_as("pandas"): + item = dset[0] + assert item.shape == (1, 1) + assert item.columns == ["audio"] + assert item["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert item["audio"][0]["path"] == audio_path + assert item["audio"][0]["array"].shape == (202311,) + assert item["audio"][0]["sampling_rate"] == 44100 + batch = dset[:1] + assert batch.shape == (1, 1) + assert batch.columns == ["audio"] + assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"} + assert batch["audio"][0]["path"] == audio_path + assert batch["audio"][0]["array"].shape == (202311,) + assert batch["audio"][0]["sampling_rate"] == 44100 + column = dset["audio"] + assert len(column) == 2 + assert column[0].keys() == {"path", "array", "sampling_rate"} + 
assert column[0]["path"] == audio_path + assert column[0]["array"].shape == (202311,) + assert column[0]["sampling_rate"] == 44100 + + +@pytest.fixture +def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory): + import json + + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = [{"audio": audio_path, "text": "Hello world!"}] + path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl") + with open(path, "w") as f: + for item in data: + f.write(json.dumps(item) + "\n") + return path + + +@require_sndfile +@pytest.mark.parametrize("streaming", [False, True]) +def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data_files = jsonl_audio_dataset_path + features = Features({"audio": Audio(), "text": Value("string")}) + dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming) + item = dset[0] if not streaming else next(iter(dset)) + assert item.keys() == {"audio", "text"} + assert item["audio"].keys() == {"path", "array", "sampling_rate"} + assert item["audio"]["path"] == audio_path + assert item["audio"]["array"].shape == (202311,) + assert item["audio"]["sampling_rate"] == 44100 + + +@require_sndfile +@pytest.mark.integration +def test_dataset_with_audio_feature_loaded_from_cache(): + # load first time + ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean") + # load from cache + ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") + assert isinstance(ds, Dataset) + + +def test_dataset_with_audio_feature_undecoded(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path]} + features = Features({"audio": Audio(decode=False)}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"] == {"path": audio_path, "bytes": None} + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0] == {"path": audio_path, "bytes": None} + column = dset["audio"] + assert len(column) == 1 + assert column[0] == {"path": audio_path, "bytes": None} + + +def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": [audio_path]} + features = Features({"audio": Audio(decode=False)}) + dset = Dataset.from_dict(data, features=features) + with dset.formatted_as("numpy"): + item = dset[0] + assert item.keys() == {"audio"} + assert item["audio"] == {"path": audio_path, "bytes": None} + batch = dset[:1] + assert batch.keys() == {"audio"} + assert len(batch["audio"]) == 1 + assert batch["audio"][0] == {"path": audio_path, "bytes": None} + column = dset["audio"] + assert len(column) == 1 + assert column[0] == {"path": audio_path, "bytes": None} + + with dset.formatted_as("pandas"): + item = dset[0] + assert item.shape == (1, 1) + assert item.columns == ["audio"] + assert item["audio"][0] == {"path": audio_path, "bytes": None} + batch = dset[:1] + assert batch.shape == (1, 1) + assert batch.columns == ["audio"] + assert batch["audio"][0] == {"path": audio_path, "bytes": None} + column = dset["audio"] + assert len(column) == 1 + assert column[0] == {"path": audio_path, "bytes": None} + + +def test_dataset_with_audio_feature_map_undecoded(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + data = {"audio": 
[audio_path]} + features = Features({"audio": Audio(decode=False)}) + dset = Dataset.from_dict(data, features=features) + + def assert_audio_example_undecoded(example): + assert example["audio"] == {"path": audio_path, "bytes": None} + + dset.map(assert_audio_example_undecoded) + + def assert_audio_batch_undecoded(batch): + for audio in batch["audio"]: + assert audio == {"path": audio_path, "bytes": None} + + dset.map(assert_audio_batch_undecoded, batched=True) + + +def test_audio_embed_storage(shared_datadir): + audio_path = str(shared_datadir / "test_audio_44100.wav") + example = {"bytes": None, "path": audio_path} + storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()})) + embedded_storage = Audio().embed_storage(storage) + embedded_example = embedded_storage.to_pylist()[0] + assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"} diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_csv.py b/testbed/huggingface__datasets/tests/packaged_modules/test_csv.py new file mode 100644 index 0000000000000000000000000000000000000000..6cfa5e4ca233bbb810566ba2da8864d9ac73b8a8 --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_csv.py @@ -0,0 +1,132 @@ +import os +import textwrap + +import pyarrow as pa +import pytest + +from datasets import ClassLabel, Features, Image +from datasets.packaged_modules.csv.csv import Csv + +from ..utils import require_pil + + +@pytest.fixture +def csv_file(tmp_path): + filename = tmp_path / "file.csv" + data = textwrap.dedent( + """\ + header1,header2 + 1,2 + 10,20 + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def malformed_csv_file(tmp_path): + filename = tmp_path / "malformed_file.csv" + data = textwrap.dedent( + """\ + header1,header2 + 1,2 + 10,20, + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def csv_file_with_image(tmp_path, image_file): + filename = tmp_path / "csv_with_image.csv" + data = textwrap.dedent( + f"""\ + image + {image_file} + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def csv_file_with_label(tmp_path): + filename = tmp_path / "csv_with_label.csv" + data = textwrap.dedent( + """\ + label + good + bad + good + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def csv_file_with_int_list(tmp_path): + filename = tmp_path / "csv_with_int_list.csv" + data = textwrap.dedent( + """\ + int_list + 1 2 3 + 4 5 6 + 7 8 9 + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog): + csv = Csv() + generator = csv._generate_tables([[csv_file, malformed_csv_file]]) + with pytest.raises(ValueError, match="Error tokenizing data"): + for _ in generator: + pass + assert any( + record.levelname == "ERROR" + and "Failed to read file" in record.message + and os.path.basename(malformed_csv_file) in record.message + for record in caplog.records + ) + + +@require_pil +def test_csv_cast_image(csv_file_with_image): + with open(csv_file_with_image, encoding="utf-8") as f: + image_file = f.read().splitlines()[1] + csv = Csv(encoding="utf-8", features=Features({"image": Image()})) + generator = csv._generate_tables([[csv_file_with_image]]) + pa_table = pa.concat_tables([table for _, table in generator]) + assert 
pa_table.schema.field("image").type == Image()() + generated_content = pa_table.to_pydict()["image"] + assert generated_content == [{"path": image_file, "bytes": None}] + + +def test_csv_cast_label(csv_file_with_label): + with open(csv_file_with_label, encoding="utf-8") as f: + labels = f.read().splitlines()[1:] + csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])})) + generator = csv._generate_tables([[csv_file_with_label]]) + pa_table = pa.concat_tables([table for _, table in generator]) + assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])() + generated_content = pa_table.to_pydict()["label"] + assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels] + + +def test_csv_convert_int_list(csv_file_with_int_list): + csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]}) + generator = csv._generate_tables([[csv_file_with_int_list]]) + pa_table = pa.concat_tables([table for _, table in generator]) + assert pa.types.is_list(pa_table.schema.field("int_list").type) + generated_content = pa_table.to_pydict()["int_list"] + assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]] diff --git a/testbed/huggingface__datasets/tests/test_dataset_list.py b/testbed/huggingface__datasets/tests/test_dataset_list.py new file mode 100644 index 0000000000000000000000000000000000000000..1004ae3cd6883ef0a1d0a6968f8e72b7eb54090c --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_dataset_list.py @@ -0,0 +1,47 @@ +from unittest import TestCase + +from datasets import Sequence, Value +from datasets.arrow_dataset import Dataset + + +class DatasetListTest(TestCase): + def _create_example_records(self): + return [ + {"col_1": 3, "col_2": "a"}, + {"col_1": 2, "col_2": "b"}, + {"col_1": 1, "col_2": "c"}, + {"col_1": 0, "col_2": "d"}, + ] + + def _create_example_dict(self): + data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} + return Dataset.from_dict(data) + + def test_create(self): + example_records = self._create_example_records() + dset = Dataset.from_list(example_records) + self.assertListEqual(dset.column_names, ["col_1", "col_2"]) + for i, r in enumerate(dset): + self.assertDictEqual(r, example_records[i]) + + def test_list_dict_equivalent(self): + example_records = self._create_example_records() + dset = Dataset.from_list(example_records) + dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]}) + self.assertEqual(dset.info, dset_from_dict.info) + + def test_uneven_records(self): # checks what happens with missing columns + uneven_records = [{"col_1": 1}, {"col_2": "x"}] + dset = Dataset.from_list(uneven_records) + self.assertDictEqual(dset[0], {"col_1": 1}) + self.assertDictEqual(dset[1], {"col_1": None}) # NB: first record is used for columns + + def test_variable_list_records(self): # checks if the type can be inferred from the second record + list_records = [{"col_1": []}, {"col_1": [1, 2]}] + dset = Dataset.from_list(list_records) + self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64"))) + + def test_create_empty(self): + dset = Dataset.from_list([]) + self.assertEqual(len(dset), 0) + self.assertListEqual(dset.column_names, []) diff --git a/testbed/huggingface__datasets/tests/test_distributed.py b/testbed/huggingface__datasets/tests/test_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd228f2506b5f4c56dae6c0e3a47a33fd9dd0c2 --- 
/dev/null +++ b/testbed/huggingface__datasets/tests/test_distributed.py @@ -0,0 +1,123 @@ +import os +import sys +from pathlib import Path + +import pytest + +from datasets import Dataset, IterableDataset +from datasets.distributed import split_dataset_by_node + +from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch + + +def test_split_dataset_by_node_map_style(): + full_ds = Dataset.from_dict({"i": range(17)}) + full_size = len(full_ds) + world_size = 3 + datasets_per_rank = [ + split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) + ] + assert sum(len(ds) for ds in datasets_per_rank) == full_size + assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size + + +def test_split_dataset_by_node_iterable(): + def gen(): + return ({"i": i} for i in range(17)) + + world_size = 3 + full_ds = IterableDataset.from_generator(gen) + full_size = len(list(full_ds)) + datasets_per_rank = [ + split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) + ] + assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size + assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size + + +@pytest.mark.parametrize("shards_per_node", [1, 2, 3]) +def test_split_dataset_by_node_iterable_sharded(shards_per_node): + def gen(shards): + for shard in shards: + yield from ({"i": i, "shard": shard} for i in range(17)) + + world_size = 3 + num_shards = shards_per_node * world_size + gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]} + full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs) + full_size = len(list(full_ds)) + assert full_ds.n_shards == world_size * shards_per_node + datasets_per_rank = [ + split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size) + ] + assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size + assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size + assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size + + +def test_distributed_shuffle_iterable(): + def gen(): + return ({"i": i} for i in range(17)) + + world_size = 2 + full_ds = IterableDataset.from_generator(gen) + full_size = len(list(full_ds)) + + ds_rank0 = split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle(seed=42) + assert len(list(ds_rank0)) == 1 + full_size // world_size + with pytest.raises(RuntimeError): + split_dataset_by_node(full_ds, rank=0, world_size=world_size).shuffle() + + ds_rank0 = split_dataset_by_node(full_ds.shuffle(seed=42), rank=0, world_size=world_size) + assert len(list(ds_rank0)) == 1 + full_size // world_size + with pytest.raises(RuntimeError): + split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size) + + +@pytest.mark.parametrize("streaming", [False, True]) +@require_torch +@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows") +@pytest.mark.integration +def test_torch_distributed_run(streaming): + nproc_per_node = 2 + master_port = get_torch_dist_unique_port() + test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py" + distributed_args = f""" + -m torch.distributed.run + --nproc_per_node={nproc_per_node} + --master_port={master_port} + {test_script} + """.split() + args = f""" + --streaming={streaming} + """.split() + cmd = [sys.executable] + distributed_args + args + 
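# launches `python -m torch.distributed.run ...` in a subprocess; the helper is expected to raise if the run exits with a non-zero status +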
execute_subprocess_async(cmd, env=os.environ.copy()) + + +@pytest.mark.parametrize( + "nproc_per_node, num_workers", + [ + (2, 2), # each node has 2 shards and each worker has 1 shard + (3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards + ], +) +@require_torch +@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows") +@pytest.mark.integration +def test_torch_distributed_run_streaming_with_num_workers(nproc_per_node, num_workers): + streaming = True + master_port = get_torch_dist_unique_port() + test_script = Path(__file__).resolve().parent / "distributed_scripts" / "run_torch_distributed.py" + distributed_args = f""" + -m torch.distributed.run + --nproc_per_node={nproc_per_node} + --master_port={master_port} + {test_script} + """.split() + args = f""" + --streaming={streaming} + --num_workers={num_workers} + """.split() + cmd = [sys.executable] + distributed_args + args + execute_subprocess_async(cmd, env=os.environ.copy()) diff --git a/testbed/huggingface__datasets/tests/test_extract.py b/testbed/huggingface__datasets/tests/test_extract.py new file mode 100644 index 0000000000000000000000000000000000000000..186d65fd0ba76f50b268c2cd6cef08d76ecfb55f --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_extract.py @@ -0,0 +1,203 @@ +import os +import zipfile + +import pytest + +from datasets.utils.extract import ( + Bzip2Extractor, + Extractor, + GzipExtractor, + Lz4Extractor, + SevenZipExtractor, + TarExtractor, + XzExtractor, + ZipExtractor, + ZstdExtractor, +) + +from .utils import require_lz4, require_py7zr, require_zstandard + + +@pytest.mark.parametrize( + "compression_format, is_archive", + [ + ("7z", True), + ("bz2", False), + ("gzip", False), + ("lz4", False), + ("tar", True), + ("xz", False), + ("zip", True), + ("zstd", False), + ], +) +def test_base_extractors( + compression_format, + is_archive, + bz2_file, + gz_file, + lz4_file, + seven_zip_file, + tar_file, + xz_file, + zip_file, + zstd_file, + tmp_path, + text_file, +): + input_paths_and_base_extractors = { + "7z": (seven_zip_file, SevenZipExtractor), + "bz2": (bz2_file, Bzip2Extractor), + "gzip": (gz_file, GzipExtractor), + "lz4": (lz4_file, Lz4Extractor), + "tar": (tar_file, TarExtractor), + "xz": (xz_file, XzExtractor), + "zip": (zip_file, ZipExtractor), + "zstd": (zstd_file, ZstdExtractor), + } + input_path, base_extractor = input_paths_and_base_extractors[compression_format] + if input_path is None: + reason = f"for '{compression_format}' compression_format, " + if compression_format == "7z": + reason += require_py7zr.kwargs["reason"] + elif compression_format == "lz4": + reason += require_lz4.kwargs["reason"] + elif compression_format == "zstd": + reason += require_zstandard.kwargs["reason"] + pytest.skip(reason) + assert base_extractor.is_extractable(input_path) + output_path = tmp_path / ("extracted" if is_archive else "extracted.txt") + base_extractor.extract(input_path, output_path) + if is_archive: + assert output_path.is_dir() + for file_path in output_path.iterdir(): + assert file_path.name == text_file.name + extracted_file_content = file_path.read_text(encoding="utf-8") + else: + extracted_file_content = output_path.read_text(encoding="utf-8") + expected_file_content = text_file.read_text(encoding="utf-8") + assert extracted_file_content == expected_file_content + + +@pytest.mark.parametrize( + "compression_format, is_archive", + [ + ("7z", True), + ("bz2", False), + ("gzip", False), + ("lz4", False), + ("tar", True), + ("xz",
False), + ("zip", True), + ("zstd", False), + ], +) +def test_extractor( + compression_format, + is_archive, + bz2_file, + gz_file, + lz4_file, + seven_zip_file, + tar_file, + xz_file, + zip_file, + zstd_file, + tmp_path, + text_file, +): + input_paths = { + "7z": seven_zip_file, + "bz2": bz2_file, + "gzip": gz_file, + "lz4": lz4_file, + "tar": tar_file, + "xz": xz_file, + "zip": zip_file, + "zstd": zstd_file, + } + input_path = input_paths[compression_format] + if input_path is None: + reason = f"for '{compression_format}' compression_format, " + if compression_format == "7z": + reason += require_py7zr.kwargs["reason"] + elif compression_format == "lz4": + reason += require_lz4.kwargs["reason"] + elif compression_format == "zstd": + reason += require_zstandard.kwargs["reason"] + pytest.skip(reason) + extractor_format = Extractor.infer_extractor_format(input_path) + assert extractor_format is not None + output_path = tmp_path / ("extracted" if is_archive else "extracted.txt") + Extractor.extract(input_path, output_path, extractor_format) + if is_archive: + assert output_path.is_dir() + for file_path in output_path.iterdir(): + assert file_path.name == text_file.name + extracted_file_content = file_path.read_text(encoding="utf-8") + else: + extracted_file_content = output_path.read_text(encoding="utf-8") + expected_file_content = text_file.read_text(encoding="utf-8") + assert extracted_file_content == expected_file_content + + +@pytest.fixture +def tar_file_with_dot_dot(tmp_path, text_file): + import tarfile + + directory = tmp_path / "data_dot_dot" + directory.mkdir() + path = directory / "tar_file_with_dot_dot.tar" + with tarfile.TarFile(path, "w") as f: + f.add(text_file, arcname=os.path.join("..", text_file.name)) + return path + + +@pytest.fixture +def tar_file_with_sym_link(tmp_path): + import tarfile + + directory = tmp_path / "data_sym_link" + directory.mkdir() + path = directory / "tar_file_with_sym_link.tar" + os.symlink("..", directory / "subdir", target_is_directory=True) + with tarfile.TarFile(path, "w") as f: + f.add(str(directory / "subdir"), arcname="subdir") # str required by os.readlink on Windows and Python < 3.8 + return path + + +@pytest.mark.parametrize( + "insecure_tar_file, error_log", + [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], +) +def test_tar_extract_insecure_files( + insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog +): + insecure_tar_files = { + "tar_file_with_dot_dot": tar_file_with_dot_dot, + "tar_file_with_sym_link": tar_file_with_sym_link, + } + input_path = insecure_tar_files[insecure_tar_file] + output_path = tmp_path / "extracted" + TarExtractor.extract(input_path, output_path) + assert caplog.text + for record in caplog.records: + assert record.levelname == "ERROR" + assert error_log in record.msg + + +def test_is_zipfile_false_positive(tmpdir): + # We should have fewer false positives than zipfile.is_zipfile + # We do that by checking only the magic number + not_a_zip_file = tmpdir / "not_a_zip_file" + # From: https://github.com/python/cpython/pull/5053 + data = ( + b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" + b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" + b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" + b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" + ) + with not_a_zip_file.open("wb") as f: + f.write(data) + assert zipfile.is_zipfile(str(not_a_zip_file)) # is a false positive for `zipfile` + assert not
ZipExtractor.is_extractable(not_a_zip_file) # but we're right diff --git a/testbed/huggingface__datasets/tests/test_file_utils.py b/testbed/huggingface__datasets/tests/test_file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54f11bccce4a9e55a306190a22f367be1abb6698 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_file_utils.py @@ -0,0 +1,141 @@ +import os +from pathlib import Path +from unittest.mock import patch + +import pytest +import zstandard as zstd + +from datasets.download.download_config import DownloadConfig +from datasets.utils.file_utils import ( + OfflineModeIsEnabled, + cached_path, + fsspec_get, + fsspec_head, + ftp_get, + ftp_head, + get_from_cache, + http_get, + http_head, +) + + +FILE_CONTENT = """\ + Text data. + Second line of data.""" + +FILE_PATH = "file" + + +@pytest.fixture(scope="session") +def zstd_path(tmp_path_factory): + path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd") + data = bytes(FILE_CONTENT, "utf-8") + with zstd.open(path, "wb") as f: + f.write(data) + return path + + +@pytest.fixture +def tmpfs_file(tmpfs): + with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f: + f.write(FILE_CONTENT) + return FILE_PATH + + +@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"]) +def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file): + input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} + input_path = input_paths[compression_format] + cache_dir = tmp_path / "cache" + download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True) + extracted_path = cached_path(input_path, download_config=download_config) + with open(extracted_path) as f: + extracted_file_content = f.read() + with open(text_file) as f: + expected_file_content = f.read() + assert extracted_file_content == expected_file_content + + +@pytest.mark.parametrize("default_extracted", [True, False]) +@pytest.mark.parametrize("default_cache_dir", [True, False]) +def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch): + custom_cache_dir = "custom_cache" + custom_extracted_dir = "custom_extracted_dir" + custom_extracted_path = tmp_path / "custom_extracted_path" + if default_extracted: + expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") + else: + monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir) + monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path)) + expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) + + filename = xz_file + download_config = ( + DownloadConfig(extract_compressed_file=True) + if default_cache_dir + else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True) + ) + extracted_file_path = cached_path(filename, download_config=download_config) + assert Path(extracted_file_path).parent.parts[-2:] == expected + + +def test_cached_path_local(text_file): + # input absolute path -> output absolute path + text_file_abs = str(Path(text_file).resolve()) + assert os.path.samefile(cached_path(text_file_abs), text_file_abs) + # input relative path -> output absolute path + text_file = __file__ + text_file_abs = str(Path(text_file).resolve()) + text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd()))) + assert os.path.samefile(cached_path(text_file_rel), text_file_abs) + + +def 
test_cached_path_missing_local(tmp_path): + # absolute path + missing_file = str(tmp_path.resolve() / "__missing_file__.txt") + with pytest.raises(FileNotFoundError): + cached_path(missing_file) + # relative path + missing_file = "./__missing_file__.txt" + with pytest.raises(FileNotFoundError): + cached_path(missing_file) + + +def test_get_from_cache_fsspec(tmpfs_file): + output_path = get_from_cache(f"tmp://{tmpfs_file}") + with open(output_path) as f: + output_file_content = f.read() + assert output_file_content == FILE_CONTENT + + +@patch("datasets.config.HF_DATASETS_OFFLINE", True) +def test_cached_path_offline(): + with pytest.raises(OfflineModeIsEnabled): + cached_path("https://huggingface.co") + + +@patch("datasets.config.HF_DATASETS_OFFLINE", True) +def test_http_offline(tmp_path_factory): + filename = tmp_path_factory.mktemp("data") / "file.html" + with pytest.raises(OfflineModeIsEnabled): + http_get("https://huggingface.co", temp_file=filename) + with pytest.raises(OfflineModeIsEnabled): + http_head("https://huggingface.co") + + +@patch("datasets.config.HF_DATASETS_OFFLINE", True) +def test_ftp_offline(tmp_path_factory): + filename = tmp_path_factory.mktemp("data") / "file.html" + with pytest.raises(OfflineModeIsEnabled): + ftp_get("ftp://huggingface.co", temp_file=filename) + with pytest.raises(OfflineModeIsEnabled): + ftp_head("ftp://huggingface.co") + + +@patch("datasets.config.HF_DATASETS_OFFLINE", True) +def test_fsspec_offline(tmp_path_factory): + filename = tmp_path_factory.mktemp("data") / "file.html" + with pytest.raises(OfflineModeIsEnabled): + fsspec_get("s3://huggingface.co", temp_file=filename) + with pytest.raises(OfflineModeIsEnabled): + fsspec_head("s3://huggingface.co") diff --git a/testbed/huggingface__datasets/tests/test_hf_gcp.py b/testbed/huggingface__datasets/tests/test_hf_gcp.py new file mode 100644 index 0000000000000000000000000000000000000000..d3aa27ff1554a02d82008cb2641c9ed2f17b38b5 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_hf_gcp.py @@ -0,0 +1,109 @@ +import os +from tempfile import TemporaryDirectory +from unittest import TestCase + +import pytest +from absl.testing import parameterized + +from datasets import config +from datasets.arrow_reader import HF_GCP_BASE_URL +from datasets.builder import DatasetBuilder +from datasets.dataset_dict import IterableDatasetDict +from datasets.iterable_dataset import IterableDataset +from datasets.load import dataset_module_factory, import_main_class +from datasets.utils.file_utils import cached_path + + +DATASETS_ON_HF_GCP = [ + {"dataset": "wikipedia", "config_name": "20220301.de"}, + {"dataset": "wikipedia", "config_name": "20220301.en"}, + {"dataset": "wikipedia", "config_name": "20220301.fr"}, + {"dataset": "wikipedia", "config_name": "20220301.frr"}, + {"dataset": "wikipedia", "config_name": "20220301.it"}, + {"dataset": "wikipedia", "config_name": "20220301.simple"}, + {"dataset": "snli", "config_name": "plain_text"}, + {"dataset": "eli5", "config_name": "LFQA_reddit"}, + {"dataset": "wiki40b", "config_name": "en"}, + {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, + {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, + {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, + {"dataset": "natural_questions", "config_name": "default"}, +] + + +def list_datasets_on_hf_gcp_parameters(with_config=True): + if with_config: + return [ + { + "testcase_name": d["dataset"] + "/" + d["config_name"], + "dataset": d["dataset"], + "config_name": 
d["config_name"], + } + for d in DATASETS_ON_HF_GCP + ] + else: + return [ + {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} + ] + + +@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True)) +class TestDatasetOnHfGcp(TestCase): + dataset = None + config_name = None + + def test_dataset_info_available(self, dataset, config_name): + with TemporaryDirectory() as tmp_dir: + dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir) + + builder_cls = import_main_class(dataset_module.module_path, dataset=True) + + builder_instance: DatasetBuilder = builder_cls( + cache_dir=tmp_dir, + config_name=config_name, + hash=dataset_module.hash, + ) + + dataset_info_url = "/".join( + [ + HF_GCP_BASE_URL, + builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"), + config.DATASET_INFO_FILENAME, + ] + ) + datset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir) + self.assertTrue(os.path.exists(datset_info_path)) + + +@pytest.mark.integration +def test_as_dataset_from_hf_gcs(tmp_path_factory): + tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple" + dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir) + builder_cls = import_main_class(dataset_module.module_path) + builder_instance: DatasetBuilder = builder_cls( + cache_dir=tmp_dir, + config_name="20220301.frr", + hash=dataset_module.hash, + ) + # use the HF cloud storage, not the original download_and_prepare that uses apache-beam + builder_instance._download_and_prepare = None + builder_instance.download_and_prepare() + ds = builder_instance.as_dataset() + assert ds + + +@pytest.mark.integration +def test_as_streaming_dataset_from_hf_gcs(tmp_path): + dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path) + builder_cls = import_main_class(dataset_module.module_path, dataset=True) + builder_instance: DatasetBuilder = builder_cls( + cache_dir=tmp_path, + config_name="20220301.frr", + hash=dataset_module.hash, + ) + ds = builder_instance.as_streaming_dataset() + assert ds + assert isinstance(ds, IterableDatasetDict) + assert "train" in ds + assert isinstance(ds["train"], IterableDataset) + assert next(iter(ds["train"])) diff --git a/testbed/huggingface__datasets/tests/test_hub.py b/testbed/huggingface__datasets/tests/test_hub.py new file mode 100644 index 0000000000000000000000000000000000000000..d7cdbd2843cad4055bea370f884ffc69226d0e86 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_hub.py @@ -0,0 +1,13 @@ +from urllib.parse import quote + +import pytest + +from datasets.utils.hub import hf_hub_url + + +@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"]) +@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"]) +@pytest.mark.parametrize("revision", [None, "v2"]) +def test_hf_hub_url(repo_id, path, revision): + url = hf_hub_url(repo_id=repo_id, path=path, revision=revision) + assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}" diff --git a/testbed/huggingface__datasets/tests/test_info.py b/testbed/huggingface__datasets/tests/test_info.py new file mode 100644 index 0000000000000000000000000000000000000000..f82c98fb161129d4dd70346a897a2b5e0639ed42 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_info.py @@ -0,0 +1,136 @@ +import os + +import pytest +import yaml + +from datasets.features.features import Features, Value +from datasets.info import 
DatasetInfo, DatasetInfosDict + + +@pytest.mark.parametrize( + "files", + [ + ["full:README.md", "dataset_infos.json"], + ["empty:README.md", "dataset_infos.json"], + ["dataset_infos.json"], + ["full:README.md"], + ], +) +def test_from_dir(files, tmp_path_factory): + dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir") + if "full:README.md" in files: + with open(dataset_infos_dir / "README.md", "w") as f: + f.write("---\ndataset_info:\n  dataset_size: 42\n---") + if "empty:README.md" in files: + with open(dataset_infos_dir / "README.md", "w") as f: + f.write("") + # we want to support dataset_infos.json for backward compatibility + if "dataset_infos.json" in files: + with open(dataset_infos_dir / "dataset_infos.json", "w") as f: + f.write('{"default": {"dataset_size": 42}}') + dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir) + assert dataset_infos + assert dataset_infos["default"].dataset_size == 42 + + +@pytest.mark.parametrize( + "dataset_info", + [ + DatasetInfo(), + DatasetInfo( + description="foo", + features=Features({"a": Value("int32")}), + builder_name="builder", + config_name="config", + version="1.0.0", + splits=[{"name": "train"}], + download_size=42, + ), + ], +) +def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo): + tmp_path = str(tmp_path) + dataset_info.write_to_directory(tmp_path) + reloaded = DatasetInfo.from_directory(tmp_path) + assert dataset_info == reloaded + assert os.path.exists(os.path.join(tmp_path, "dataset_info.json")) + + +def test_dataset_info_to_yaml_dict(): + dataset_info = DatasetInfo( + description="foo", + citation="bar", + homepage="https://foo.bar", + license="CC0", + features=Features({"a": Value("int32")}), + post_processed={}, + supervised_keys=(), + task_templates=[], + builder_name="builder", + config_name="config", + version="1.0.0", + splits=[{"name": "train", "num_examples": 42}], + download_checksums={}, + download_size=1337, + post_processing_size=442, + dataset_size=1234, + size_in_bytes=1337 + 442 + 1234, + ) + dataset_info_yaml_dict = dataset_info._to_yaml_dict() + assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML) + for key in DatasetInfo._INCLUDED_INFO_IN_YAML: + assert key in dataset_info_yaml_dict + assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str)) + dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict) + reloaded = yaml.safe_load(dataset_info_yaml) + assert dataset_info_yaml_dict == reloaded + + +def test_dataset_info_to_yaml_dict_empty(): + dataset_info = DatasetInfo() + dataset_info_yaml_dict = dataset_info._to_yaml_dict() + assert dataset_info_yaml_dict == {} + + +@pytest.mark.parametrize( + "dataset_infos_dict", + [ + DatasetInfosDict(), + DatasetInfosDict({"default": DatasetInfo()}), + DatasetInfosDict({"my_config_name": DatasetInfo()}), + DatasetInfosDict( + { + "default": DatasetInfo( + description="foo", + features=Features({"a": Value("int32")}), + builder_name="builder", + config_name="config", + version="1.0.0", + splits=[{"name": "train"}], + download_size=42, + ) + } + ), + DatasetInfosDict( + { + "v1": DatasetInfo(dataset_size=42), + "v2": DatasetInfo(dataset_size=1337), + } + ), + ], +) +def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict): + tmp_path = str(tmp_path) + dataset_infos_dict.write_to_directory(tmp_path) + reloaded = DatasetInfosDict.from_directory(tmp_path) + + # the keys of the dataset_infos_dict override each DatasetInfo's config_name attribute + for config_name, dataset_info in
dataset_infos_dict.items(): + dataset_info.config_name = config_name + # the yaml representation doesn't include fields like description or citation + # so we just test that we can recover what we can from the yaml + dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict()) + assert dataset_infos_dict == reloaded + + if dataset_infos_dict: + assert os.path.exists(os.path.join(tmp_path, "README.md")) diff --git a/testbed/huggingface__datasets/tests/test_iterable_dataset.py b/testbed/huggingface__datasets/tests/test_iterable_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ba94aece8aea55a952456a506dcd87e8967e52e9 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_iterable_dataset.py @@ -0,0 +1,1947 @@ +import pickle +from copy import deepcopy +from itertools import chain, islice + +import numpy as np +import pandas as pd +import pyarrow as pa +import pyarrow.compute as pc +import pytest + +from datasets import Dataset, load_dataset +from datasets.combine import concatenate_datasets, interleave_datasets +from datasets.features import ( + ClassLabel, + Features, + Image, + Value, +) +from datasets.formatting import get_format_type_from_alias +from datasets.info import DatasetInfo +from datasets.iterable_dataset import ( + ArrowExamplesIterable, + BufferShuffledExamplesIterable, + CyclingMultiSourcesExamplesIterable, + ExamplesIterable, + FilteredExamplesIterable, + FormattingConfig, + HorizontallyConcatenatedMultiSourcesExamplesIterable, + IterableDataset, + MappedExamplesIterable, + RandomlyCyclingMultiSourcesExamplesIterable, + SelectColumnsIterable, + ShuffledDataSourcesArrowExamplesIterable, + ShuffledDataSourcesExamplesIterable, + ShufflingConfig, + SkipExamplesIterable, + StepExamplesIterable, + TakeExamplesIterable, + TypedExamplesIterable, + VerticallyConcatenatedMultiSourcesExamplesIterable, + _BaseExamplesIterable, + _batch_arrow_tables, + _batch_to_examples, + _convert_to_arrow, + _examples_to_batch, +) + +from .utils import ( + assert_arrow_memory_doesnt_increase, + is_rng_equal, + require_dill_gt_0_3_2, + require_not_windows, + require_pyspark, + require_tf, + require_torch, +) + + +DEFAULT_N_EXAMPLES = 20 +DEFAULT_BATCH_SIZE = 4 +DEFAULT_FILEPATH = "file.txt" + +SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script + + +def generate_examples_fn(**kwargs): + kwargs = kwargs.copy() + n = kwargs.pop("n", DEFAULT_N_EXAMPLES) + filepaths = kwargs.pop("filepaths", None) + for filepath in filepaths or [DEFAULT_FILEPATH]: + if filepaths is not None: + kwargs["filepath"] = filepath + for i in range(n): + yield f"{filepath}_{i}", {"id": i, **kwargs} + + +def generate_tables_fn(**kwargs): + kwargs = kwargs.copy() + n = kwargs.pop("n", DEFAULT_N_EXAMPLES) + batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE) + filepaths = kwargs.pop("filepaths", None) + for filepath in filepaths or [DEFAULT_FILEPATH]: + buffer = [] + batch_idx = 0 + if filepaths is not None: + kwargs["filepath"] = filepath + for i in range(n): + buffer.append({"id": i, **kwargs}) + if len(buffer) == batch_size: + yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer) + buffer = [] + batch_idx += 1 + yield batch_idx, pa.Table.from_pylist(buffer) + + +@pytest.fixture +def dataset(): + ex_iterable = ExamplesIterable(generate_examples_fn, {}) + return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train") + + +@pytest.fixture +def dataset_with_several_columns(): + ex_iterable = 
ExamplesIterable( + generate_examples_fn, + {"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}}, + ) + return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train") + + +@pytest.fixture +def arrow_file(tmp_path_factory, dataset: IterableDataset): + filename = str(tmp_path_factory.mktemp("data") / "file.arrow") + Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename) + return filename + + +################################ +# +# Utilities tests +# +################################ + + +@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) +@pytest.mark.parametrize("drop_last_batch", [False, True]) +def test_convert_to_arrow(batch_size, drop_last_batch): + examples = [{"foo": i} for i in range(10)] + full_table = pa.Table.from_pylist(examples) + num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size + num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size + subtables = list( + _convert_to_arrow( + list(enumerate(examples)), + batch_size=batch_size, + drop_last_batch=drop_last_batch, + ) + ) + assert len(subtables) == num_batches + if drop_last_batch: + assert all(len(subtable) == batch_size for _, subtable in subtables) + else: + assert all(len(subtable) == batch_size for _, subtable in subtables[:-1]) + assert len(subtables[-1][1]) <= batch_size + if num_rows > 0: + reloaded = pa.concat_tables([subtable for _, subtable in subtables]) + assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() + + +@pytest.mark.parametrize( + "tables", + [ + [pa.table({"foo": range(10)})], + [pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})], + [pa.table({"foo": [i]}) for i in range(10)], + ], +) +@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) +@pytest.mark.parametrize("drop_last_batch", [False, True]) +def test_batch_arrow_tables(tables, batch_size, drop_last_batch): + full_table = pa.concat_tables(tables) + num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size + num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size + subtables = list( + _batch_arrow_tables(list(enumerate(tables)), batch_size=batch_size, drop_last_batch=drop_last_batch) + ) + assert len(subtables) == num_batches + if drop_last_batch: + assert all(len(subtable) == batch_size for _, subtable in subtables) + else: + assert all(len(subtable) == batch_size for _, subtable in subtables[:-1]) + assert len(subtables[-1][1]) <= batch_size + if num_rows > 0: + reloaded = pa.concat_tables([subtable for _, subtable in subtables]) + assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() + + +################################ +# +# _BaseExampleIterable tests +# +################################ + + +def test_examples_iterable(): + ex_iterable = ExamplesIterable(generate_examples_fn, {}) + expected = list(generate_examples_fn()) + assert next(iter(ex_iterable)) == expected[0] + assert list(ex_iterable) == expected + assert ex_iterable.iter_arrow is None + + +def test_examples_iterable_with_kwargs(): + ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"}) + expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train")) + assert list(ex_iterable) == expected + assert all("split" in ex for _, ex in ex_iterable) + assert sorted({ex["filepath"] for _, ex in 
ex_iterable}) == ["0.txt", "1.txt"] + + +def test_examples_iterable_shuffle_data_sources(): + ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}) + ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40)) + expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths + assert list(ex_iterable) == expected + + +def test_examples_iterable_shuffle_shards_and_metadata(): + def gen(filepaths, all_metadata): + for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)): + yield i, {"filepath": filepath, "metadata": metadata} + + ex_iterable = ExamplesIterable( + gen, + { + "filepaths": [f"{i}.txt" for i in range(100)], + "all_metadata": [{"id": str(i)} for i in range(100)], + }, + ) + ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42)) + out = list(ex_iterable) + filepaths_ids = [x["filepath"].split(".")[0] for _, x in out] + metadata_ids = [x["metadata"]["id"] for _, x in out] + assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way" + + +def test_arrow_examples_iterable(): + ex_iterable = ArrowExamplesIterable(generate_tables_fn, {}) + expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], []) + assert next(iter(ex_iterable))[1] == expected[0] + assert [example for _, example in ex_iterable] == expected + expected = list(generate_tables_fn()) + assert list(ex_iterable.iter_arrow()) == expected + + +def test_arrow_examples_iterable_with_kwargs(): + ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"}) + expected = sum( + [pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], [] + ) + assert [example for _, example in ex_iterable] == expected + assert all("split" in ex for _, ex in ex_iterable) + assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"] + expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")) + assert list(ex_iterable.iter_arrow()) == expected + + +def test_arrow_examples_iterable_shuffle_data_sources(): + ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]}) + ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40)) + expected = sum( + [pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], [] + ) # shuffle the filepaths + assert [example for _, example in ex_iterable] == expected + expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"])) + assert list(ex_iterable.iter_arrow()) == expected + + +@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456]) +def test_buffer_shuffled_examples_iterable(seed): + n, buffer_size = 100, 30 + generator = np.random.default_rng(seed) + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator) + + rng = deepcopy(generator) + expected_indices_used_for_shuffling = list( + islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size) + ) + # indices to pick in the shuffle buffer should all be in the right range + assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling) + # it should be random indices + assert expected_indices_used_for_shuffling != list(range(buffer_size)) + + # The final order of 
examples is the result of a shuffle buffer. + all_examples = list(generate_examples_fn(n=n)) + # We create a buffer and we pick random examples from it. + buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:] + expected = [] + for i, index_to_pick in enumerate(expected_indices_used_for_shuffling): + expected.append(buffer[index_to_pick]) + # The picked examples are directly replaced by the next examples from the iterable. + buffer[index_to_pick] = rest.pop(0) + # Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples. + rng.shuffle(buffer) + expected += buffer + + assert next(iter(ex_iterable)) == expected[0] + assert list(ex_iterable) == expected + assert sorted(ex_iterable) == sorted(all_examples) + + +def test_cycling_multi_sources_examples_iterable(): + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) + ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) + expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar")))) + + # The cycling stops as soon as one iterable is out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unnecessary + expected = expected[:-1] + + assert next(iter(ex_iterable)) == expected[0] + assert list(ex_iterable) == expected + assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable)) + + +@pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)]) +def test_randomly_cycling_multi_sources_examples_iterable(probabilities): + seed = 42 + generator = np.random.default_rng(seed) + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) + ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable( + [ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities + ) + + # The source to read from is picked at random for each example. Iteration stops as soon as one of the iterators is exhausted.
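+ # rebuild the expected output with a deep copy of the RNG, so the reference below draws the same random source indices as the iterable under test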
+ rng = deepcopy(generator) + iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar")) + indices_iterator = RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices( + rng, len(iterators), p=probabilities + ) + expected = [] + lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))] + for i in indices_iterator: + if lengths[0] == 0 or lengths[1] == 0: + break + for key, example in iterators[i]: + expected.append((key, example)) + lengths[i] -= 1 + break + else: + break + + assert next(iter(ex_iterable)) == expected[0] + assert list(ex_iterable) == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size", + [ + (3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id + (3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1 + (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 + (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 + (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None + (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0 + (3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example + ], +) +def test_mapped_examples_iterable(n, func, batched, batch_size): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size) + all_examples = [x for _, x in generate_examples_fn(n=n)] + if batched is False: + expected = [{**x, **func(x)} for x in all_examples] + else: + # For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function + all_transformed_examples = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = _examples_to_batch(examples) + transformed_batch = func(batch) + all_transformed_examples.extend(_batch_to_examples(transformed_batch)) + expected = _examples_to_batch(all_examples) + expected.update(_examples_to_batch(all_transformed_examples)) + expected = list(_batch_to_examples(expected)) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size", + [ + (3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id + (3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1 + (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 + (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 + (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None + (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0 + (3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example + ], +) +def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + is_empty = False + if batched is False: + # `drop_last_batch` has no effect here + 
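+        # (in non-batched mode the function maps examples one by one, so there
+        # is no partial final batch that could be dropped)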
expected = [{**x, **func(x)} for x in all_examples] + else: + # For batched map we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function + all_transformed_examples = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + if len(examples) < batch_size: # ignore last batch + break + batch = _examples_to_batch(examples) + transformed_batch = func(batch) + all_transformed_examples.extend(_batch_to_examples(transformed_batch)) + all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size] + if all_examples: + expected = _examples_to_batch(all_examples) + expected.update(_examples_to_batch(all_transformed_examples)) + expected = list(_batch_to_examples(expected)) + else: + is_empty = True + + if not is_empty: + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + else: + with pytest.raises(StopIteration): + next(iter(ex_iterable)) + + +@pytest.mark.parametrize( + "n, func, batched, batch_size", + [ + (3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id + ( + 25, + lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, + True, + 10, + ), # add the index to the id + (5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None + (5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0 + ], +) +def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + if batched is False: + expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)] + else: + # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function
+        all_transformed_examples = []
+        # If batch_size is None or <=0, we use the whole dataset as a single batch
+        if batch_size is None or batch_size <= 0:
+            batch_size = len(all_examples)
+        for batch_offset in range(0, len(all_examples), batch_size):
+            examples = all_examples[batch_offset : batch_offset + batch_size]
+            batch = _examples_to_batch(examples)
+            indices = list(range(batch_offset, batch_offset + len(examples)))
+            transformed_batch = func(batch, indices)
+            all_transformed_examples.extend(_batch_to_examples(transformed_batch))
+        expected = _examples_to_batch(all_examples)
+        expected.update(_examples_to_batch(all_transformed_examples))
+        expected = list(_batch_to_examples(expected))
+    assert next(iter(ex_iterable))[1] == expected[0]
+    assert [x for _, x in ex_iterable] == expected
+
+
+@pytest.mark.parametrize(
+    "n, func, batched, batch_size, remove_columns",
+    [
+        (3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]),  # just add 1 to the id
+        (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]),  # same with bs=10
+        (
+            50,
+            lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)},
+            True,
+            8,
+            ["extra_column", "id"],
+        ),  # replace each batch with a random number of rows
+        (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]),  # same with bs=None
+        (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]),  # same with bs<=0
+    ],
+)
+def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns):
+    base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"})
+    ex_iterable = MappedExamplesIterable(
+        base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns
+    )
+    all_examples = [x for _, x in generate_examples_fn(n=n)]
+    columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns]
+    if batched is False:
+        expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples]
+    else:
+        # For batched map we have to format the examples as a batch (i.e.
in one single dictionary) to pass the batch to the function + all_transformed_examples = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = _examples_to_batch(examples) + transformed_batch = func(batch) + all_transformed_examples.extend(_batch_to_examples(transformed_batch)) + expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove} + expected.update(_examples_to_batch(all_transformed_examples)) + expected = list(_batch_to_examples(expected)) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size, fn_kwargs", + [ + (3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None), + (3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}), + (25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}), + (5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None + (5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0 + ], +) +def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + if fn_kwargs is None: + fn_kwargs = {} + if batched is False: + expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples] + else: + # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function + all_transformed_examples = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = _examples_to_batch(examples) + transformed_batch = func(batch, **fn_kwargs) + all_transformed_examples.extend(_batch_to_examples(transformed_batch)) + expected = _examples_to_batch(all_examples) + expected.update(_examples_to_batch(all_transformed_examples)) + expected = list(_batch_to_examples(expected)) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size, input_columns", + [ + (3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id + (25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10 + (5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None + (5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0 + ], +) +def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] + if batched is False: + expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples] + else: + # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function + all_transformed_examples = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = _examples_to_batch(examples) + transformed_batch = func(*[batch[col] for col in columns_to_input]) + all_transformed_examples.extend(_batch_to_examples(transformed_batch)) + expected = _examples_to_batch(all_examples) + expected.update(_examples_to_batch(all_transformed_examples)) + expected = list(_batch_to_examples(expected)) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size", + [ + (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id + (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 + (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 + (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 + (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None + (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 + (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example + ], +) +def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, + func, + batched=batched, + batch_size=batch_size, + formatting=FormattingConfig(format_type="arrow"), + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + if batched is False: + expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples] + else: + expected = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = pa.Table.from_pylist(examples) + expected.extend(func(batch).to_pylist()) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size", + [ + (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id + (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 + (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 + (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 + (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None + (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 + (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example + ], +) +def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, + func, + batched=batched, + batch_size=batch_size, + drop_last_batch=True, + 
formatting=FormattingConfig(format_type="arrow"),
+    )
+    all_examples = [x for _, x in generate_examples_fn(n=n)]
+    is_empty = False
+    if batched is False:
+        # `drop_last_batch` has no effect here
+        expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples]
+    else:
+        all_transformed_examples = []
+        # If batch_size is None or <=0, we use the whole dataset as a single batch
+        if batch_size is None or batch_size <= 0:
+            batch_size = len(all_examples)
+        for batch_offset in range(0, len(all_examples), batch_size):
+            examples = all_examples[batch_offset : batch_offset + batch_size]
+            if len(examples) < batch_size:  # ignore last batch
+                break
+            batch = pa.Table.from_pylist(examples)
+            out = func(batch)
+            all_transformed_examples.extend(
+                out.to_pylist()
+            )  # we don't merge with input since they're arrow tables and not dictionaries
+        all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size]
+        if all_examples:
+            expected = all_transformed_examples
+        else:
+            is_empty = True
+
+    if not is_empty:
+        assert next(iter(ex_iterable))[1] == expected[0]
+        assert [x for _, x in ex_iterable] == expected
+    else:
+        with pytest.raises(StopIteration):
+            next(iter(ex_iterable))
+
+
+@pytest.mark.parametrize(
+    "n, func, batched, batch_size",
+    [
+        (
+            3,
+            lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)),
+            False,
+            None,
+        ),  # add the index to the id
+        (
+            25,
+            lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)),
+            True,
+            10,
+        ),  # add the index to the id
+        (5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None),  # same with bs=None
+        (5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1),  # same with bs<=0
+    ],
+)
+def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size):
+    base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
+    ex_iterable = MappedExamplesIterable(
+        base_ex_iterable,
+        func,
+        batched=batched,
+        batch_size=batch_size,
+        with_indices=True,
+        formatting=FormattingConfig(format_type="arrow"),
+    )
+    all_examples = [x for _, x in generate_examples_fn(n=n)]
+    if batched is False:
+        expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)]
+    else:
+        expected = []
+        # If batch_size is None or <=0, we use the whole dataset as a single batch
+        if batch_size is None or batch_size <= 0:
+            batch_size = len(all_examples)
+        for batch_offset in range(0, len(all_examples), batch_size):
+            examples = all_examples[batch_offset : batch_offset + batch_size]
+            batch = pa.Table.from_pylist(examples)
+            expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist())
+    assert next(iter(ex_iterable))[1] == expected[0]
+    assert [x for _, x in ex_iterable] == expected
+
+
+@pytest.mark.parametrize(
+    "n, func, batched, batch_size, remove_columns",
+    [
+        (
+            3,
+            lambda t: t.append_column("id+1", pc.add(t["id"], 1)),
+            False,
+            None,
+            ["extra_column"],
+        ),  # just add 1 to the id
+        (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]),  # same with bs=10
+        (
+            50,
+            lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}),
+            True,
+            8,
+            ["extra_column", "id"],
+        ),  # replace each batch with a random number of rows
+        (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]),  # same with bs=None
+        (5, lambda t: t.append_column("id+1",
pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0 + ], +) +def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, + func, + batched=batched, + batch_size=batch_size, + remove_columns=remove_columns, + formatting=FormattingConfig(format_type="arrow"), + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns] + if batched is False: + expected = [ + {**{k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}} + for x in all_examples + ] + else: + expected = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = pa.Table.from_pylist(examples) + expected.extend( + [{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()] + ) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size, fn_kwargs", + [ + (3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, None), + (3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, {"y": 3}), + (25, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, 10, {"y": 3}), + (5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, None, {"y": 3}), # same with bs=None + (5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, -1, {"y": 3}), # same with bs<=0 + ], +) +def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, + func, + batched=batched, + batch_size=batch_size, + fn_kwargs=fn_kwargs, + formatting=FormattingConfig(format_type="arrow"), + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + if fn_kwargs is None: + fn_kwargs = {} + if batched is False: + expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples] + else: + expected = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = pa.Table.from_pylist(examples) + expected.extend(func(batch, **fn_kwargs).to_pylist()) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size, input_columns", + [ + (3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id + (25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10 + (5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None + (5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0 + ], +) +def 
test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = MappedExamplesIterable( + base_ex_iterable, + func, + batched=batched, + batch_size=batch_size, + input_columns=input_columns, + formatting=FormattingConfig(format_type="arrow"), + ) + all_examples = [x for _, x in generate_examples_fn(n=n)] + columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] + if batched is False: + expected = [ + func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples + ] + else: + expected = [] + # If batch_size is None or <=0, we use the whole dataset as a single batch + if batch_size is None or batch_size <= 0: + batch_size = len(all_examples) + for batch_offset in range(0, len(all_examples), batch_size): + examples = all_examples[batch_offset : batch_offset + batch_size] + batch = pa.Table.from_pylist(examples) + expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist()) + assert next(iter(ex_iterable))[1] == expected[0] + assert [x for _, x in ex_iterable] == expected + + +@pytest.mark.parametrize( + "n, func, batched, batch_size", + [ + (3, lambda x: x["id"] % 2 == 0, False, None), # keep even number + (3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1 + (25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10 + (5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None + (5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0 + (3, lambda x: False, False, None), # return 0 examples + (3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10 + ], +) +def test_filtered_examples_iterable(n, func, batched, batch_size): + base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) + ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size) + all_examples = [x for _, x in generate_examples_fn(n=n)] + if batched is False: + expected = [x for x in all_examples if func(x)] + else: + # For batched filter we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function
+        expected = []
+        # If batch_size is None or <=0, we use the whole dataset as a single batch
+        if batch_size is None or batch_size <= 0:
+            batch_size = len(all_examples)
+        for batch_offset in range(0, len(all_examples), batch_size):
+            examples = all_examples[batch_offset : batch_offset + batch_size]
+            batch = _examples_to_batch(examples)
+            mask = func(batch)
+            expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
+    if expected:
+        assert next(iter(ex_iterable))[1] == expected[0]
+    assert [x for _, x in ex_iterable] == expected
+
+
+@pytest.mark.parametrize(
+    "n, func, batched, batch_size",
+    [
+        (3, lambda x, index: index % 2 == 0, False, None),  # keep even number
+        (25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10),  # same with bs=10
+        (5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None),  # same with bs=None
+        (5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1),  # same with bs<=0
+    ],
+)
+def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size):
+    base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
+    ex_iterable = FilteredExamplesIterable(
+        base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True
+    )
+    all_examples = [x for _, x in generate_examples_fn(n=n)]
+    if batched is False:
+        expected = [x for idx, x in enumerate(all_examples) if func(x, idx)]
+    else:
+        # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
+        expected = []
+        # If batch_size is None or <=0, we use the whole dataset as a single batch
+        if batch_size is None or batch_size <= 0:
+            batch_size = len(all_examples)
+        for batch_offset in range(0, len(all_examples), batch_size):
+            examples = all_examples[batch_offset : batch_offset + batch_size]
+            batch = _examples_to_batch(examples)
+            indices = list(range(batch_offset, batch_offset + len(examples)))
+            mask = func(batch, indices)
+            expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
+    assert next(iter(ex_iterable))[1] == expected[0]
+    assert [x for _, x in ex_iterable] == expected
+
+
+@pytest.mark.parametrize(
+    "n, func, batched, batch_size, input_columns",
+    [
+        (3, lambda id_: id_ % 2 == 0, False, None, ["id"]),  # keep even number
+        (25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]),  # same with bs=10
+        (3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]),  # same with bs=None
+        (3, lambda ids_: [i % 2 == 0 for i in ids_], True, -1, ["id"]),  # same with bs<=0
+    ],
+)
+def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns):
+    base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
+    ex_iterable = FilteredExamplesIterable(
+        base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns
+    )
+    all_examples = [x for _, x in generate_examples_fn(n=n)]
+    columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
+    if batched is False:
+        expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])]
+    else:
+        # For batched filter we have to format the examples as a batch (i.e.
in one single dictionary) to pass the batch to the function
+        expected = []
+        # If batch_size is None or <=0, we use the whole dataset as a single batch
+        if batch_size is None or batch_size <= 0:
+            batch_size = len(all_examples)
+        for batch_offset in range(0, len(all_examples), batch_size):
+            examples = all_examples[batch_offset : batch_offset + batch_size]
+            batch = _examples_to_batch(examples)
+            mask = func(*[batch[col] for col in columns_to_input])
+            expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
+    assert next(iter(ex_iterable))[1] == expected[0]
+    assert [x for _, x in ex_iterable] == expected
+
+
+def test_skip_examples_iterable():
+    total, count = 10, 2
+    base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
+    skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count)
+    expected = list(generate_examples_fn(n=total))[count:]
+    assert list(skip_ex_iterable) == expected
+    assert (
+        skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable
+    ), "skip examples makes the shards order fixed"
+
+
+def test_take_examples_iterable():
+    total, count = 10, 2
+    base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
+    take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count)
+    expected = list(generate_examples_fn(n=total))[:count]
+    assert list(take_ex_iterable) == expected
+    assert (
+        take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable
+    ), "take examples makes the shards order fixed"
+
+
+def test_vertically_concatenated_examples_iterable():
+    ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
+    ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
+    concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
+    expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
+    assert [x for _, x in concatenated_ex_iterable] == expected
+
+
+def test_vertically_concatenated_examples_iterable_with_different_columns():
+    # Having different columns is supported,
+    # though iterable datasets fill in the missing data with nulls
+    ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
+    ex_iterable2 = ExamplesIterable(generate_examples_fn, {})
+    concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
+    expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2]
+    assert [x for _, x in concatenated_ex_iterable] == expected
+
+
+def test_vertically_concatenated_examples_iterable_shuffle_data_sources():
+    ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10})
+    ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5})
+    concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2])
+    rng = np.random.default_rng(42)
+    shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng)
+    # make sure the list of examples iterables is shuffled, and each examples iterable is shuffled
+    expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [
+        x for _, x in ex_iterable1.shuffle_data_sources(rng)
+    ]
+    assert [x for _, x in shuffled_ex_iterable] == expected
+
+
+def test_horizontally_concatenated_examples_iterable():
+    ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10})
+    ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5})
+    concatenated_ex_iterable =
HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) + with pytest.raises(ValueError): # column "id" is duplicated -> raise an error + list(concatenated_ex_iterable) + ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"]) + concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) + expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)] + assert [x for _, x in concatenated_ex_iterable] == expected + assert ( + concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable + ), "horizontally concatenated examples makes the shards order fixed" + + +@pytest.mark.parametrize( + "ex_iterable", + [ + ExamplesIterable(generate_examples_fn, {}), + ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)), + SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]), + StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0), + CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), + VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), + HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), + RandomlyCyclingMultiSourcesExamplesIterable( + [ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42) + ), + MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x), + MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x), + FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True), + FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True), + BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)), + SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10), + TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10), + TypedExamplesIterable( + ExamplesIterable(generate_examples_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={} + ), + ], +) +def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable): + assert ex_iterable.iter_arrow is None + + +@pytest.mark.parametrize( + "ex_iterable", + [ + ArrowExamplesIterable(generate_tables_fn, {}), + ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)), + SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]), + # StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented + # CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented + VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), + # HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented + # RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented + MappedExamplesIterable( + ExamplesIterable(generate_examples_fn, {}), lambda t: t, formatting=FormattingConfig(format_type="arrow") + ), + MappedExamplesIterable( + ArrowExamplesIterable(generate_tables_fn, {}), + lambda t: t, + formatting=FormattingConfig(format_type="arrow"), + ), + FilteredExamplesIterable( + ExamplesIterable(generate_examples_fn, {}), + lambda t: True, + 
formatting=FormattingConfig(format_type="arrow"), + ), + FilteredExamplesIterable( + ArrowExamplesIterable(generate_tables_fn, {}), + lambda t: True, + formatting=FormattingConfig(format_type="arrow"), + ), + # BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented + # SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented + # TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented + TypedExamplesIterable( + ArrowExamplesIterable(generate_tables_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={} + ), + ], +) +def test_iter_arrow(ex_iterable: _BaseExamplesIterable): + assert ex_iterable.iter_arrow is not None + key, pa_table = next(ex_iterable.iter_arrow()) + assert isinstance(pa_table, pa.Table) + + +############################ +# +# IterableDataset tests +# +############################ + + +def test_iterable_dataset(): + dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {})) + expected = [x for _, x in generate_examples_fn()] + assert next(iter(dataset)) == expected[0] + assert list(dataset) == expected + + +def test_iterable_dataset_from_generator(): + data = [ + {"col_1": "0", "col_2": 0, "col_3": 0.0}, + {"col_1": "1", "col_2": 1, "col_3": 1.0}, + {"col_1": "2", "col_2": 2, "col_3": 2.0}, + {"col_1": "3", "col_2": 3, "col_3": 3.0}, + ] + + def gen(): + yield from data + + dataset = IterableDataset.from_generator(gen) + assert isinstance(dataset, IterableDataset) + assert list(dataset) == data + + +def test_iterable_dataset_from_generator_with_shards(): + def gen(shard_names): + for shard_name in shard_names: + for i in range(10): + yield {"shard_name": shard_name, "i": i} + + shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)] + dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names}) + assert isinstance(dataset, IterableDataset) + assert dataset.n_shards == len(shard_names) + + +def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str): + with assert_arrow_memory_doesnt_increase(): + dataset_from_file = IterableDataset.from_file(arrow_file) + expected_features = dataset._resolve_features().features + assert dataset_from_file.features.type == expected_features.type + assert dataset_from_file.features == expected_features + assert isinstance(dataset_from_file, IterableDataset) + assert list(dataset_from_file) == list(dataset) + + +@require_not_windows +@require_dill_gt_0_3_2 +@require_pyspark +def test_from_spark_streaming(): + import pyspark + + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + data = [ + ("0", 0, 0.0), + ("1", 1, 1.0), + ("2", 2, 2.0), + ("3", 3, 3.0), + ] + df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float") + dataset = IterableDataset.from_spark(df) + assert isinstance(dataset, IterableDataset) + results = [] + for ex in dataset: + results.append(ex) + assert results == [ + {"col_1": "0", "col_2": 0, "col_3": 0.0}, + {"col_1": "1", "col_2": 1, "col_3": 1.0}, + {"col_1": "2", "col_2": 2, "col_3": 2.0}, + {"col_1": "3", "col_2": 3, "col_3": 3.0}, + ] + + +@require_not_windows +@require_dill_gt_0_3_2 +@require_pyspark +def test_from_spark_streaming_features(): + import PIL.Image + import pyspark + + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())] + df = 
spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
+    features = Features({"idx": Value("int64"), "image": Image()})
+    dataset = IterableDataset.from_spark(
+        df,
+        features=features,
+    )
+    assert isinstance(dataset, IterableDataset)
+    results = []
+    for ex in dataset:
+        results.append(ex)
+    assert len(results) == 1
+    assert isinstance(results[0]["image"], PIL.Image.Image)
+
+
+@require_torch
+def test_iterable_dataset_torch_integration():
+    ex_iterable = ExamplesIterable(generate_examples_fn, {})
+    dataset = IterableDataset(ex_iterable)
+    import torch.utils.data
+
+    assert isinstance(dataset, torch.utils.data.IterableDataset)
+    assert isinstance(dataset, IterableDataset)
+    assert dataset._ex_iterable is ex_iterable
+
+
+@require_torch
+def test_iterable_dataset_torch_picklable():
+    import pickle
+
+    ex_iterable = ExamplesIterable(generate_examples_fn, {})
+    dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch"))
+    reloaded_dataset = pickle.loads(pickle.dumps(dataset))
+
+    import torch.utils.data
+
+    assert isinstance(reloaded_dataset, IterableDataset)
+    assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset)
+    assert reloaded_dataset._formatting.format_type == "torch"
+    assert len(list(dataset)) == len(list(reloaded_dataset))
+
+
+@require_torch
+def test_iterable_dataset_with_format_torch():
+    ex_iterable = ExamplesIterable(generate_examples_fn, {})
+    dataset = IterableDataset(ex_iterable)
+    from torch.utils.data import DataLoader
+
+    dataloader = DataLoader(dataset)
+    assert len(list(dataloader)) == len(list(ex_iterable))
+
+
+@require_torch
+def test_iterable_dataset_torch_dataloader_parallel():
+    from torch.utils.data import DataLoader
+
+    ex_iterable = ExamplesIterable(generate_examples_fn, {})
+    dataset = IterableDataset(ex_iterable)
+    dataloader = DataLoader(dataset, num_workers=2, batch_size=None)
+    result = list(dataloader)
+    expected = [example for _, example in ex_iterable]
+    assert len(result) == len(expected)
+    assert {str(x) for x in result} == {str(x) for x in expected}
+
+
+@require_torch
+@pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning")
+@pytest.mark.parametrize("n_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)])
+def test_sharded_iterable_dataset_torch_dataloader_parallel(n_shards, num_workers):
+    from torch.utils.data import DataLoader
+
+    ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in range(n_shards)]})
+    dataset = IterableDataset(ex_iterable)
+    dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
+    result = list(dataloader)
+    expected = [example for _, example in ex_iterable]
+    assert len(result) == len(expected)
+    assert {str(x) for x in result} == {str(x) for x in expected}
+
+
+@require_torch
+@pytest.mark.integration
+@pytest.mark.parametrize("num_workers", [1, 2])
+def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path):
+    from torch.utils.data import DataLoader
+
+    dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train")
+    dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers)
+    result = list(dataloader)
+    assert len(result) == 2
+
+
+@pytest.mark.parametrize("batch_size", [4, 5])
+@pytest.mark.parametrize("drop_last_batch", [False, True])
+def test_iterable_dataset_iter_batch(batch_size, drop_last_batch):
+    n = 25
+    dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n}))
+    all_examples = [ex for _, ex in
generate_examples_fn(n=n)] + expected = [] + for i in range(0, len(all_examples), batch_size): + if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch: + continue + expected.append(_examples_to_batch(all_examples[i : i + batch_size])) + assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0] + assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected + + +def test_iterable_dataset_info(): + info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42) + ex_iterable = ExamplesIterable(generate_examples_fn, {}) + dataset = IterableDataset(ex_iterable, info=info) + assert dataset.info == info + assert dataset.description == info.description + assert dataset.citation == info.citation + assert dataset.size_in_bytes == info.size_in_bytes + + +def test_iterable_dataset_set_epoch(dataset: IterableDataset): + assert dataset._epoch == 0 + dataset.set_epoch(42) + assert dataset._epoch == 42 + + +@pytest.mark.parametrize("seed", [None, 42, 1337]) +@pytest.mark.parametrize("epoch", [None, 0, 1, 10]) +def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch): + buffer_size = 10 + shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size) + base_generator = shuffled_dataset._shuffling.generator + if epoch is not None: + shuffled_dataset.set_epoch(epoch) + effective_generator = shuffled_dataset._effective_generator() + assert effective_generator is not None + if epoch is None or epoch == 0: + assert is_rng_equal(base_generator, shuffled_dataset._effective_generator()) + else: + assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator()) + effective_seed = deepcopy(base_generator).integers(0, 1 << 63) - epoch + assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator()) + + +def test_iterable_dataset_map( + dataset: IterableDataset, +): + func = lambda x: {"id+1": x["id"] + 1} # noqa: E731 + mapped_dataset = dataset.map(func) + assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable) + assert mapped_dataset._ex_iterable.function is func + assert mapped_dataset._ex_iterable.batched is False + assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])} + + +def test_iterable_dataset_map_batched( + dataset: IterableDataset, +): + func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731 + batch_size = 3 + dataset = dataset.map(func, batched=True, batch_size=batch_size) + assert isinstance(dataset._ex_iterable, MappedExamplesIterable) + assert dataset._ex_iterable.function is func + assert dataset._ex_iterable.batch_size == batch_size + assert next(iter(dataset)) == {"id": 0, "id+1": 1} + + +def test_iterable_dataset_map_complex_features( + dataset: IterableDataset, +): + # https://github.com/huggingface/datasets/issues/3505 + ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"}) + features = Features( + { + "id": Value("int64"), + "label": Value("string"), + } + ) + dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) + dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"])) + dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x}) + assert isinstance(dataset._ex_iterable, MappedExamplesIterable) + features["label"] = ClassLabel(names=["negative", "positive"]) + assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [ + features.encode_example(ex) 
for _, ex in ex_iterable
+    ]
+
+
+def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None:
+    # https://github.com/huggingface/datasets/issues/3888
+    ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"})
+    features_before_map = Features(
+        {
+            "id": Value("int64"),
+            "label": Value("string"),
+        }
+    )
+    dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map))
+    assert dataset.info.features is not None
+    assert dataset.info.features == features_before_map
+    features_after_map = Features(
+        {
+            "id": Value("int64"),
+            "label": Value("string"),
+            "target": Value("string"),
+        }
+    )
+    dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map)
+    assert dataset.info.features is not None
+    assert dataset.info.features == features_after_map
+
+
+def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None:
+    fn_kwargs = {"y": 1}
+    mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs)
+    assert mapped_dataset._ex_iterable.batched is False
+    assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
+    batch_size = 3
+    mapped_dataset = dataset.map(
+        lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs
+    )
+    assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable)
+    assert mapped_dataset._ex_iterable.batch_size == batch_size
+    assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1}
+
+
+def test_iterable_dataset_filter(dataset: IterableDataset) -> None:
+    fn_kwargs = {"y": 1}
+    filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs)
+    assert filtered_dataset._ex_iterable.batched is False
+    assert next(iter(filtered_dataset)) == {"id": 1}
+
+
+@pytest.mark.parametrize("seed", [42, 1337, 101010, 123456])
+@pytest.mark.parametrize("epoch", [None, 0, 1])
+def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch):
+    buffer_size = 3
+    dataset = deepcopy(dataset)
+    dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"]
+    dataset = dataset.shuffle(seed, buffer_size=buffer_size)
+    assert isinstance(dataset._shuffling, ShufflingConfig)
+    assert isinstance(dataset._shuffling.generator, np.random.Generator)
+    assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed))
+    # The effective seed is derived from the seed and the epoch
+    # (an integer drawn from the seeded generator, minus the epoch)
+    if epoch is None or epoch == 0:
+        effective_seed = seed
+    else:
+        dataset.set_epoch(epoch)
+        effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch
+    # Shuffling adds a shuffle buffer
+    expected_first_example_index = next(
+        iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size))
+    )
+    assert isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable)
+    # It also shuffles the underlying examples iterable
+    expected_ex_iterable = ExamplesIterable(
+        generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}
+    ).shuffle_data_sources(np.random.default_rng(effective_seed))
+    assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable)
+    assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1]
+
+
+@pytest.mark.parametrize(
+    "features",
+    [
+        None,
+        Features(
+            {
+                "id": Value("int64"),
+                "label": Value("int64"),
+            }
+        ),
+        Features(
+            {
+                "id": Value("int64"),
+                "label": ClassLabel(names=["negative", "positive"]),
+            }
+        ),
+    ],
+)
+def test_iterable_dataset_features(features):
+    ex_iterable =
ExamplesIterable(generate_examples_fn, {"label": 0})
+    dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
+    if features:
+        expected = [features.encode_example(x) for _, x in ex_iterable]
+    else:
+        expected = [x for _, x in ex_iterable]
+    assert list(dataset) == expected
+
+
+def test_iterable_dataset_features_cast_to_python():
+    ex_iterable = ExamplesIterable(
+        generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1}
+    )
+    features = Features(
+        {
+            "id": Value("int64"),
+            "timestamp": Value("timestamp[us]"),
+            "array": [Value("int64")],
+        }
+    )
+    dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
+    assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}]
+
+
+@pytest.mark.parametrize("format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax"])
+def test_iterable_dataset_with_format(dataset: IterableDataset, format_type):
+    formatted_dataset = dataset.with_format(format_type)
+    assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type)
+
+
+@require_torch
+def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset):
+    from torch.utils.data import DataLoader, _DatasetKind
+
+    dataloader = DataLoader(dataset)
+    assert dataloader._dataset_kind == _DatasetKind.Iterable
+    out = list(dataloader)
+    assert len(out) == DEFAULT_N_EXAMPLES
+
+
+@pytest.mark.parametrize("n", [0, 2, int(1e10)])
+def test_iterable_dataset_skip(dataset: IterableDataset, n):
+    skip_dataset = dataset.skip(n)
+    assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable)
+    assert skip_dataset._ex_iterable.n == n
+    assert list(skip_dataset) == list(dataset)[n:]
+
+
+@pytest.mark.parametrize("n", [0, 2, int(1e10)])
+def test_iterable_dataset_take(dataset: IterableDataset, n):
+    take_dataset = dataset.take(n)
+    assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable)
+    assert take_dataset._ex_iterable.n == n
+    assert list(take_dataset) == list(dataset)[:n]
+
+
+@pytest.mark.parametrize("method", ["skip", "take"])
+def test_iterable_dataset_shuffle_after_skip_or_take(method):
+    seed = 42
+    n, n_shards = 3, 10
+    count = 7
+    ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]})
+    dataset = IterableDataset(ex_iterable)
+    dataset = dataset.skip(n) if method == "skip" else dataset.take(count)
+    shuffled_dataset = dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES)
+    # shuffling a skip/take dataset should keep the same examples and not shuffle the shards
+    key = lambda x: f"{x['filepath']}_{x['id']}"  # noqa: E731
+    assert sorted(dataset, key=key) == sorted(shuffled_dataset, key=key)
+
+
+def test_iterable_dataset_add_column(dataset_with_several_columns):
+    new_column = list(range(DEFAULT_N_EXAMPLES))
+    new_dataset = dataset_with_several_columns.add_column("new_column", new_column)
+    assert list(new_dataset) == [
+        {**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns)
+    ]
+    new_dataset = new_dataset._resolve_features()
+    assert "new_column" in new_dataset.column_names
+
+
+def test_iterable_dataset_rename_column(dataset_with_several_columns):
+    new_dataset = dataset_with_several_columns.rename_column("id", "new_id")
+    assert list(new_dataset) == [
+        {("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns
+    ]
+    assert new_dataset.features is None
+    assert
new_dataset.column_names is None + # rename the column if ds.features was not None + new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id") + assert new_dataset.features is not None + assert new_dataset.column_names is not None + assert "id" not in new_dataset.column_names + assert "new_id" in new_dataset.column_names + + +def test_iterable_dataset_rename_columns(dataset_with_several_columns): + column_mapping = {"id": "new_id", "filepath": "filename"} + new_dataset = dataset_with_several_columns.rename_columns(column_mapping) + assert list(new_dataset) == [ + {column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns + ] + assert new_dataset.features is None + assert new_dataset.column_names is None + # rename the columns if ds.features was not None + new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping) + assert new_dataset.features is not None + assert new_dataset.column_names is not None + assert all(c not in new_dataset.column_names for c in ["id", "filepath"]) + assert all(c in new_dataset.column_names for c in ["new_id", "filename"]) + + +def test_iterable_dataset_remove_columns(dataset_with_several_columns): + new_dataset = dataset_with_several_columns.remove_columns("id") + assert list(new_dataset) == [ + {k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns + ] + assert new_dataset.features is None + new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"]) + assert list(new_dataset) == [ + {k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns + ] + assert new_dataset.features is None + assert new_dataset.column_names is None + # remove the columns if ds.features was not None + new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"]) + assert new_dataset.features is not None + assert new_dataset.column_names is not None + assert all(c not in new_dataset.features for c in ["id", "filepath"]) + assert all(c not in new_dataset.column_names for c in ["id", "filepath"]) + + +def test_iterable_dataset_select_columns(dataset_with_several_columns): + new_dataset = dataset_with_several_columns.select_columns("id") + assert list(new_dataset) == [ + {k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns + ] + assert new_dataset.features is None + new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"]) + assert list(new_dataset) == [ + {k: v for k, v in example.items() if k in ("id", "filepath")} for example in dataset_with_several_columns + ] + assert new_dataset.features is None + # select the columns if ds.features was not None + new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"]) + assert new_dataset.features is not None + assert new_dataset.column_names is not None + assert all(c in new_dataset.features for c in ["id", "filepath"]) + assert all(c in new_dataset.column_names for c in ["id", "filepath"]) + + +def test_iterable_dataset_cast_column(): + ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10}) + features = Features({"id": Value("int64"), "label": Value("int64")}) + dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) + casted_dataset = dataset.cast_column("label", Value("bool")) + casted_features = features.copy() + casted_features["label"] = Value("bool") + assert 
list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable] + + +def test_iterable_dataset_cast(): + ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10}) + features = Features({"id": Value("int64"), "label": Value("int64")}) + dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) + new_features = Features({"id": Value("int64"), "label": Value("bool")}) + casted_dataset = dataset.cast(new_features) + assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable] + + +def test_iterable_dataset_resolve_features(): + ex_iterable = ExamplesIterable(generate_examples_fn, {}) + dataset = IterableDataset(ex_iterable) + assert dataset.features is None + assert dataset.column_names is None + dataset = dataset._resolve_features() + assert dataset.features == Features( + { + "id": Value("int64"), + } + ) + assert dataset.column_names == ["id"] + + +def test_iterable_dataset_resolve_features_keep_order(): + def gen(): + yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}]) + + ex_iterable = ExamplesIterable(gen, {}) + dataset = IterableDataset(ex_iterable)._resolve_features() + # columns appear in order of appearance in the dataset + assert list(dataset.features) == ["a", "c", "b"] + assert dataset.column_names == ["a", "c", "b"] + + +def test_iterable_dataset_with_features_fill_with_none(): + def gen(): + yield from zip(range(2), [{"a": 1}, {"b": 1}]) + + ex_iterable = ExamplesIterable(gen, {}) + info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")})) + dataset = IterableDataset(ex_iterable, info=info) + assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}] + + +def test_concatenate_datasets(): + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) + dataset1 = IterableDataset(ex_iterable1) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) + dataset2 = IterableDataset(ex_iterable2) + concatenated_dataset = concatenate_datasets([dataset1, dataset2]) + assert list(concatenated_dataset) == list(dataset1) + list(dataset2) + + +def test_concatenate_datasets_resolves_features(): + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) + dataset1 = IterableDataset(ex_iterable1) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) + dataset2 = IterableDataset(ex_iterable2) + concatenated_dataset = concatenate_datasets([dataset1, dataset2]) + assert concatenated_dataset.features is not None + assert sorted(concatenated_dataset.features) == ["id", "label"] + + +def test_concatenate_datasets_with_different_columns(): + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) + dataset1 = IterableDataset(ex_iterable1) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {}) + dataset2 = IterableDataset(ex_iterable2) + # missing column "label" -> it should be replaced with nulls + extended_dataset2_list = [{"label": None, **x} for x in dataset2] + + concatenated_dataset = concatenate_datasets([dataset1, dataset2]) + assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list + # change order + concatenated_dataset = concatenate_datasets([dataset2, dataset1]) + assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1) + + +def test_concatenate_datasets_axis_1(): + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) + dataset1 = IterableDataset(ex_iterable1) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) + dataset2 = 
IterableDataset(ex_iterable2) + with pytest.raises(ValueError): # column "id" is duplicated -> raise an error + concatenate_datasets([dataset1, dataset2], axis=1) + concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1) + assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)] + + +def test_concatenate_datasets_axis_1_resolves_features(): + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) + dataset1 = IterableDataset(ex_iterable1) + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) + dataset2 = IterableDataset(ex_iterable2).remove_columns("id") + concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1) + assert concatenated_dataset.features is not None + assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"] + + +def test_concatenate_datasets_axis_1_with_different_lengths(): + n1 = 10 + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1}) + dataset1 = IterableDataset(ex_iterable1) + n2 = 5 + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2}) + dataset2 = IterableDataset(ex_iterable2).remove_columns("id") + # missing rows -> they should be replaced with nulls + extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2) + + concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1) + assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)] + # change order + concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1) + assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)] + + +@pytest.mark.parametrize( + "probas, seed, expected_length, stopping_strategy", + [ + (None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"), + ([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"), + ([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"), + ([0.2, 0.5, 0.3], 42, None, "first_exhausted"), + ([0.1, 0.1, 0.8], 1337, None, "first_exhausted"), + ([0.5, 0.2, 0.3], 101010, None, "first_exhausted"), + (None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"), + ([0.2, 0.5, 0.3], 42, None, "all_exhausted"), + ([0.1, 0.1, 0.8], 1337, None, "all_exhausted"), + ([0.5, 0.2, 0.3], 101010, None, "all_exhausted"), + ], +) +def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy): + d1 = dataset + d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x}) + d3 = dataset.with_format("python") + datasets = [d1, d2, d3] + + merged_dataset = interleave_datasets( + datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy + ) + + def fill_default(example): + return {"id": None, "id+1": None, **example} + + # Check the examples iterable + assert isinstance( + merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable) + ) + # Check that it is deterministic + if seed is not None: + merged_dataset2 = interleave_datasets( + [d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy + ) + assert list(merged_dataset) == list(merged_dataset2) + # Check features + assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")}) + # Check first example + if seed is not None: + rng = np.random.default_rng(seed) + i = next(iter(RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas))) + assert 
next(iter(merged_dataset)) == fill_default(next(iter(datasets[i]))) + else: + assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets) + # Compute the length in case it's random + if expected_length is None: + expected_length = 0 + counts = np.array([len(list(d)) for d in datasets]) + bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any + rng = np.random.default_rng(seed) + for i in RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas): + counts[i] -= 1 + expected_length += 1 + if bool_strategy_func(counts <= 0): + break + # Check length + assert len(list(merged_dataset)) == expected_length + + +def test_interleave_datasets_with_features( + dataset: IterableDataset, +): + features = Features( + { + "id": Value("int64"), + "label": ClassLabel(names=["negative", "positive"]), + } + ) + ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0}) + dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) + + merged_dataset = interleave_datasets([dataset, dataset_with_features]) + assert merged_dataset.features == features + + +def test_interleave_datasets_with_oversampling(): + # Test hardcoded results + d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {})) + d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {})) + d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {})) + + expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] + + # Check oversampling strategy without probabilities + assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values + + # Check oversampling strategy with probabilities + expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13] + + values = [ + x["a"] + for x in interleave_datasets( + [d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted" + ) + ] + + assert values == expected_values + + +@require_torch +def test_with_format_torch(dataset_with_several_columns: IterableDataset): + import torch + + dset = dataset_with_several_columns.with_format(type="torch") + example = next(iter(dset)) + batch = next(iter(dset.iter(batch_size=3))) + assert len(example) == 3 + assert isinstance(example["id"], torch.Tensor) + assert list(example["id"].shape) == [] + assert example["id"].item() == 0 + assert isinstance(batch["id"], torch.Tensor) + assert isinstance(example["filepath"], list) + assert isinstance(example["filepath"][0], str) + assert example["filepath"][0] == "data0.txt" + assert isinstance(batch["filepath"], list) + assert isinstance(example["metadata"], dict) + assert isinstance(example["metadata"]["sources"], list) + assert isinstance(example["metadata"]["sources"][0], str) + assert isinstance(batch["metadata"], list) + + +@require_tf +def test_with_format_tf(dataset_with_several_columns: IterableDataset): + import tensorflow as tf + + dset = dataset_with_several_columns.with_format(type="tensorflow") + example = next(iter(dset)) + batch = next(iter(dset.iter(batch_size=3))) + assert isinstance(example["id"], tf.Tensor) + assert list(example["id"].shape) == [] + assert example["id"].numpy().item() == 0 + assert isinstance(batch["id"], tf.Tensor) + assert isinstance(example["filepath"], tf.Tensor) + assert example["filepath"][0] ==
b"data0.txt" + assert isinstance(batch["filepath"], tf.Tensor) + assert isinstance(example["metadata"], dict) + assert isinstance(example["metadata"]["sources"], tf.Tensor) + assert isinstance(batch["metadata"], list) + + +def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset): + def func(example): + return {"array": np.array([1, 2, 3])} + + dset_test = dataset.map(func) + example = next(iter(dset_test)) + # not aligned with Dataset.map because we don't convert back to lists after map() + assert isinstance(example["array"], np.ndarray) + + +def test_formatted_map(dataset: IterableDataset): + dataset = dataset.with_format("np") + assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray) + dataset = dataset.with_format(None) + assert isinstance(next(dataset.iter(batch_size=3))["id"], list) + + def add_one_numpy(example): + assert isinstance(example["id"], np.ndarray) + return {"id": example["id"] + 1} + + dataset = dataset.with_format("np") + dataset = dataset.map(add_one_numpy, batched=True) + assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray) + dataset = dataset.with_format(None) + assert isinstance(next(dataset.iter(batch_size=3))["id"], list) + + +@pytest.mark.parametrize("n_shards1, n_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)]) +def test_interleave_dataset_with_sharding(n_shards1, n_shards2, num_workers): + from torch.utils.data import DataLoader + + ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(n_shards1)]}) + dataset1 = IterableDataset(ex_iterable1).with_format("torch") + ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(n_shards2)]}) + dataset2 = IterableDataset(ex_iterable2).with_format("torch") + + dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted") + assert dataset_merged.n_shards == min(n_shards1, n_shards2) + dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers) + result = list(dataloader) + expected_length = 2 * min( + len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2]) + ) + # some samples may be missing because the stopping strategy is applied per process + assert expected_length - num_workers <= len(result) <= expected_length + assert len(result) == len({str(x) for x in result}) + + +def filter_func(batch): + return batch["id"] == 4 + + +def map_func(batch): + batch["id"] *= 2 + return batch + + +def test_pickle_after_many_transforms(dataset_with_several_columns): + dataset = dataset_with_several_columns + dataset = dataset.remove_columns(["filepath"]) + dataset = dataset.take(5) + dataset = dataset.map(map_func) + dataset = dataset.shuffle() + dataset = dataset.skip(1) + dataset = dataset.filter(filter_func) + dataset = dataset.add_column("additional_col", ["something"]) + dataset = dataset.rename_column("metadata", "metadata1") + dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"}) + dataset = dataset.select_columns(["id1", "additional_col"]) + + unpickled_dataset = pickle.loads(pickle.dumps(dataset)) + + assert list(unpickled_dataset) == list(dataset) diff --git a/testbed/huggingface__datasets/tests/test_metadata_util.py b/testbed/huggingface__datasets/tests/test_metadata_util.py new file mode 100644 index 0000000000000000000000000000000000000000..7c487fb11f860e4f0fc99d9cae5d6bd54c0c605d --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_metadata_util.py @@ 
-0,0 +1,251 @@ +import re +import sys +import tempfile +import unittest +from pathlib import Path + +import pytest +import yaml +from huggingface_hub import DatasetCard, DatasetCardData + +from datasets.config import METADATA_CONFIGS_FIELD +from datasets.utils.metadata import MetadataConfigs + + +def _dedent(string: str) -> str: + indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines()) + return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)]) + + +README_YAML = """\ +--- +language: +- zh +- en +task_ids: +- sentiment-classification +--- +# Begin of markdown + +Some cool dataset card +""" + +README_EMPTY_YAML = """\ +--- +--- +# Begin of markdown + +Some cool dataset card +""" + + +README_NO_YAML = """\ +# Begin of markdown + +Some cool dataset card +""" + + +README_METADATA_CONFIG_INCORRECT_FORMAT = f"""\ +--- +{METADATA_CONFIGS_FIELD}: + data_dir: v1 + drop_labels: true +--- +""" + + +README_METADATA_SINGLE_CONFIG = f"""\ +--- +{METADATA_CONFIGS_FIELD}: + - config_name: custom + data_dir: v1 + drop_labels: true +--- +""" + + +README_METADATA_TWO_CONFIGS_WITH_DEFAULT_FLAG = f"""\ +--- +{METADATA_CONFIGS_FIELD}: + - config_name: v1 + data_dir: v1 + drop_labels: true + - config_name: v2 + data_dir: v2 + drop_labels: false + default: true +--- +""" + + +README_METADATA_TWO_CONFIGS_WITH_DEFAULT_NAME = f"""\ +--- +{METADATA_CONFIGS_FIELD}: + - config_name: custom + data_dir: custom + drop_labels: true + - config_name: default + data_dir: data + drop_labels: false +--- +""" + + +EXPECTED_METADATA_SINGLE_CONFIG = {"custom": {"data_dir": "v1", "drop_labels": True}} +EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_FLAG = { + "v1": {"data_dir": "v1", "drop_labels": True}, + "v2": {"data_dir": "v2", "drop_labels": False, "default": True}, +} +EXPECTED_METADATA_TWO_CONFIGS_DEFAULT_NAME = { + "custom": {"data_dir": "custom", "drop_labels": True}, + "default": {"data_dir": "data", "drop_labels": False}, +} + + +@pytest.fixture +def data_dir_with_two_subdirs(tmp_path): + data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" + cats_data_dir = data_dir / "cats" + cats_data_dir.mkdir(parents=True) + dogs_data_dir = data_dir / "dogs" + dogs_data_dir.mkdir(parents=True) + + with open(cats_data_dir / "cat.jpg", "wb") as f: + f.write(b"this_is_a_cat_image_bytes") + with open(dogs_data_dir / "dog.jpg", "wb") as f: + f.write(b"this_is_a_dog_image_bytes") + + return str(data_dir) + + +class TestMetadataUtils(unittest.TestCase): + def test_metadata_dict_from_readme(self): + with tempfile.TemporaryDirectory() as tmp_dir: + path = Path(tmp_dir) / "README.md" + with open(path, "w+") as readme_file: + readme_file.write(README_YAML) + dataset_card_data = DatasetCard.load(path).data + self.assertDictEqual( + dataset_card_data.to_dict(), {"language": ["zh", "en"], "task_ids": ["sentiment-classification"]} + ) + + with open(path, "w+") as readme_file: + readme_file.write(README_EMPTY_YAML) + if ( + sys.platform != "win32" + ): # there is a bug on windows, see https://github.com/huggingface/huggingface_hub/issues/1546 + dataset_card_data = DatasetCard.load(path).data + self.assertDictEqual(dataset_card_data.to_dict(), {}) + + with open(path, "w+") as readme_file: + readme_file.write(README_NO_YAML) + dataset_card_data = DatasetCard.load(path).data + self.assertEqual(dataset_card_data.to_dict(), {}) + + def test_from_yaml_string(self): + valid_yaml_string = _dedent( + """\ + annotations_creators: + - found + language_creators: + - found 
+ language: + - en + license: + - unknown + multilinguality: + - monolingual + pretty_name: Test Dataset + size_categories: + - 10K<n<100K + assert scores[0] > 0 + assert indices[0] == 1 + + +@require_elasticsearch +class ElasticSearchIndexTest(TestCase): + def test_elasticsearch(self): + from elasticsearch import Elasticsearch + + with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch( + "elasticsearch.client.IndicesClient.create" + ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk: + es_client = Elasticsearch() + mocked_index_create.return_value = {"acknowledged": True} + index = ElasticSearchIndex(es_client=es_client) + mocked_bulk.return_value = [(True, None)] * 3 + index.add_documents(["foo", "bar", "foobar"]) + + # single query + query = "foo" + mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} + scores, indices = index.search(query) + self.assertEqual(scores[0], 1) + self.assertEqual(indices[0], 0) + + # single query with timeout + query = "foo" + mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} + scores, indices = index.search(query, request_timeout=30) + self.assertEqual(scores[0], 1) + self.assertEqual(indices[0], 0) + + # batched queries + queries = ["foo", "bar", "foobar"] + mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} + total_scores, total_indices = index.search_batch(queries) + best_scores = [scores[0] for scores in total_scores] + best_indices = [indices[0] for indices in total_indices] + self.assertGreater(np.min(best_scores), 0) + self.assertListEqual([1, 1, 1], best_indices) + + # batched queries with timeout + queries = ["foo", "bar", "foobar"] + mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} + total_scores, total_indices = index.search_batch(queries, request_timeout=30) + best_scores = [scores[0] for scores in total_scores] + best_indices = [indices[0] for indices in total_indices] + self.assertGreater(np.min(best_scores), 0) + self.assertListEqual([1, 1, 1], best_indices) diff --git a/testbed/huggingface__datasets/tests/test_splits.py b/testbed/huggingface__datasets/tests/test_splits.py new file mode 100644 index 0000000000000000000000000000000000000000..bce980e36ab710b91f61834c55e5369a19b60354 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_splits.py @@ -0,0 +1,36 @@ +import pytest + +from datasets.splits import SplitDict, SplitInfo +from datasets.utils.py_utils import asdict + + +@pytest.mark.parametrize( + "split_dict", + [ + SplitDict(), + SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}), + SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}), + SplitDict({"train": SplitInfo()}), + ], +) +def test_split_dict_to_yaml_list(split_dict: SplitDict): + split_dict_yaml_list = split_dict._to_yaml_list() + assert len(split_dict_yaml_list) == len(split_dict) + reloaded = SplitDict._from_yaml_list(split_dict_yaml_list) + for split_name, split_info in split_dict.items(): + # dataset_name field is deprecated, and is therefore not part of the YAML dump + split_info.dataset_name = None + # the split name of split_dict takes over the name of the split info object + split_info.name = split_name + assert split_dict == reloaded + + +@pytest.mark.parametrize( + "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")] +) +def test_split_dict_asdict_has_dataset_name(split_info): + # For backward compatibility, we need
asdict(split_dict) to return split info dictionaries with the "dataset_name" + # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files + split_dict_asdict = asdict(SplitDict({"train": split_info})) + assert "dataset_name" in split_dict_asdict["train"] + assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name diff --git a/testbed/huggingface__datasets/tests/test_table.py b/testbed/huggingface__datasets/tests/test_table.py new file mode 100644 index 0000000000000000000000000000000000000000..ba6dfbfb5f2f9849e0612ef4a45ca4460a6acdca --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_table.py @@ -0,0 +1,1284 @@ +import copy +import pickle +import warnings +from typing import List, Union + +import numpy as np +import pyarrow as pa +import pytest + +import datasets +from datasets import Sequence, Value +from datasets.features.features import Array2D, Array2DExtensionType, ClassLabel, Features, Image +from datasets.table import ( + ConcatenationTable, + InMemoryTable, + MemoryMappedTable, + Table, + TableBlock, + _in_memory_arrow_table_from_buffer, + _in_memory_arrow_table_from_file, + _interpolation_search, + _is_extension_type, + _memory_mapped_arrow_table_from_file, + array_concat, + cast_array_to_feature, + concat_tables, + embed_array_storage, + embed_table_storage, + inject_arrow_table_documentation, + table_cast, + table_iter, +) + +from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow + + +@pytest.fixture(scope="session") +def in_memory_pa_table(arrow_file) -> pa.Table: + return pa.ipc.open_stream(arrow_file).read_all() + + +def _to_testing_blocks(table: TableBlock) -> List[List[TableBlock]]: + assert len(table) > 2 + blocks = [ + [table.slice(0, 2)], + [table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])], + ] + return blocks + + +@pytest.fixture(scope="session") +def in_memory_blocks(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table) + return _to_testing_blocks(table) + + +@pytest.fixture(scope="session") +def memory_mapped_blocks(arrow_file): + table = MemoryMappedTable.from_file(arrow_file) + return _to_testing_blocks(table) + + +@pytest.fixture(scope="session") +def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks): + return in_memory_blocks[:1] + memory_mapped_blocks[1:] + + +def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable): + with assert_arrow_memory_doesnt_increase(): + copied_table = copy.deepcopy(table) + assert isinstance(copied_table, MemoryMappedTable) + assert copied_table.table == table.table + + +def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable): + with assert_arrow_memory_increases(): + copied_table = copy.deepcopy(table) + assert isinstance(copied_table, MemoryMappedTable) + assert copied_table.table == table.table + + +def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable): + with assert_arrow_memory_doesnt_increase(): + pickled_table = pickle.dumps(table) + unpickled_table = pickle.loads(pickled_table) + assert isinstance(unpickled_table, MemoryMappedTable) + assert unpickled_table.table == table.table + + +def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable): + with assert_arrow_memory_increases(): + pickled_table = pickle.dumps(table) + unpickled_table = pickle.loads(pickled_table) + assert isinstance(unpickled_table, MemoryMappedTable) + assert
unpickled_table.table == table.table + + +def assert_index_attributes_equal(table: Table, other: Table): + assert table._batches == other._batches + np.testing.assert_array_equal(table._offsets, other._offsets) + assert table._schema == other._schema + + +def add_suffix_to_column_names(table, suffix): + return table.rename_columns([f"{name}{suffix}" for name in table.column_names]) + + +def test_inject_arrow_table_documentation(in_memory_pa_table): + method = pa.Table.slice + + def function_to_wrap(*args): + return method(*args) + + args = (0, 1) + wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap) + assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args) + assert "pyarrow.Table" not in wrapped_method.__doc__ + assert "Table" in wrapped_method.__doc__ + + +def test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table): + with assert_arrow_memory_increases(): + pa_table = _in_memory_arrow_table_from_file(arrow_file) + assert in_memory_pa_table == pa_table + + +def test_in_memory_arrow_table_from_buffer(in_memory_pa_table): + with assert_arrow_memory_increases(): + buf_writer = pa.BufferOutputStream() + writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) + writer.write_table(in_memory_pa_table) + writer.close() + buf_writer.close() + pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue()) + assert in_memory_pa_table == pa_table + + +def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table): + with assert_arrow_memory_doesnt_increase(): + pa_table = _memory_mapped_arrow_table_from_file(arrow_file) + assert in_memory_pa_table == pa_table + + +def test_table_init(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table.table == in_memory_pa_table + + +def test_table_validate(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table.validate() == in_memory_pa_table.validate() + + +def test_table_equals(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table.equals(in_memory_pa_table) + + +def test_table_to_batches(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table.to_batches() == in_memory_pa_table.to_batches() + + +def test_table_to_pydict(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table.to_pydict() == in_memory_pa_table.to_pydict() + + +def test_table_to_string(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table.to_string() == in_memory_pa_table.to_string() + + +def test_table_field(in_memory_pa_table): + assert "tokens" in in_memory_pa_table.column_names + table = Table(in_memory_pa_table) + assert table.field("tokens") == in_memory_pa_table.field("tokens") + + +def test_table_column(in_memory_pa_table): + assert "tokens" in in_memory_pa_table.column_names + table = Table(in_memory_pa_table) + assert table.column("tokens") == in_memory_pa_table.column("tokens") + + +def test_table_itercolumns(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns())) + assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns()) + + +def test_table_getitem(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert table[0] == in_memory_pa_table[0] + + +def test_table_len(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert len(table) == len(in_memory_pa_table) + + +def test_table_str(in_memory_pa_table): + table = Table(in_memory_pa_table) + assert str(table) == 
str(in_memory_pa_table).replace("pyarrow.Table", "Table") + assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table") + + +@pytest.mark.parametrize( + "attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"] +) +def test_table_attributes(in_memory_pa_table, attribute): + table = Table(in_memory_pa_table) + assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute) + + +def test_in_memory_table_from_file(arrow_file, in_memory_pa_table): + with assert_arrow_memory_increases(): + table = InMemoryTable.from_file(arrow_file) + assert table.table == in_memory_pa_table + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_from_buffer(in_memory_pa_table): + with assert_arrow_memory_increases(): + buf_writer = pa.BufferOutputStream() + writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema) + writer.write_table(in_memory_pa_table) + writer.close() + buf_writer.close() + table = InMemoryTable.from_buffer(buf_writer.getvalue()) + assert table.table == in_memory_pa_table + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_from_pandas(in_memory_pa_table): + df = in_memory_pa_table.to_pandas() + with assert_arrow_memory_increases(): + # with no schema it might infer another order of the fields in the schema + table = InMemoryTable.from_pandas(df) + assert isinstance(table, InMemoryTable) + # by specifying schema we get the same order of features, and so the exact same table + table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema) + assert table.table == in_memory_pa_table + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_from_arrays(in_memory_pa_table): + arrays = list(in_memory_pa_table.columns) + names = list(in_memory_pa_table.column_names) + table = InMemoryTable.from_arrays(arrays, names=names) + assert table.table == in_memory_pa_table + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_from_pydict(in_memory_pa_table): + pydict = in_memory_pa_table.to_pydict() + with assert_arrow_memory_increases(): + table = InMemoryTable.from_pydict(pydict) + assert isinstance(table, InMemoryTable) + assert table.table == pa.Table.from_pydict(pydict) + + +def test_in_memory_table_from_pylist(in_memory_pa_table): + pylist = InMemoryTable(in_memory_pa_table).to_pylist() + table = InMemoryTable.from_pylist(pylist) + assert isinstance(table, InMemoryTable) + assert pylist == table.to_pylist() + + +def test_in_memory_table_from_batches(in_memory_pa_table): + batches = list(in_memory_pa_table.to_batches()) + table = InMemoryTable.from_batches(batches) + assert table.table == in_memory_pa_table + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_deepcopy(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table) + copied_table = copy.deepcopy(table) + assert table.table == copied_table.table + assert_index_attributes_equal(table, copied_table) + # deepcopy must return the exact same arrow objects since they are immutable + assert table.table is copied_table.table + assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) + + +def test_in_memory_table_pickle(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table) + pickled_table = pickle.dumps(table) + unpickled_table = pickle.loads(pickled_table) + assert unpickled_table.table == table.table + assert_index_attributes_equal(table, unpickled_table) + + +@slow +def 
test_in_memory_table_pickle_big_table(): + big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)}) + length = len(big_table_4GB) + big_table_4GB = pickle.dumps(big_table_4GB) + big_table_4GB = pickle.loads(big_table_4GB) + assert len(big_table_4GB) == length + + +def test_in_memory_table_slice(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table).slice(1, 2) + assert table.table == in_memory_pa_table.slice(1, 2) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_filter(in_memory_pa_table): + mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) + table = InMemoryTable(in_memory_pa_table).filter(mask) + assert table.table == in_memory_pa_table.filter(mask) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_flatten(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table).flatten() + assert table.table == in_memory_pa_table.flatten() + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_combine_chunks(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table).combine_chunks() + assert table.table == in_memory_pa_table.combine_chunks() + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_cast(in_memory_pa_table): + assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types + schema = pa.schema( + { + k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) + for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) + } + ) + table = InMemoryTable(in_memory_pa_table).cast(schema) + assert table.table == in_memory_pa_table.cast(schema) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_cast_reorder_struct(): + table = InMemoryTable( + pa.Table.from_pydict( + { + "top": [ + { + "foo": "a", + "bar": "b", + } + ] + } + ) + ) + schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})}) + assert table.cast(schema).schema == schema + + +def test_in_memory_table_cast_with_hf_features(): + table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]})) + features = Features({"labels": ClassLabel(names=["neg", "pos"])}) + schema = features.arrow_schema + assert table.cast(schema).schema == schema + assert Features.from_arrow_schema(table.cast(schema).schema) == features + + +def test_in_memory_table_replace_schema_metadata(in_memory_pa_table): + metadata = {"huggingface": "{}"} + table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata) + assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_add_column(in_memory_pa_table): + i = len(in_memory_pa_table.column_names) + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column) + assert table.table == in_memory_pa_table.add_column(i, field_, column) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_append_column(in_memory_pa_table): + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + table = InMemoryTable(in_memory_pa_table).append_column(field_, column) + assert table.table == in_memory_pa_table.append_column(field_, column) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_remove_column(in_memory_pa_table): + table = InMemoryTable(in_memory_pa_table).remove_column(0) + assert table.table == in_memory_pa_table.remove_column(0) + assert 
isinstance(table, InMemoryTable) + + +def test_in_memory_table_set_column(in_memory_pa_table): + i = len(in_memory_pa_table.column_names) + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column) + assert table.table == in_memory_pa_table.set_column(i, field_, column) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_rename_columns(in_memory_pa_table): + assert "tokens" in in_memory_pa_table.column_names + names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] + table = InMemoryTable(in_memory_pa_table).rename_columns(names) + assert table.table == in_memory_pa_table.rename_columns(names) + assert isinstance(table, InMemoryTable) + + +def test_in_memory_table_drop(in_memory_pa_table): + names = [in_memory_pa_table.column_names[0]] + table = InMemoryTable(in_memory_pa_table).drop(names) + assert table.table == in_memory_pa_table.drop(names) + assert isinstance(table, InMemoryTable) + + +def test_memory_mapped_table_init(arrow_file, in_memory_pa_table): + table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file) + assert table.table == in_memory_pa_table + assert isinstance(table, MemoryMappedTable) + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_from_file(arrow_file, in_memory_pa_table): + with assert_arrow_memory_doesnt_increase(): + table = MemoryMappedTable.from_file(arrow_file) + assert table.table == in_memory_pa_table + assert isinstance(table, MemoryMappedTable) + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table): + replays = [("slice", (0, 1), {}), ("flatten", (), {})] + with assert_arrow_memory_doesnt_increase(): + table = MemoryMappedTable.from_file(arrow_file, replays=replays) + assert len(table) == 1 + for method, args, kwargs in replays: + in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs) + assert table.table == in_memory_pa_table + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_deepcopy(arrow_file): + table = MemoryMappedTable.from_file(arrow_file) + copied_table = copy.deepcopy(table) + assert table.table == copied_table.table + assert table.path == copied_table.path + assert_index_attributes_equal(table, copied_table) + # deepcopy must return the exact same arrow objects since they are immutable + assert table.table is copied_table.table + assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) + + +def test_memory_mapped_table_pickle(arrow_file): + table = MemoryMappedTable.from_file(arrow_file) + pickled_table = pickle.dumps(table) + unpickled_table = pickle.loads(pickled_table) + assert unpickled_table.table == table.table + assert unpickled_table.path == table.path + assert_index_attributes_equal(table, unpickled_table) + + +def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file): + with assert_arrow_memory_doesnt_increase(): + table = MemoryMappedTable.from_file(arrow_file) + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_pickle_applies_replay(arrow_file): + replays = [("slice", 
(0, 1), {}), ("flatten", (), {})] + with assert_arrow_memory_doesnt_increase(): + table = MemoryMappedTable.from_file(arrow_file, replays=replays) + assert isinstance(table, MemoryMappedTable) + assert table.replays == replays + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table): + table = MemoryMappedTable.from_file(arrow_file).slice(1, 2) + assert table.table == in_memory_pa_table.slice(1, 2) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("slice", (1, 2), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table): + mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) + table = MemoryMappedTable.from_file(arrow_file).filter(mask) + assert table.table == in_memory_pa_table.filter(mask) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("filter", (mask,), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + # filter DOES increase memory + # assert_pickle_without_bringing_data_in_memory(table) + assert_pickle_does_bring_data_in_memory(table) + + +def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table): + table = MemoryMappedTable.from_file(arrow_file).flatten() + assert table.table == in_memory_pa_table.flatten() + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("flatten", (), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table): + table = MemoryMappedTable.from_file(arrow_file).combine_chunks() + assert table.table == in_memory_pa_table.combine_chunks() + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("combine_chunks", (), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table): + assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types + schema = pa.schema( + { + k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) + for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) + } + ) + table = MemoryMappedTable.from_file(arrow_file).cast(schema) + assert table.table == in_memory_pa_table.cast(schema) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("cast", (schema,), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + # cast DOES increase memory when converting integers precision for example + # assert_pickle_without_bringing_data_in_memory(table) + assert_pickle_does_bring_data_in_memory(table) + + +def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table): + metadata = {"huggingface": "{}"} + table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata) + assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("replace_schema_metadata", (metadata,), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table): + i = 
len(in_memory_pa_table.column_names) + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column) + assert table.table == in_memory_pa_table.add_column(i, field_, column) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("add_column", (i, field_, column), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table): + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column) + assert table.table == in_memory_pa_table.append_column(field_, column) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("append_column", (field_, column), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table): + table = MemoryMappedTable.from_file(arrow_file).remove_column(0) + assert table.table == in_memory_pa_table.remove_column(0) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("remove_column", (0,), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table): + i = len(in_memory_pa_table.column_names) + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + table = MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column) + assert table.table == in_memory_pa_table.set_column(i, field_, column) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("set_column", (i, field_, column), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table): + assert "tokens" in in_memory_pa_table.column_names + names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] + table = MemoryMappedTable.from_file(arrow_file).rename_columns(names) + assert table.table == in_memory_pa_table.rename_columns(names) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("rename_columns", (names,), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table): + names = [in_memory_pa_table.column_names[0]] + table = MemoryMappedTable.from_file(arrow_file).drop(names) + assert table.table == in_memory_pa_table.drop(names) + assert isinstance(table, MemoryMappedTable) + assert table.replays == [("drop", (names,), {})] + assert_deepcopy_without_bringing_data_in_memory(table) + assert_pickle_without_bringing_data_in_memory(table) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_init( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = ( + in_memory_blocks + if blocks_type == "in_memory" + else memory_mapped_blocks + if blocks_type == "memory_mapped" + else mixed_in_memory_and_memory_mapped_blocks + ) + table = ConcatenationTable(in_memory_pa_table, blocks) 
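+ # Reading note (inferred from _to_testing_blocks above): blocks is a list of lists of TableBlock, + # where each inner list holds the column slices of one group of rows, so a ConcatenationTable can + # mix in-memory and memory-mapped chunks both vertically and horizontally.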
+ assert table.table == in_memory_pa_table + assert table.blocks == blocks + + +def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks): + assert len(in_memory_pa_table) > 2 + in_memory_table = InMemoryTable(in_memory_pa_table) + t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2) + table = ConcatenationTable.from_blocks(in_memory_table) + assert isinstance(table, ConcatenationTable) + assert table.table == in_memory_pa_table + assert table.blocks == [[in_memory_table]] + table = ConcatenationTable.from_blocks([t1, t2]) + assert isinstance(table, ConcatenationTable) + assert table.table == in_memory_pa_table + assert table.blocks == [[in_memory_table]] + table = ConcatenationTable.from_blocks([[t1], [t2]]) + assert isinstance(table, ConcatenationTable) + assert table.table == in_memory_pa_table + assert table.blocks == [[in_memory_table]] + table = ConcatenationTable.from_blocks(in_memory_blocks) + assert isinstance(table, ConcatenationTable) + assert table.table == in_memory_pa_table + assert table.blocks == [[in_memory_table]] + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_from_blocks_doesnt_increase_memory( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + with assert_arrow_memory_doesnt_increase(): + table = ConcatenationTable.from_blocks(blocks) + assert isinstance(table, ConcatenationTable) + assert table.table == in_memory_pa_table + if blocks_type == "in_memory": + assert table.blocks == [[InMemoryTable(in_memory_pa_table)]] + else: + assert table.blocks == blocks + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file): + in_memory_table = InMemoryTable(in_memory_pa_table) + concatenation_table = ConcatenationTable.from_blocks(in_memory_table) + memory_mapped_table = MemoryMappedTable.from_file(arrow_file) + tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table] + if axis == 0: + expected_table = pa.concat_tables([in_memory_pa_table] * len(tables)) + else: + # avoids error due to duplicate column names + tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)] + expected_table = in_memory_pa_table + for table in tables[1:]: + for name, col in zip(table.column_names, table.columns): + expected_table = expected_table.append_column(name, col) + + with assert_arrow_memory_doesnt_increase(): + table = ConcatenationTable.from_tables(tables, axis=axis) + assert isinstance(table, ConcatenationTable) + assert table.table == expected_table + # because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable + assert len(table.blocks) == (1 if axis == 1 else 2) + assert len(table.blocks[0]) == (1 if axis == 0 else 2) + assert axis == 1 or len(table.blocks[1]) == 1 + assert isinstance(table.blocks[0][0], InMemoryTable) + assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable) + + +def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file): + table = MemoryMappedTable.from_file(arrow_file) + t1 = table.slice(0, 2) + t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names]) + concatenated = ConcatenationTable.from_tables( + [ + 
ConcatenationTable.from_blocks([[t1], [t1], [t1]]), + ConcatenationTable.from_blocks([[t2], [t2]]), + ], + axis=1, + ) + assert len(concatenated) == 6 + assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] + concatenated = ConcatenationTable.from_tables( + [ + ConcatenationTable.from_blocks([[t2], [t2]]), + ConcatenationTable.from_blocks([[t1], [t1], [t1]]), + ], + axis=1, + ) + assert len(concatenated) == 6 + assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2] + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_deepcopy( + blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + table = ConcatenationTable.from_blocks(blocks) + copied_table = copy.deepcopy(table) + assert table.table == copied_table.table + assert table.blocks == copied_table.blocks + assert_index_attributes_equal(table, copied_table) + # deepcopy must return the exact same arrow objects since they are immutable + assert table.table is copied_table.table + assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches)) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_pickle( + blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + table = ConcatenationTable.from_blocks(blocks) + pickled_table = pickle.dumps(table) + unpickled_table = pickle.loads(pickled_table) + assert unpickled_table.table == table.table + assert unpickled_table.blocks == table.blocks + assert_index_attributes_equal(table, unpickled_table) + + +def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table): + input_features = Features.from_arrow_schema(in_memory_pa_table.schema) + input_features["id"] = Value("int64", id="my_id") + input_schema = input_features.arrow_schema + t0 = in_memory_pa_table.replace_schema_metadata(input_schema.metadata) + t1 = MemoryMappedTable.from_file(arrow_file) + tables = [t0, t1] + concatenated_table = concat_tables(tables, axis=0) + output_schema = concatenated_table.schema + output_features = Features.from_arrow_schema(output_schema) + assert output_schema == input_schema + assert output_schema.metadata == input_schema.metadata + assert output_features == input_features + assert output_features["id"].id == "my_id" + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_slice( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + table = ConcatenationTable.from_blocks(blocks).slice(1, 2) + assert table.table == in_memory_pa_table.slice(1, 2) + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_filter( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, 
mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))]) + table = ConcatenationTable.from_blocks(blocks).filter(mask) + assert table.table == in_memory_pa_table.filter(mask) + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_flatten( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + table = ConcatenationTable.from_blocks(blocks).flatten() + assert table.table == in_memory_pa_table.flatten() + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_combine_chunks( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + table = ConcatenationTable.from_blocks(blocks).combine_chunks() + assert table.table == in_memory_pa_table.combine_chunks() + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_cast( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types + assert pa.int64() in in_memory_pa_table.schema.types + schema = pa.schema( + { + k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32()) + for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) + } + ) + table = ConcatenationTable.from_blocks(blocks).cast(schema) + assert table.table == in_memory_pa_table.cast(schema) + assert isinstance(table, ConcatenationTable) + schema = pa.schema( + { + k: v if v != pa.int64() else pa.int32() + for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types) + } + ) + table = ConcatenationTable.from_blocks(blocks).cast(schema) + assert table.table == in_memory_pa_table.cast(schema) + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concat_tables_cast_with_features_metadata( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + input_features = Features.from_arrow_schema(in_memory_pa_table.schema) + input_features["id"] = Value("int64", id="my_id") + input_schema = input_features.arrow_schema + concatenated_table = ConcatenationTable.from_blocks(blocks).cast(input_schema) + output_schema = concatenated_table.schema + output_features = Features.from_arrow_schema(output_schema) + assert
output_schema == input_schema + assert output_schema.metadata == input_schema.metadata + assert output_features == input_features + assert output_features["id"].id == "my_id" + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_replace_schema_metadata( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + metadata = {"huggingface": "{}"} + table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata) + assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_add_column( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + i = len(in_memory_pa_table.column_names) + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + with pytest.raises(NotImplementedError): + ConcatenationTable.from_blocks(blocks).add_column(i, field_, column) + # assert table.table == in_memory_pa_table.add_column(i, field_, column) + # unpickled_table = pickle.loads(pickle.dumps(table)) + # assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_append_column( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + with pytest.raises(NotImplementedError): + ConcatenationTable.from_blocks(blocks).append_column(field_, column) + # assert table.table == in_memory_pa_table.append_column(field_, column) + # unpickled_table = pickle.loads(pickle.dumps(table)) + # assert unpickled_table.table == in_memory_pa_table.append_column(field_, column) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_remove_column( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + table = ConcatenationTable.from_blocks(blocks).remove_column(0) + assert table.table == in_memory_pa_table.remove_column(0) + assert isinstance(table, ConcatenationTable) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_set_column( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + i = 
len(in_memory_pa_table.column_names) + field_ = "new_field" + column = pa.array(list(range(len(in_memory_pa_table)))) + with pytest.raises(NotImplementedError): + ConcatenationTable.from_blocks(blocks).set_column(i, field_, column) + # assert table.table == in_memory_pa_table.set_column(i, field_, column) + # unpickled_table = pickle.loads(pickle.dumps(table)) + # assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_rename_columns( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + assert "tokens" in in_memory_pa_table.column_names + names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names] + table = ConcatenationTable.from_blocks(blocks).rename_columns(names) + assert isinstance(table, ConcatenationTable) + assert table.table == in_memory_pa_table.rename_columns(names) + + +@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"]) +def test_concatenation_table_drop( + blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks +): + blocks = { + "in_memory": in_memory_blocks, + "memory_mapped": memory_mapped_blocks, + "mixed": mixed_in_memory_and_memory_mapped_blocks, + }[blocks_type] + names = [in_memory_pa_table.column_names[0]] + table = ConcatenationTable.from_blocks(blocks).drop(names) + assert table.table == in_memory_pa_table.drop(names) + assert isinstance(table, ConcatenationTable) + + +def test_concat_tables(arrow_file, in_memory_pa_table): + t0 = in_memory_pa_table + t1 = InMemoryTable(t0) + t2 = MemoryMappedTable.from_file(arrow_file) + t3 = ConcatenationTable.from_blocks(t1) + tables = [t0, t1, t2, t3] + concatenated_table = concat_tables(tables, axis=0) + assert concatenated_table.table == pa.concat_tables([t0] * 4) + assert concatenated_table.table.shape == (40, 4) + assert isinstance(concatenated_table, ConcatenationTable) + assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable + assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) + assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable) + assert isinstance(concatenated_table.blocks[2][0], InMemoryTable) + # add suffix to avoid error due to duplicate column names + concatenated_table = concat_tables( + [add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1 + ) + assert concatenated_table.table.shape == (10, 16) + assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable + assert isinstance(concatenated_table.blocks[0][0], InMemoryTable) + assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable) + assert isinstance(concatenated_table.blocks[0][2], InMemoryTable) + + +def _interpolation_search_ground_truth(arr: List[int], x: int) -> Union[int, IndexError]: + for i in range(len(arr) - 1): + if arr[i] <= x < arr[i + 1]: + return i + return IndexError + + +class _ListWithGetitemCounter(list): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.unique_getitem_calls = set() + + def __getitem__(self, i): + out = super().__getitem__(i) + self.unique_getitem_calls.add(i) + 
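# record each distinct index probed, so test_interpolation_search below can + # assert an upper bound on the number of unique list lookups +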
return out
+
+ @property
+ def getitem_unique_count(self):
+ return len(self.unique_getitem_calls)
+
+
+@pytest.mark.parametrize(
+ "arr, x",
+ [(np.arange(0, 14, 3), x) for x in range(-1, 22)]
+ + [(list(np.arange(-5, 5)), x) for x in range(-6, 6)]
+ + [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]]
+ + [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 10_001]],
+)
+def test_interpolation_search(arr, x):
+ ground_truth = _interpolation_search_ground_truth(arr, x)
+ if isinstance(ground_truth, int):
+ arr = _ListWithGetitemCounter(arr)
+ output = _interpolation_search(arr, x)
+ assert ground_truth == output
+ # At most 4 unique __getitem__ calls are expected for the cases in this test,
+ # but the count can be higher for large or unevenly distributed arrays.
+ assert arr.getitem_unique_count <= 4
+ else:
+ with pytest.raises(ground_truth):
+ _interpolation_search(arr, x)
+
+
+def test_indexed_table_mixin():
+ n_rows_per_chunk = 10
+ n_chunks = 4
+ pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk})
+ pa_table = pa.concat_tables([pa_table] * n_chunks)
+ table = Table(pa_table)
+ assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks))
+ assert table.fast_slice(5) == pa_table.slice(5)
+ assert table.fast_slice(2, 13) == pa_table.slice(2, 13)
+
+
+@pytest.mark.parametrize(
+ "arrays",
+ [
+ [pa.array([[1, 2, 3, 4]]), pa.array([[10, 2]])],
+ [
+ pa.array([[[1, 2], [3]]], pa.list_(pa.list_(pa.int32()), 2)),
+ pa.array([[[10, 2, 3], [2]]], pa.list_(pa.list_(pa.int32()), 2)),
+ ],
+ [pa.array([[[1, 2, 3]], [[2, 3], [20, 21]], [[4]]]).slice(1), pa.array([[[1, 2, 3]]])],
+ ],
+)
+def test_concat_arrays(arrays):
+ assert array_concat(arrays) == pa.concat_arrays(arrays)
+
+
+def test_concat_arrays_nested_with_nulls():
+ arrays = [pa.array([{"a": 21, "b": [[1, 2], [3]]}]), pa.array([{"a": 100, "b": [[1], None]}])]
+ concatenated_arrays = array_concat(arrays)
+ assert concatenated_arrays == pa.array([{"a": 21, "b": [[1, 2], [3]]}, {"a": 100, "b": [[1], None]}])
+
+
+def test_concat_extension_arrays():
+ arrays = [pa.array([[[1, 2], [3, 4]]]), pa.array([[[10, 2], [3, 4]]])]
+ extension_type = Array2DExtensionType((2, 2), "int64")
+ assert array_concat([extension_type.wrap_array(array) for array in arrays]) == extension_type.wrap_array(
+ pa.concat_arrays(arrays)
+ )
+
+
+def test_cast_array_to_features():
+ arr = pa.array([[0, 1]])
+ assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
+ with pytest.raises(TypeError):
+ cast_array_to_feature(arr, Sequence(Value("string")), allow_number_to_str=False)
+
+
+def test_cast_array_to_features_nested():
+ arr = pa.array([[{"foo": [0]}]])
+ assert cast_array_to_feature(arr, [{"foo": Sequence(Value("string"))}]).type == pa.list_(
+ pa.struct({"foo": pa.list_(pa.string())})
+ )
+
+
+def test_cast_array_to_features_to_nested_with_no_fields():
+ arr = pa.array([{}])
+ assert cast_array_to_feature(arr, {}).type == pa.struct({})
+ assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist()
+
+
+def test_cast_array_to_features_nested_with_null_values():
+ # same type
+ arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
+ casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]})
+ assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})
+ assert casted_array.to_pylist() == arr.to_pylist()
+
+ # different type
+ arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": 
pa.list_(pa.list_(pa.int64()))}))
+ if datasets.config.PYARROW_VERSION.major < 10:
+ with pytest.warns(UserWarning, match="None values are converted to empty lists.+"):
+ casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
+ assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
+ assert casted_array.to_pylist() == [
+ {"foo": [[], [0]]}
+ ] # empty list because of https://github.com/huggingface/datasets/issues/3676
+ else:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
+ assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
+ assert casted_array.to_pylist() == [{"foo": [None, [0]]}]
+
+
+def test_cast_array_to_features_to_null_type():
+ # same type
+ arr = pa.array([[None, None]])
+ assert cast_array_to_feature(arr, Sequence(Value("null"))).type == pa.list_(pa.null())
+
+ # different type
+ arr = pa.array([[None, 1]])
+ with pytest.raises(TypeError):
+ cast_array_to_feature(arr, Sequence(Value("null")))
+
+
+def test_cast_array_to_features_array_xd():
+ # same storage type
+ arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2))
+ casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32"))
+ assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32")
+ # different storage type
+ casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32"))
+ assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32")
+
+
+def test_cast_array_to_features_sequence_classlabel():
+ arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64()))
+ assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
+
+ arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string()))
+ assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
+
+ # Test empty arrays
+ arr = pa.array([[], []], pa.list_(pa.int64()))
+ assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
+
+ arr = pa.array([[], []], pa.list_(pa.string()))
+ assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
+
+ # Test invalid class labels
+ arr = pa.array([[2]], pa.list_(pa.int64()))
+ with pytest.raises(ValueError):
+ cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
+
+ arr = pa.array([["baz"]], pa.list_(pa.string()))
+ with pytest.raises(ValueError):
+ cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
+
+
+def test_cast_fixed_size_array_to_features_sequence():
+ arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
+ # Fixed size list
+ casted_array = cast_array_to_feature(arr, Sequence(Value("int64"), length=3))
+ assert casted_array.type == pa.list_(pa.int64(), 3)
+ assert casted_array.to_pylist() == arr.to_pylist()
+ # Variable size list
+ casted_array = cast_array_to_feature(arr, Sequence(Value("int64")))
+ assert casted_array.type == pa.list_(pa.int64())
+ assert casted_array.to_pylist() == arr.to_pylist()
+
+
+def test_cast_sliced_fixed_size_array_to_features():
+ arr = pa.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], pa.list_(pa.int32(), 3))
+ casted_array = cast_array_to_feature(arr[1:], Sequence(Value("int64"), length=3))
+ assert casted_array.type == pa.list_(pa.int64(), 3)
+ assert 
casted_array.to_pylist() == arr[1:].to_pylist() + + +def test_embed_array_storage(image_file): + array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type) + embedded_images_array = embed_array_storage(array, Image()) + assert isinstance(embedded_images_array.to_pylist()[0]["path"], str) + assert embedded_images_array.to_pylist()[0]["path"] == "test_image_rgb.jpg" + assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes) + + +def test_embed_array_storage_nested(image_file): + array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type)) + embedded_images_array = embed_array_storage(array, [Image()]) + assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str) + assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes) + array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type})) + embedded_images_array = embed_array_storage(array, {"foo": Image()}) + assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str) + assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes) + + +def test_embed_table_storage(image_file): + features = Features({"image": Image()}) + table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema) + embedded_images_table = embed_table_storage(table) + assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str) + assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes) + + +@pytest.mark.parametrize( + "table", + [ + InMemoryTable(pa.table({"foo": range(10)})), + InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])), + InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])), + ], +) +@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) +@pytest.mark.parametrize("drop_last_batch", [False, True]) +def test_table_iter(table, batch_size, drop_last_batch): + num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size + num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size + subtables = list(table_iter(table, batch_size=batch_size, drop_last_batch=drop_last_batch)) + assert len(subtables) == num_batches + if drop_last_batch: + assert all(len(subtable) == batch_size for subtable in subtables) + else: + assert all(len(subtable) == batch_size for subtable in subtables[:-1]) + assert len(subtables[-1]) <= batch_size + if num_rows > 0: + reloaded = pa.concat_tables(subtables) + assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() + + +@pytest.mark.parametrize( + "pa_type, expected", + [ + (pa.int8(), False), + (pa.struct({"col1": pa.int8(), "col2": pa.int64()}), False), + (pa.struct({"col1": pa.list_(pa.int8()), "col2": Array2DExtensionType((1, 3), "int64")}), True), + (pa.list_(pa.int8()), False), + (pa.list_(Array2DExtensionType((1, 3), "int64"), 4), True), + ], +) +def test_is_extension_type(pa_type, expected): + assert _is_extension_type(pa_type) == expected diff --git a/testbed/huggingface__datasets/tests/test_tasks.py b/testbed/huggingface__datasets/tests/test_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..210e71277b64df0ff36b2964d36b2181fb34a64e --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_tasks.py @@ -0,0 +1,245 @@ +from copy import deepcopy +from unittest.case import TestCase + +import pytest + +from datasets.arrow_dataset import Dataset +from 
datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value
+from datasets.info import DatasetInfo
+from datasets.tasks import (
+ AudioClassification,
+ AutomaticSpeechRecognition,
+ ImageClassification,
+ LanguageModeling,
+ QuestionAnsweringExtractive,
+ Summarization,
+ TextClassification,
+ task_template_from_dict,
+)
+from datasets.utils.py_utils import asdict
+
+
+SAMPLE_QUESTION_ANSWERING_EXTRACTIVE = {
+ "id": "5733be284776f41900661182",
+ "title": "University_of_Notre_Dame",
+ "context": 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.',
+ "question": "To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?",
+ "answers": {"text": ["Saint Bernadette Soubirous"], "answer_start": [515]},
+}
+
+
+@pytest.mark.parametrize(
+ "task_cls",
+ [
+ AudioClassification,
+ AutomaticSpeechRecognition,
+ ImageClassification,
+ LanguageModeling,
+ QuestionAnsweringExtractive,
+ Summarization,
+ TextClassification,
+ ],
+)
+def test_reload_task_from_dict(task_cls):
+ task = task_cls()
+ task_dict = asdict(task)
+ reloaded = task_template_from_dict(task_dict)
+ assert task == reloaded
+
+
+class TestLanguageModeling:
+ def test_column_mapping(self):
+ task = LanguageModeling(text_column="input_text")
+ assert {"input_text": "text"} == task.column_mapping
+
+ def test_from_dict(self):
+ input_schema = Features({"text": Value("string")})
+ template_dict = {"text_column": "input_text"}
+ task = LanguageModeling.from_dict(template_dict)
+ assert "language-modeling" == task.task
+ assert input_schema == task.input_schema
+
+
+class TextClassificationTest(TestCase):
+ def setUp(self):
+ self.labels = sorted(["pos", "neg"])
+
+ def test_column_mapping(self):
+ task = TextClassification(text_column="input_text", label_column="input_label")
+ self.assertDictEqual({"input_text": "text", "input_label": "labels"}, task.column_mapping)
+
+ def test_from_dict(self):
+ input_schema = Features({"text": Value("string")})
+ # `from_dict` cannot restore the label names, so the expected schema holds the bare `ClassLabel` class
+ # (the names are only filled in later by `align_with_features`)
+ label_schema = Features({"labels": ClassLabel})
+ template_dict = {"text_column": "input_text", "label_column": "input_labels"}
+ task = TextClassification.from_dict(template_dict)
+ self.assertEqual("text-classification", task.task)
+ self.assertEqual(input_schema, task.input_schema)
+ self.assertEqual(label_schema, task.label_schema)
+
+ def test_align_with_features(self):
+ task = TextClassification(text_column="input_text", label_column="input_label")
+ self.assertEqual(task.label_schema["labels"], ClassLabel)
+ task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)}))
+ self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels))
+
+
+class QuestionAnsweringTest(TestCase):
+ def test_column_mapping(self):
+ task = QuestionAnsweringExtractive(
+ context_column="input_context", 
question_column="input_question", answers_column="input_answers"
+ )
+ self.assertDictEqual(
+ {"input_context": "context", "input_question": "question", "input_answers": "answers"}, task.column_mapping
+ )
+
+ def test_from_dict(self):
+ input_schema = Features({"question": Value("string"), "context": Value("string")})
+ label_schema = Features(
+ {
+ "answers": Sequence(
+ {
+ "text": Value("string"),
+ "answer_start": Value("int32"),
+ }
+ )
+ }
+ )
+ template_dict = {
+ "context_column": "input_context",
+ "question_column": "input_question",
+ "answers_column": "input_answers",
+ }
+ task = QuestionAnsweringExtractive.from_dict(template_dict)
+ self.assertEqual("question-answering-extractive", task.task)
+ self.assertEqual(input_schema, task.input_schema)
+ self.assertEqual(label_schema, task.label_schema)
+
+
+class SummarizationTest(TestCase):
+ def test_column_mapping(self):
+ task = Summarization(text_column="input_text", summary_column="input_summary")
+ self.assertDictEqual({"input_text": "text", "input_summary": "summary"}, task.column_mapping)
+
+ def test_from_dict(self):
+ input_schema = Features({"text": Value("string")})
+ label_schema = Features({"summary": Value("string")})
+ template_dict = {"text_column": "input_text", "summary_column": "input_summary"}
+ task = Summarization.from_dict(template_dict)
+ self.assertEqual("summarization", task.task)
+ self.assertEqual(input_schema, task.input_schema)
+ self.assertEqual(label_schema, task.label_schema)
+
+
+class AutomaticSpeechRecognitionTest(TestCase):
+ def test_column_mapping(self):
+ task = AutomaticSpeechRecognition(audio_column="input_audio", transcription_column="input_transcription")
+ self.assertDictEqual({"input_audio": "audio", "input_transcription": "transcription"}, task.column_mapping)
+
+ def test_from_dict(self):
+ input_schema = Features({"audio": Audio()})
+ label_schema = Features({"transcription": Value("string")})
+ template_dict = {
+ "audio_column": "input_audio",
+ "transcription_column": "input_transcription",
+ }
+ task = AutomaticSpeechRecognition.from_dict(template_dict)
+ self.assertEqual("automatic-speech-recognition", task.task)
+ self.assertEqual(input_schema, task.input_schema)
+ self.assertEqual(label_schema, task.label_schema)
+
+
+class AudioClassificationTest(TestCase):
+ def setUp(self):
+ self.labels = sorted(["pos", "neg"])
+
+ def test_column_mapping(self):
+ task = AudioClassification(audio_column="input_audio", label_column="input_label")
+ self.assertDictEqual({"input_audio": "audio", "input_label": "labels"}, task.column_mapping)
+
+ def test_from_dict(self):
+ input_schema = Features({"audio": Audio()})
+ label_schema = Features({"labels": ClassLabel})
+ template_dict = {
+ "audio_column": "input_audio",
+ "label_column": "input_label",
+ }
+ task = AudioClassification.from_dict(template_dict)
+ self.assertEqual("audio-classification", task.task)
+ self.assertEqual(input_schema, task.input_schema)
+ self.assertEqual(label_schema, task.label_schema)
+
+ def test_align_with_features(self):
+ task = AudioClassification(audio_column="input_audio", label_column="input_label")
+ self.assertEqual(task.label_schema["labels"], ClassLabel)
+ task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)}))
+ self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels))
+
+
+class ImageClassificationTest(TestCase):
+ def setUp(self):
+ self.labels = sorted(["pos", "neg"])
+
+ def test_column_mapping(self):
+ task = 
ImageClassification(image_column="input_image", label_column="input_label") + self.assertDictEqual({"input_image": "image", "input_label": "labels"}, task.column_mapping) + + def test_from_dict(self): + input_schema = Features({"image": Image()}) + label_schema = Features({"labels": ClassLabel}) + template_dict = { + "image_column": "input_image", + "label_column": "input_label", + } + task = ImageClassification.from_dict(template_dict) + self.assertEqual("image-classification", task.task) + self.assertEqual(input_schema, task.input_schema) + self.assertEqual(label_schema, task.label_schema) + + def test_align_with_features(self): + task = ImageClassification(image_column="input_image", label_column="input_label") + self.assertEqual(task.label_schema["labels"], ClassLabel) + task = task.align_with_features(Features({"input_label": ClassLabel(names=self.labels)})) + self.assertEqual(task.label_schema["labels"], ClassLabel(names=self.labels)) + + +class DatasetWithTaskProcessingTest(TestCase): + def test_map_on_task_template(self): + info = DatasetInfo(task_templates=QuestionAnsweringExtractive()) + dataset = Dataset.from_dict({k: [v] for k, v in SAMPLE_QUESTION_ANSWERING_EXTRACTIVE.items()}, info=info) + assert isinstance(dataset.info.task_templates, list) + assert len(dataset.info.task_templates) == 1 + + def keep_task(x): + return x + + def dont_keep_task(x): + out = deepcopy(SAMPLE_QUESTION_ANSWERING_EXTRACTIVE) + out["answers"]["foobar"] = 0 + return out + + mapped_dataset = dataset.map(keep_task) + assert mapped_dataset.info.task_templates == dataset.info.task_templates + # reload from cache + mapped_dataset = dataset.map(keep_task) + assert mapped_dataset.info.task_templates == dataset.info.task_templates + + mapped_dataset = dataset.map(dont_keep_task) + assert mapped_dataset.info.task_templates == [] + # reload from cache + mapped_dataset = dataset.map(dont_keep_task) + assert mapped_dataset.info.task_templates == [] + + def test_remove_and_map_on_task_template(self): + features = Features({"text": Value("string"), "label": ClassLabel(names=("pos", "neg"))}) + task_templates = TextClassification(text_column="text", label_column="label") + info = DatasetInfo(features=features, task_templates=task_templates) + dataset = Dataset.from_dict({"text": ["A sentence."], "label": ["pos"]}, info=info) + + def process(example): + return example + + modified_dataset = dataset.remove_columns("label") + mapped_dataset = modified_dataset.map(process) + assert mapped_dataset.info.task_templates == [] diff --git a/testbed/huggingface__datasets/tests/test_upstream_hub.py b/testbed/huggingface__datasets/tests/test_upstream_hub.py new file mode 100644 index 0000000000000000000000000000000000000000..15ddb01c06e44a8bc66f4b96c7026d6598e316b6 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_upstream_hub.py @@ -0,0 +1,906 @@ +import fnmatch +import gc +import os +import shutil +import tempfile +import textwrap +import time +import unittest +from io import BytesIO +from pathlib import Path +from unittest.mock import patch + +import numpy as np +import pytest +from huggingface_hub import DatasetCard, HfApi + +from datasets import ( + Audio, + ClassLabel, + Dataset, + DatasetDict, + DownloadManager, + Features, + Image, + Value, + load_dataset, + load_dataset_builder, +) +from datasets.config import METADATA_CONFIGS_FIELD +from datasets.data_files import get_data_patterns +from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( + FolderBasedBuilder, + 
FolderBasedBuilderConfig, +) +from datasets.utils.file_utils import cached_path +from datasets.utils.hub import hf_hub_url +from tests.fixtures.hub import CI_HUB_ENDPOINT, CI_HUB_USER, CI_HUB_USER_TOKEN +from tests.utils import for_all_test_methods, require_pil, require_sndfile, xfail_if_500_502_http_error + + +pytestmark = pytest.mark.integration + + +@for_all_test_methods(xfail_if_500_502_http_error) +@pytest.mark.usefixtures("ci_hub_config", "ci_hfh_hf_hub_url") +class TestPushToHub: + _api = HfApi(endpoint=CI_HUB_ENDPOINT) + _token = CI_HUB_USER_TOKEN + + def test_push_dataset_dict_to_hub_no_token(self, temporary_repo, set_ci_hub_access_token): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there is a single file on the repository that has the correct name + files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) + assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] + + def test_push_dataset_dict_to_hub_name_without_namespace(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there is a single file on the repository that has the correct name + files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) + assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] + + def test_push_dataset_dict_to_hub_datasets_with_different_features(self, cleanup_repo): + ds_train = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + ds_test = Dataset.from_dict({"x": [True, False, True], "y": ["a", "b", "c"]}) + + local_ds = DatasetDict({"train": ds_train, "test": ds_test}) + + ds_name = f"{CI_HUB_USER}/test-{int(time.time() * 10e6)}" + try: + with pytest.raises(ValueError): + local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token) + except AssertionError: + cleanup_repo(ds_name) + raise + + def test_push_dataset_dict_to_hub_private(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token, private=True) + hub_ds = load_dataset(ds_name, download_mode="force_redownload", token=self._token) + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there is a single file on the repository that has the correct name + files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) + assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] + + def 
test_push_dataset_dict_to_hub(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there is a single file on the repository that has the correct name + files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) + assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] + + def test_push_dataset_dict_to_hub_with_pull_request(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token, create_pr=True) + hub_ds = load_dataset(ds_name, revision="refs/pr/1", download_mode="force_redownload") + + assert local_ds["train"].features == hub_ds["train"].features + assert list(local_ds.keys()) == list(hub_ds.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there is a single file on the repository that has the correct name + files = sorted( + self._api.list_repo_files(ds_name, revision="refs/pr/1", repo_type="dataset", token=self._token) + ) + assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] + + def test_push_dataset_dict_to_hub_with_revision(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token, revision="dev") + hub_ds = load_dataset(ds_name, revision="dev", download_mode="force_redownload") + + assert local_ds["train"].features == hub_ds["train"].features + assert list(local_ds.keys()) == list(hub_ds.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there is a single file on the repository that has the correct name + files = sorted(self._api.list_repo_files(ds_name, revision="dev", repo_type="dataset", token=self._token)) + assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] + + def test_push_dataset_dict_to_hub_multiple_files(self, temporary_repo): + ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) + + local_ds = DatasetDict({"train": ds}) + + with temporary_repo() as ds_name: + with patch("datasets.config.MAX_SHARD_SIZE", "16KB"): + local_ds.push_to_hub(ds_name, token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + # Ensure that there are two files on the repository that have the correct name + files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) + assert files == [ + ".gitattributes", + "README.md", + "data/train-00000-of-00002.parquet", + "data/train-00001-of-00002.parquet", + ] + + def test_push_dataset_dict_to_hub_multiple_files_with_max_shard_size(self, temporary_repo): + ds = Dataset.from_dict({"x": list(range(1000)), "y": 
list(range(1000))})
+
+ local_ds = DatasetDict({"train": ds})
+
+ with temporary_repo() as ds_name:
+ local_ds.push_to_hub(ds_name, token=self._token, max_shard_size="16KB")
+ hub_ds = load_dataset(ds_name, download_mode="force_redownload")
+
+ assert local_ds.column_names == hub_ds.column_names
+ assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
+ assert local_ds["train"].features == hub_ds["train"].features
+
+ # Ensure that there are two files on the repository that have the correct name
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
+ assert files == [
+ ".gitattributes",
+ "README.md",
+ "data/train-00000-of-00002.parquet",
+ "data/train-00001-of-00002.parquet",
+ ]
+
+ def test_push_dataset_dict_to_hub_multiple_files_with_num_shards(self, temporary_repo):
+ ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
+
+ local_ds = DatasetDict({"train": ds})
+
+ with temporary_repo() as ds_name:
+ local_ds.push_to_hub(ds_name, token=self._token, num_shards={"train": 2})
+ hub_ds = load_dataset(ds_name, download_mode="force_redownload")
+
+ assert local_ds.column_names == hub_ds.column_names
+ assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
+ assert local_ds["train"].features == hub_ds["train"].features
+
+ # Ensure that there are two files on the repository that have the correct name
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
+ assert files == [
+ ".gitattributes",
+ "README.md",
+ "data/train-00000-of-00002.parquet",
+ "data/train-00001-of-00002.parquet",
+ ]
+
+ def test_push_dataset_dict_to_hub_with_multiple_commits(self, temporary_repo):
+ ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
+
+ local_ds = DatasetDict({"train": ds})
+
+ with temporary_repo() as ds_name:
+ self._api.create_repo(ds_name, token=self._token, repo_type="dataset")
+ num_commits_before_push = len(self._api.list_repo_commits(ds_name, repo_type="dataset", token=self._token))
+ with patch("datasets.config.MAX_SHARD_SIZE", "16KB"), patch(
+ "datasets.config.UPLOADS_MAX_NUMBER_PER_COMMIT", 1
+ ):
+ local_ds.push_to_hub(ds_name, token=self._token)
+ hub_ds = load_dataset(ds_name, download_mode="force_redownload")
+
+ assert local_ds.column_names == hub_ds.column_names
+ assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
+ assert local_ds["train"].features == hub_ds["train"].features
+
+ # Ensure that there are two files on the repository that have the correct name
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
+ assert files == [
+ ".gitattributes",
+ "README.md",
+ "data/train-00000-of-00002.parquet",
+ "data/train-00001-of-00002.parquet",
+ ]
+
+ num_commits_after_push = len(self._api.list_repo_commits(ds_name, repo_type="dataset", token=self._token))
+ assert num_commits_after_push - num_commits_before_push > 1
+
+ def test_push_dataset_dict_to_hub_overwrite_files(self, temporary_repo):
+ ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))})
+ ds2 = Dataset.from_dict({"x": list(range(100)), "y": list(range(100))})
+
+ local_ds = DatasetDict({"train": ds, "random": ds2})
+
+ # Push to hub two times, but the second time with a larger number of files.
+ # Verify that the new files contain the correct dataset. 
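+ # Note: max_shard_size=500 << 5 (i.e. 16,000 bytes) below is deliberately small, so the 1000-row "train" split is written as two parquet shards.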
+ with temporary_repo() as ds_name:
+ local_ds.push_to_hub(ds_name, token=self._token)
+
+ with tempfile.TemporaryDirectory() as tmp:
+ # Add a file starting with "data" to ensure it doesn't get deleted.
+ path = Path(tmp) / "datafile.txt"
+ with open(path, "w") as f:
+ f.write("Bogus file")
+
+ self._api.upload_file(
+ path_or_fileobj=str(path),
+ path_in_repo="datafile.txt",
+ repo_id=ds_name,
+ repo_type="dataset",
+ token=self._token,
+ )
+
+ local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5)
+
+ # Ensure that the repository contains exactly the expected files
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token))
+ assert files == [
+ ".gitattributes",
+ "README.md",
+ "data/random-00000-of-00001.parquet",
+ "data/train-00000-of-00002.parquet",
+ "data/train-00001-of-00002.parquet",
+ "datafile.txt",
+ ]
+
+ self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token)
+
+ hub_ds = load_dataset(ds_name, download_mode="force_redownload")
+
+ assert local_ds.column_names == hub_ds.column_names
+ assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys())
+ assert local_ds["train"].features == hub_ds["train"].features
+
+ del hub_ds
+
+ # Drop the reference to the memory-mapped Arrow file to avoid a PermissionError on Windows
+ gc.collect()
+
+ # Push to hub two times, but the second time with fewer files.
+ # Verify that the new files contain the correct dataset and that obsolete files have been deleted.
+ with temporary_repo(ds_name):
+ local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5)
+
+ with tempfile.TemporaryDirectory() as tmp:
+ # Add a file starting with "data" to ensure it doesn't get deleted. 
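+ # The overwrite logic should only delete the parquet shards it manages, not unrelated files whose names merely start with "data".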
+ path = Path(tmp) / "datafile.txt" + with open(path, "w") as f: + f.write("Bogus file") + + self._api.upload_file( + path_or_fileobj=str(path), + path_in_repo="datafile.txt", + repo_id=ds_name, + repo_type="dataset", + token=self._token, + ) + + local_ds.push_to_hub(ds_name, token=self._token) + + # Ensure that there are two files on the repository that have the correct name + files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) + assert files == [ + ".gitattributes", + "README.md", + "data/random-00000-of-00001.parquet", + "data/train-00000-of-00001.parquet", + "datafile.txt", + ] + + # Keeping the "datafile.txt" breaks the load_dataset to think it's a text-based dataset + self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token) + + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + def test_push_dataset_to_hub(self, temporary_repo): + local_ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, split="train", token=self._token) + local_ds_dict = {"train": local_ds} + hub_ds_dict = load_dataset(ds_name, download_mode="force_redownload") + + assert list(local_ds_dict.keys()) == list(hub_ds_dict.keys()) + + for ds_split_name in local_ds_dict.keys(): + local_ds = local_ds_dict[ds_split_name] + hub_ds = hub_ds_dict[ds_split_name] + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds.features.keys()) == list(hub_ds.features.keys()) + assert local_ds.features == hub_ds.features + + def test_push_dataset_to_hub_custom_features(self, temporary_repo): + features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])}) + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features) + + with temporary_repo() as ds_name: + ds.push_to_hub(ds_name, token=self._token) + hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") + + assert ds.column_names == hub_ds.column_names + assert list(ds.features.keys()) == list(hub_ds.features.keys()) + assert ds.features == hub_ds.features + assert ds[:] == hub_ds[:] + + @require_sndfile + def test_push_dataset_to_hub_custom_features_audio(self, temporary_repo): + audio_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_audio_44100.wav") + data = {"x": [audio_path, None], "y": [0, -1]} + features = Features({"x": Audio(), "y": Value("int32")}) + ds = Dataset.from_dict(data, features=features) + + for embed_external_files in [True, False]: + with temporary_repo() as ds_name: + ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) + hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") + + assert ds.column_names == hub_ds.column_names + assert list(ds.features.keys()) == list(hub_ds.features.keys()) + assert ds.features == hub_ds.features + np.testing.assert_equal(ds[0]["x"]["array"], hub_ds[0]["x"]["array"]) + assert ds[1] == hub_ds[1] # don't test hub_ds[0] since audio decoding might be slightly different + hub_ds = hub_ds.cast_column("x", Audio(decode=False)) + elem = hub_ds[0]["x"] + path, bytes_ = elem["path"], elem["bytes"] + assert isinstance(path, str) + assert os.path.basename(path) == "test_audio_44100.wav" + assert bool(bytes_) == 
embed_external_files + + @require_pil + def test_push_dataset_to_hub_custom_features_image(self, temporary_repo): + image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg") + data = {"x": [image_path, None], "y": [0, -1]} + features = Features({"x": Image(), "y": Value("int32")}) + ds = Dataset.from_dict(data, features=features) + + for embed_external_files in [True, False]: + with temporary_repo() as ds_name: + ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) + hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") + + assert ds.column_names == hub_ds.column_names + assert list(ds.features.keys()) == list(hub_ds.features.keys()) + assert ds.features == hub_ds.features + assert ds[:] == hub_ds[:] + hub_ds = hub_ds.cast_column("x", Image(decode=False)) + elem = hub_ds[0]["x"] + path, bytes_ = elem["path"], elem["bytes"] + assert isinstance(path, str) + assert bool(bytes_) == embed_external_files + + @require_pil + def test_push_dataset_to_hub_custom_features_image_list(self, temporary_repo): + image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg") + data = {"x": [[image_path], [image_path, image_path]], "y": [0, -1]} + features = Features({"x": [Image()], "y": Value("int32")}) + ds = Dataset.from_dict(data, features=features) + + for embed_external_files in [True, False]: + with temporary_repo() as ds_name: + ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) + hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") + + assert ds.column_names == hub_ds.column_names + assert list(ds.features.keys()) == list(hub_ds.features.keys()) + assert ds.features == hub_ds.features + assert ds[:] == hub_ds[:] + hub_ds = hub_ds.cast_column("x", [Image(decode=False)]) + elem = hub_ds[0]["x"][0] + path, bytes_ = elem["path"], elem["bytes"] + assert isinstance(path, str) + assert bool(bytes_) == embed_external_files + + def test_push_dataset_dict_to_hub_custom_features(self, temporary_repo): + features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])}) + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features) + + local_ds = DatasetDict({"test": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["test"].features.keys()) == list(hub_ds["test"].features.keys()) + assert local_ds["test"].features == hub_ds["test"].features + + def test_push_dataset_to_hub_custom_splits(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + with temporary_repo() as ds_name: + ds.push_to_hub(ds_name, split="random", token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert ds.column_names == hub_ds["random"].column_names + assert list(ds.features.keys()) == list(hub_ds["random"].features.keys()) + assert ds.features == hub_ds["random"].features + + def test_push_dataset_to_hub_multiple_splits_one_by_one(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + with temporary_repo() as ds_name: + ds.push_to_hub(ds_name, split="train", token=self._token) + ds.push_to_hub(ds_name, split="test", token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + assert sorted(hub_ds) == ["test", 
"train"] + assert ds.column_names == hub_ds["train"].column_names + assert list(ds.features.keys()) == list(hub_ds["train"].features.keys()) + assert ds.features == hub_ds["train"].features + + def test_push_dataset_dict_to_hub_custom_splits(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + + local_ds = DatasetDict({"random": ds}) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["random"].features.keys()) == list(hub_ds["random"].features.keys()) + assert local_ds["random"].features == hub_ds["random"].features + + @unittest.skip("This test cannot pass until iterable datasets have push to hub") + def test_push_streaming_dataset_dict_to_hub(self, temporary_repo): + ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + local_ds = DatasetDict({"train": ds}) + with tempfile.TemporaryDirectory() as tmp: + local_ds.save_to_disk(tmp) + local_ds = load_dataset(tmp, streaming=True) + + with temporary_repo() as ds_name: + local_ds.push_to_hub(ds_name, token=self._token) + hub_ds = load_dataset(ds_name, download_mode="force_redownload") + + assert local_ds.column_names == hub_ds.column_names + assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) + assert local_ds["train"].features == hub_ds["train"].features + + def test_push_multiple_dataset_configs_to_hub_load_dataset_builder(self, temporary_repo): + ds_default = Dataset.from_dict({"a": [0], "b": [1]}) + ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) + + with temporary_repo() as ds_name: + ds_default.push_to_hub(ds_name, token=self._token) + ds_config1.push_to_hub(ds_name, "config1", token=self._token) + ds_config2.push_to_hub(ds_name, "config2", token=self._token) + ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config + assert len(ds_builder_default.BUILDER_CONFIGS) == 3 + assert len(ds_builder_default.config.data_files["train"]) == 1 + assert fnmatch.fnmatch( + ds_builder_default.config.data_files["train"][0], + "*/data/train-*", + ) + ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload") + assert len(ds_builder_config1.BUILDER_CONFIGS) == 3 + assert len(ds_builder_config1.config.data_files["train"]) == 1 + assert fnmatch.fnmatch( + ds_builder_config1.config.data_files["train"][0], + "*/config1/train-*", + ) + ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload") + assert len(ds_builder_config2.BUILDER_CONFIGS) == 3 + assert len(ds_builder_config2.config.data_files["train"]) == 1 + assert fnmatch.fnmatch( + ds_builder_config2.config.data_files["train"][0], + "*/config2/train-*", + ) + + with pytest.raises(ValueError): # no config 'config3' + load_dataset_builder(ds_name, "config3", download_mode="force_redownload") + + def test_push_multiple_dataset_configs_to_hub_load_dataset(self, temporary_repo): + ds_default = Dataset.from_dict({"a": [0], "b": [1]}) + ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) + ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) + + with temporary_repo() as ds_name: + ds_default.push_to_hub(ds_name, token=self._token) + ds_config1.push_to_hub(ds_name, "config1", token=self._token) + ds_config2.push_to_hub(ds_name, "config2", 
token=self._token)
+
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
+ assert files == [
+ ".gitattributes",
+ "README.md",
+ "config1/train-00000-of-00001.parquet",
+ "config2/train-00000-of-00001.parquet",
+ "data/train-00000-of-00001.parquet",
+ ]
+
+ hub_ds_default = load_dataset(ds_name, download_mode="force_redownload")
+ hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload")
+ hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload")
+
+ # only "train" split
+ assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 1
+
+ assert ds_default.column_names == hub_ds_default["train"].column_names == ["a", "b"]
+ assert ds_config1.column_names == hub_ds_config1["train"].column_names == ["x", "y"]
+ assert ds_config2.column_names == hub_ds_config2["train"].column_names == ["foo", "bar"]
+
+ assert ds_default.features == hub_ds_default["train"].features
+ assert ds_config1.features == hub_ds_config1["train"].features
+ assert ds_config2.features == hub_ds_config2["train"].features
+
+ assert ds_default.num_rows == hub_ds_default["train"].num_rows == 1
+ assert ds_config1.num_rows == hub_ds_config1["train"].num_rows == 3
+ assert ds_config2.num_rows == hub_ds_config2["train"].num_rows == 2
+
+ with pytest.raises(ValueError): # no config 'config3'
+ load_dataset(ds_name, "config3", download_mode="force_redownload")
+
+ def test_push_multiple_dataset_configs_to_hub_readme_metadata_content(self, temporary_repo):
+ ds_default = Dataset.from_dict({"a": [0], "b": [2]})
+ ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
+ ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
+
+ with temporary_repo() as ds_name:
+ ds_default.push_to_hub(ds_name, token=self._token)
+ ds_config1.push_to_hub(ds_name, "config1", token=self._token)
+ ds_config2.push_to_hub(ds_name, "config2", token=self._token)
+
+ # check that the config args were correctly pushed to README.md
+ ds_readme_path = cached_path(hf_hub_url(ds_name, "README.md"))
+ dataset_card_data = DatasetCard.load(ds_readme_path).data
+ assert METADATA_CONFIGS_FIELD in dataset_card_data
+ assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list)
+ assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == [
+ {
+ "config_name": "config1",
+ "data_files": [
+ {"split": "train", "path": "config1/train-*"},
+ ],
+ },
+ {
+ "config_name": "config2",
+ "data_files": [
+ {"split": "train", "path": "config2/train-*"},
+ ],
+ },
+ {
+ "config_name": "default",
+ "data_files": [
+ {"split": "train", "path": "data/train-*"},
+ ],
+ },
+ ]
+
+ def test_push_multiple_dataset_dict_configs_to_hub_load_dataset_builder(self, temporary_repo):
+ ds_default = Dataset.from_dict({"a": [0], "b": [1]})
+ ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
+ ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
+ ds_default = DatasetDict({"random": ds_default})
+ ds_config1 = DatasetDict({"random": ds_config1})
+ ds_config2 = DatasetDict({"random": ds_config2})
+
+ with temporary_repo() as ds_name:
+ ds_default.push_to_hub(ds_name, token=self._token)
+ ds_config1.push_to_hub(ds_name, "config1", token=self._token)
+ ds_config2.push_to_hub(ds_name, "config2", token=self._token)
+
+ ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config
+ assert len(ds_builder_default.BUILDER_CONFIGS) == 3
+ assert len(ds_builder_default.config.data_files["random"]) == 
1
+ assert fnmatch.fnmatch(
+ ds_builder_default.config.data_files["random"][0],
+ "*/data/random-*",
+ )
+ ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload")
+ assert len(ds_builder_config1.BUILDER_CONFIGS) == 3
+ assert len(ds_builder_config1.config.data_files["random"]) == 1
+ assert fnmatch.fnmatch(
+ ds_builder_config1.config.data_files["random"][0],
+ "*/config1/random-*",
+ )
+ ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload")
+ assert len(ds_builder_config2.BUILDER_CONFIGS) == 3
+ assert len(ds_builder_config2.config.data_files["random"]) == 1
+ assert fnmatch.fnmatch(
+ ds_builder_config2.config.data_files["random"][0],
+ "*/config2/random-*",
+ )
+ with pytest.raises(ValueError): # no config named 'config3'
+ load_dataset_builder(ds_name, "config3", download_mode="force_redownload")
+
+ def test_push_multiple_dataset_dict_configs_to_hub_load_dataset(self, temporary_repo):
+ ds_default = Dataset.from_dict({"a": [0], "b": [1]})
+ ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
+ ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
+ ds_default = DatasetDict({"train": ds_default, "random": ds_default})
+ ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1})
+ ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2})
+
+ with temporary_repo() as ds_name:
+ ds_default.push_to_hub(ds_name, token=self._token)
+ ds_config1.push_to_hub(ds_name, "config1", token=self._token)
+ ds_config2.push_to_hub(ds_name, "config2", token=self._token)
+
+ files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset"))
+ assert files == [
+ ".gitattributes",
+ "README.md",
+ "config1/random-00000-of-00001.parquet",
+ "config1/train-00000-of-00001.parquet",
+ "config2/random-00000-of-00001.parquet",
+ "config2/train-00000-of-00001.parquet",
+ "data/random-00000-of-00001.parquet",
+ "data/train-00000-of-00001.parquet",
+ ]
+
+ hub_ds_default = load_dataset(ds_name, download_mode="force_redownload")
+ hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload")
+ hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload")
+
+ # two splits
+ expected_splits = ["random", "train"]
+ assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 2
+ assert sorted(hub_ds_default) == sorted(hub_ds_config1) == sorted(hub_ds_config2) == expected_splits
+
+ for split in expected_splits:
+ assert ds_default[split].column_names == hub_ds_default[split].column_names == ["a", "b"]
+ assert ds_config1[split].column_names == hub_ds_config1[split].column_names == ["x", "y"]
+ assert ds_config2[split].column_names == hub_ds_config2[split].column_names == ["foo", "bar"]
+
+ assert ds_default[split].features == hub_ds_default[split].features
+ assert ds_config1[split].features == hub_ds_config1[split].features
+ assert ds_config2[split].features == hub_ds_config2[split].features
+
+ assert ds_default[split].num_rows == hub_ds_default[split].num_rows == 1
+ assert ds_config1[split].num_rows == hub_ds_config1[split].num_rows == 3
+ assert ds_config2[split].num_rows == hub_ds_config2[split].num_rows == 2
+
+ with pytest.raises(ValueError): # no config 'config3'
+ load_dataset(ds_name, "config3", download_mode="force_redownload")
+
+ def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content(self, temporary_repo):
+ ds_default = Dataset.from_dict({"a": [0], "b": [1]})
+ ds_config1 = 
Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
+ ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
+ ds_default = DatasetDict({"train": ds_default, "random": ds_default})
+ ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1})
+ ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2})
+
+ with temporary_repo() as ds_name:
+ ds_default.push_to_hub(ds_name, token=self._token)
+ ds_config1.push_to_hub(ds_name, "config1", token=self._token)
+ ds_config2.push_to_hub(ds_name, "config2", token=self._token)
+
+ # check that the config args were correctly pushed to README.md
+ ds_readme_path = cached_path(hf_hub_url(ds_name, "README.md"))
+ dataset_card_data = DatasetCard.load(ds_readme_path).data
+ assert METADATA_CONFIGS_FIELD in dataset_card_data
+ assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list)
+ assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == [
+ {
+ "config_name": "config1",
+ "data_files": [
+ {"split": "train", "path": "config1/train-*"},
+ {"split": "random", "path": "config1/random-*"},
+ ],
+ },
+ {
+ "config_name": "config2",
+ "data_files": [
+ {"split": "train", "path": "config2/train-*"},
+ {"split": "random", "path": "config2/random-*"},
+ ],
+ },
+ {
+ "config_name": "default",
+ "data_files": [
+ {"split": "train", "path": "data/train-*"},
+ {"split": "random", "path": "data/random-*"},
+ ],
+ },
+ ]
+
+ def test_push_dataset_to_hub_with_config_no_metadata_configs(self, temporary_repo):
+ ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
+ ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
+ parquet_buf = BytesIO()
+ ds.to_parquet(parquet_buf)
+ parquet_content = parquet_buf.getvalue()
+
+ with temporary_repo() as ds_name:
+ self._api.create_repo(ds_name, token=self._token, repo_type="dataset")
+ # the old push_to_hub used to upload only the parquet files - without metadata configs
+ self._api.upload_file(
+ path_or_fileobj=parquet_content,
+ path_in_repo="data/train-00000-of-00001.parquet",
+ repo_id=ds_name,
+ repo_type="dataset",
+ token=self._token,
+ )
+ ds_another_config.push_to_hub(ds_name, "another_config", token=self._token)
+ ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload")
+ assert len(ds_builder.config.data_files) == 1
+ assert len(ds_builder.config.data_files["train"]) == 1
+ assert fnmatch.fnmatch(ds_builder.config.data_files["train"][0], "*/data/train-00000-of-00001.parquet")
+ ds_another_config_builder = load_dataset_builder(
+ ds_name, "another_config", download_mode="force_redownload"
+ )
+ assert len(ds_another_config_builder.config.data_files) == 1
+ assert len(ds_another_config_builder.config.data_files["train"]) == 1
+ assert fnmatch.fnmatch(
+ ds_another_config_builder.config.data_files["train"][0],
+ "*/another_config/train-00000-of-00001.parquet",
+ )
+
+ def test_push_dataset_dict_to_hub_with_config_no_metadata_configs(self, temporary_repo):
+ ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]})
+ ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]})
+ parquet_buf = BytesIO()
+ ds.to_parquet(parquet_buf)
+ parquet_content = parquet_buf.getvalue()
+
+ local_ds_another_config = DatasetDict({"random": ds_another_config})
+
+ with temporary_repo() as ds_name:
+ self._api.create_repo(ds_name, token=self._token, repo_type="dataset")
+ # the old push_to_hub used to upload only the parquet files - without metadata configs
+ self._api.upload_file(
+ path_or_fileobj=parquet_content,
+ 
path_in_repo="data/random-00000-of-00001.parquet", + repo_id=ds_name, + repo_type="dataset", + token=self._token, + ) + local_ds_another_config.push_to_hub(ds_name, "another_config", token=self._token) + ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload") + assert len(ds_builder.config.data_files) == 1 + assert len(ds_builder.config.data_files["random"]) == 1 + assert fnmatch.fnmatch(ds_builder.config.data_files["random"][0], "*/data/random-00000-of-00001.parquet") + ds_another_config_builder = load_dataset_builder( + ds_name, "another_config", download_mode="force_redownload" + ) + assert len(ds_another_config_builder.config.data_files) == 1 + assert len(ds_another_config_builder.config.data_files["random"]) == 1 + assert fnmatch.fnmatch( + ds_another_config_builder.config.data_files["random"][0], + "*/another_config/random-00000-of-00001.parquet", + ) + + +class DummyFolderBasedBuilder(FolderBasedBuilder): + BASE_FEATURE = dict + BASE_COLUMN_NAME = "base" + BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig + EXTENSIONS = [".txt"] + # CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label") + + +@pytest.fixture(params=[".jsonl", ".csv"]) +def text_file_with_metadata(request, tmp_path, text_file): + metadata_filename_extension = request.param + data_dir = tmp_path / "data_dir" + data_dir.mkdir() + text_file_path = data_dir / "file.txt" + shutil.copyfile(text_file, text_file_path) + metadata_file_path = data_dir / f"metadata{metadata_filename_extension}" + metadata = textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Dummy file"} + """ + if metadata_filename_extension == ".jsonl" + else """\ + file_name,additional_feature + file.txt,Dummy file + """ + ) + with open(metadata_file_path, "w", encoding="utf-8") as f: + f.write(metadata) + return text_file_path, metadata_file_path + + +@for_all_test_methods(xfail_if_500_502_http_error) +@pytest.mark.usefixtures("ci_hub_config", "ci_hfh_hf_hub_url") +class TestLoadFromHub: + _api = HfApi(endpoint=CI_HUB_ENDPOINT) + _token = CI_HUB_USER_TOKEN + + def test_load_dataset_with_metadata_file(self, temporary_repo, text_file_with_metadata, tmp_path): + text_file_path, metadata_file_path = text_file_with_metadata + data_dir_path = text_file_path.parent + cache_dir_path = tmp_path / ".cache" + cache_dir_path.mkdir() + with temporary_repo() as repo_id: + self._api.create_repo(repo_id, token=self._token, repo_type="dataset") + self._api.upload_folder( + folder_path=str(data_dir_path), + repo_id=repo_id, + repo_type="dataset", + token=self._token, + ) + data_files = [ + f"hf://datasets/{repo_id}/{text_file_path.name}", + f"hf://datasets/{repo_id}/{metadata_file_path.name}", + ] + builder = DummyFolderBasedBuilder( + dataset_name=repo_id.split("/")[-1], data_files=data_files, cache_dir=str(cache_dir_path) + ) + download_manager = DownloadManager() + gen_kwargs = builder._split_generators(download_manager)[0].gen_kwargs + generator = builder._generate_examples(**gen_kwargs) + result = [example for _, example in generator] + assert len(result) == 1 + + def test_get_data_patterns(self, temporary_repo, tmp_path): + repo_dir = tmp_path / "test_get_data_patterns" + data_dir = repo_dir / "data" + data_dir.mkdir(parents=True) + data_file = data_dir / "train-00001-of-00009.parquet" + data_file.touch() + with temporary_repo() as repo_id: + self._api.create_repo(repo_id, token=self._token, repo_type="dataset") + self._api.upload_folder( + folder_path=str(repo_dir), + repo_id=repo_id, + 
repo_type="dataset", + token=self._token, + ) + data_file_patterns = get_data_patterns(f"hf://datasets/{repo_id}") + assert data_file_patterns == { + "train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"] + } diff --git a/testbed/huggingface__datasets/tests/test_warnings.py b/testbed/huggingface__datasets/tests/test_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..eedcbb82ae4b2b17f02fe1e3487a6ca633f9ca75 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_warnings.py @@ -0,0 +1,34 @@ +import pytest + +from datasets import inspect_metric, list_metrics, load_metric + + +@pytest.fixture +def mock_emitted_deprecation_warnings(monkeypatch): + monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set()) + + +# Used by list_metrics +@pytest.fixture +def mock_hfh(monkeypatch): + class MetricMock: + def __init__(self, metric_id): + self.id = metric_id + + class HfhMock: + _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] + + def list_metrics(self): + return self._metrics + + monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock()) + + +@pytest.mark.parametrize( + "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] +) +def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path): + if "tmp_path" in args: + args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args) + with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"): + func(*args) diff --git a/testbed/huggingface__datasets/tests/utils.py b/testbed/huggingface__datasets/tests/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1fe38e82774e0e81fe751f9af94c8b0a89b74443 --- /dev/null +++ b/testbed/huggingface__datasets/tests/utils.py @@ -0,0 +1,554 @@ +import asyncio +import importlib.metadata +import os +import re +import sys +import tempfile +import unittest +from contextlib import contextmanager +from copy import deepcopy +from distutils.util import strtobool +from enum import Enum +from importlib.util import find_spec +from pathlib import Path +from unittest.mock import patch + +import pyarrow as pa +import pytest +import requests +from packaging import version + +from datasets import config + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = strtobool(value) + except ValueError: + # More values are supported, but let's keep the message simple. 
+ raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False) +_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True) +_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True) + +# Compression +require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") +require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") +require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") + +# Audio +require_sndfile = pytest.mark.skipif( + # On Windows and OS X, soundfile installs sndfile + find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"), + reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", +) + +# Beam +require_beam = pytest.mark.skipif( + not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), + reason="test requires apache-beam and a compatible dill version", +) + +# Dill-cloudpickle compatibility +require_dill_gt_0_3_2 = pytest.mark.skipif( + config.DILL_VERSION <= version.parse("0.3.2"), + reason="test requires dill>0.3.2 for cloudpickle compatibility", +) + +# Windows +require_not_windows = pytest.mark.skipif( + sys.platform == "win32", + reason="test should not be run on Windows", +) + + +def require_faiss(test_case): + """ + Decorator marking a test that requires Faiss. + + These tests are skipped when Faiss isn't installed. + + """ + try: + import faiss # noqa + except ImportError: + test_case = unittest.skip("test requires faiss")(test_case) + return test_case + + +def require_regex(test_case): + """ + Decorator marking a test that requires regex. + + These tests are skipped when Regex isn't installed. + + """ + try: + import regex # noqa + except ImportError: + test_case = unittest.skip("test requires regex")(test_case) + return test_case + + +def require_elasticsearch(test_case): + """ + Decorator marking a test that requires ElasticSearch. + + These tests are skipped when ElasticSearch isn't installed. + + """ + try: + import elasticsearch # noqa + except ImportError: + test_case = unittest.skip("test requires elasticsearch")(test_case) + return test_case + + +def require_sqlalchemy(test_case): + """ + Decorator marking a test that requires SQLAlchemy. + + These tests are skipped when SQLAlchemy isn't installed. + + """ + try: + import sqlalchemy # noqa + except ImportError: + test_case = unittest.skip("test requires sqlalchemy")(test_case) + return test_case + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. + + These tests are skipped when PyTorch isn't installed. + + """ + if not config.TORCH_AVAILABLE: + test_case = unittest.skip("test requires PyTorch")(test_case) + return test_case + + +def require_tf(test_case): + """ + Decorator marking a test that requires TensorFlow. + + These tests are skipped when TensorFlow isn't installed. + + """ + if not config.TF_AVAILABLE: + test_case = unittest.skip("test requires TensorFlow")(test_case) + return test_case + + +def require_jax(test_case): + """ + Decorator marking a test that requires JAX. + + These tests are skipped when JAX isn't installed. 
+ + """ + if not config.JAX_AVAILABLE: + test_case = unittest.skip("test requires JAX")(test_case) + return test_case + + +def require_pil(test_case): + """ + Decorator marking a test that requires Pillow. + + These tests are skipped when Pillow isn't installed. + + """ + if not config.PIL_AVAILABLE: + test_case = unittest.skip("test requires Pillow")(test_case) + return test_case + + +def require_transformers(test_case): + """ + Decorator marking a test that requires transformers. + + These tests are skipped when transformers isn't installed. + + """ + try: + import transformers # noqa F401 + except ImportError: + return unittest.skip("test requires transformers")(test_case) + else: + return test_case + + +def require_tiktoken(test_case): + """ + Decorator marking a test that requires tiktoken. + + These tests are skipped when transformers isn't installed. + + """ + try: + import tiktoken # noqa F401 + except ImportError: + return unittest.skip("test requires tiktoken")(test_case) + else: + return test_case + + +def require_spacy(test_case): + """ + Decorator marking a test that requires spacy. + + These tests are skipped when they aren't installed. + + """ + try: + import spacy # noqa F401 + except ImportError: + return unittest.skip("test requires spacy")(test_case) + else: + return test_case + + +def require_spacy_model(model): + """ + Decorator marking a test that requires a spacy model. + + These tests are skipped when they aren't installed. + """ + + def _require_spacy_model(test_case): + try: + import spacy # noqa F401 + + spacy.load(model) + except ImportError: + return unittest.skip("test requires spacy")(test_case) + except OSError: + return unittest.skip("test requires spacy model '{}'".format(model))(test_case) + else: + return test_case + + return _require_spacy_model + + +def require_pyspark(test_case): + """ + Decorator marking a test that requires pyspark. + + These tests are skipped when pyspark isn't installed. + + """ + try: + import pyspark # noqa F401 + except ImportError: + return unittest.skip("test requires pyspark")(test_case) + else: + return test_case + + +def require_joblibspark(test_case): + """ + Decorator marking a test that requires joblibspark. + + These tests are skipped when pyspark isn't installed. + + """ + try: + import joblibspark # noqa F401 + except ImportError: + return unittest.skip("test requires joblibspark")(test_case) + else: + return test_case + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable + to a truthy value to run them. + + """ + if not _run_slow_tests or _run_slow_tests == 0: + test_case = unittest.skip("test is slow")(test_case) + return test_case + + +def local(test_case): + """ + Decorator marking a test as local + + Local tests are run by default. Set the RUN_LOCAL environment variable + to a falsy value to not run them. + """ + if not _run_local_tests or _run_local_tests == 0: + test_case = unittest.skip("test is local")(test_case) + return test_case + + +def packaged(test_case): + """ + Decorator marking a test as packaged + + Packaged tests are run by default. Set the RUN_PACKAGED environment variable + to a falsy value to not run them. + """ + if not _run_packaged_tests or _run_packaged_tests == 0: + test_case = unittest.skip("test is packaged")(test_case) + return test_case + + +def remote(test_case): + """ + Decorator marking a test as one that relies on GitHub or the Hugging Face Hub. + + Remote tests are skipped by default. 
Set the RUN_REMOTE environment variable + to a falsy value to not run them. + """ + if not _run_remote_tests or _run_remote_tests == 0: + test_case = unittest.skip("test requires remote")(test_case) + return test_case + + +def for_all_test_methods(*decorators): + def decorate(cls): + for name, fn in cls.__dict__.items(): + if callable(fn) and name.startswith("test"): + for decorator in decorators: + fn = decorator(fn) + setattr(cls, name, fn) + return cls + + return decorate + + +class RequestWouldHangIndefinitelyError(Exception): + pass + + +class OfflineSimulationMode(Enum): + CONNECTION_FAILS = 0 + CONNECTION_TIMES_OUT = 1 + HF_DATASETS_OFFLINE_SET_TO_1 = 2 + + +@contextmanager +def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16): + """ + Simulate offline mode. + + There are three offline simulatiom modes: + + CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call. + Connection errors are created by mocking socket.socket + CONNECTION_TIMES_OUT: the connection hangs until it times out. + The default timeout value is low (1e-16) to speed up the tests. + Timeout errors are created by mocking requests.request + HF_DATASETS_OFFLINE_SET_TO_1: the HF_DATASETS_OFFLINE environment variable is set to 1. + This makes the http/ftp calls of the library instantly fail and raise an OfflineModeEmabled error. + """ + online_request = requests.Session().request + + def timeout_request(session, method, url, **kwargs): + # Change the url to an invalid url so that the connection hangs + invalid_url = "https://10.255.255.1" + if kwargs.get("timeout") is None: + raise RequestWouldHangIndefinitelyError( + f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." + ) + kwargs["timeout"] = timeout + try: + return online_request(method, invalid_url, **kwargs) + except Exception as e: + # The following changes in the error are just here to make the offline timeout error prettier + e.request.url = url + max_retry_error = e.args[0] + max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),) + e.args = (max_retry_error,) + raise + + def raise_connection_error(session, prepared_request, **kwargs): + raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request) + + if mode is OfflineSimulationMode.CONNECTION_FAILS: + with patch("requests.Session.send", raise_connection_error): + yield + elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: + # inspired from https://stackoverflow.com/a/904609 + with patch("requests.Session.request", timeout_request): + yield + elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: + with patch("datasets.config.HF_DATASETS_OFFLINE", True): + yield + else: + raise ValueError("Please use a value from the OfflineSimulationMode enum.") + + +@contextmanager +def set_current_working_directory_to_temp_dir(*args, **kwargs): + original_working_dir = str(Path().resolve()) + with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir: + try: + os.chdir(tmp_dir) + yield + finally: + os.chdir(original_working_dir) + + +@contextmanager +def assert_arrow_memory_increases(): + import gc + + gc.collect() + previous_allocated_memory = pa.total_allocated_bytes() + yield + assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." 
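# Editor's note: a minimal usage sketch (not part of the diff) for the `offline`
# context manager defined above. It assumes the module is importable as
# `tests.utils` from the repository root; the test itself is illustrative,
# not an existing test in this patch.
import pytest
import requests

from tests.utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


def test_network_calls_fail_when_offline():
    # CONNECTION_FAILS patches requests.Session.send, so any HTTP call raises ConnectionError
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")

    # CONNECTION_TIMES_OUT patches requests.Session.request and rejects calls without a timeout
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.Session().request("GET", "https://huggingface.co")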
+
+
+@contextmanager
+def assert_arrow_memory_doesnt_increase():
+    import gc
+
+    gc.collect()
+    previous_allocated_memory = pa.total_allocated_bytes()
+    yield
+    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
+
+
+def is_rng_equal(rng1, rng2):
+    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
+
+
+def xfail_if_500_502_http_error(func):
+    import decorator
+    from requests.exceptions import HTTPError
+
+    def _wrapper(func, *args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except HTTPError as err:
+            if str(err).startswith("500") or str(err).startswith("502"):
+                pytest.xfail(str(err))
+            raise err
+
+    return decorator.decorator(_wrapper, func)
+
+
+# --- distributed testing functions --- #
+
+# copied from transformers
+# originally adapted from https://stackoverflow.com/a/59041913/9201239
+
+
+class _RunOutput:
+    def __init__(self, returncode, stdout, stderr):
+        self.returncode = returncode
+        self.stdout = stdout
+        self.stderr = stderr
+
+
+async def _read_stream(stream, callback):
+    while True:
+        line = await stream.readline()
+        if line:
+            callback(line)
+        else:
+            break
+
+
+async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
+    if echo:
+        print("\nRunning: ", " ".join(cmd))
+
+    p = await asyncio.create_subprocess_exec(
+        cmd[0],
+        *cmd[1:],
+        stdin=stdin,
+        stdout=asyncio.subprocess.PIPE,
+        stderr=asyncio.subprocess.PIPE,
+        env=env,
+    )
+
+    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
+    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
+    #
+    # If it starts hanging, will need to switch to the following code. The problem is that no data
+    # will be seen until it's done and if it hangs for example there will be no debug info.
+    # out, err = await p.communicate()
+    # return _RunOutput(p.returncode, out, err)
+
+    out = []
+    err = []
+
+    def tee(line, sink, pipe, label=""):
+        line = line.decode("utf-8").rstrip()
+        sink.append(line)
+        if not quiet:
+            print(label, line, file=pipe)
+
+    # XXX: the timeout doesn't seem to make any difference here
+    await asyncio.wait(
+        [
+            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
+            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
+        ],
+        timeout=timeout,
+    )
+    return _RunOutput(await p.wait(), out, err)
+
+
+def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
+    loop = asyncio.get_event_loop()
+    result = loop.run_until_complete(
+        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
+    )
+
+    cmd_str = " ".join(cmd)
+    if result.returncode > 0:
+        stderr = "\n".join(result.stderr)
+        raise RuntimeError(
+            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
+            f"The combined stderr from workers follows:\n{stderr}"
+        )
+
+    # check that the subprocess actually did run and produced some output, should the test rely on
+    # the remote side to do the testing
+    if not result.stdout and not result.stderr:
+        raise RuntimeError(f"'{cmd_str}' produced no output.")
+
+    return result
+
+
+def pytest_xdist_worker_id():
+    """
+    Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
+    if `-n 1` or `pytest-xdist` isn't being used.
+ """ + worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") + worker = re.sub(r"^gw", "", worker, 0, re.M) + return int(worker) + + +def get_torch_dist_unique_port(): + """ + Returns a port number that can be fed to `torchrun`'s `--master_port` argument. + + Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same + port at once. + """ + port = 29500 + uniq_delta = pytest_xdist_worker_id() + return port + uniq_delta