diff --git a/.gitattributes b/.gitattributes index 1672eb81e01ce93b2a8ef6aa890e82e91863cae9..5bad330e114d64a3d3446e1b88e05534e919a9e8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -68,3 +68,9 @@ testbed/sphinx-doc__sphinx/tests/roots/test-images/img.pdf filter=lfs diff=lfs m testbed/sphinx-doc__sphinx/tests/roots/test-images/subdir/svgimg.pdf filter=lfs diff=lfs merge=lfs -text testbed/sphinx-doc__sphinx/tests/roots/test-ext-imgconverter/img.pdf filter=lfs diff=lfs merge=lfs -text testbed/sphinx-doc__sphinx/sphinx/locale/fa/LC_MESSAGES/sphinx.mo filter=lfs diff=lfs merge=lfs -text +testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/1.0 filter=lfs diff=lfs merge=lfs -text +testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/2.0 filter=lfs diff=lfs merge=lfs -text +testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/3.0 filter=lfs diff=lfs merge=lfs -text +testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 filter=lfs diff=lfs merge=lfs -text +testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 filter=lfs diff=lfs merge=lfs -text +testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad filter=lfs diff=lfs merge=lfs -text diff --git a/testbed/gradio-app__gradio/.dockerignore b/testbed/gradio-app__gradio/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..916fc50c4d5d5b47fc23bd6b3137c74564926a15 --- /dev/null +++ b/testbed/gradio-app__gradio/.dockerignore @@ -0,0 +1,40 @@ +# Python build +.eggs/ +gradio.egg-info/* +!gradio.egg-info/requires.txt +!gradio.egg-info/PKG-INFO +dist/ +*.pyc +__pycache__/ +*.py[cod] +*$py.class +build/ + +# JS build +gradio/templates/frontend/static +gradio/templates/frontend/cdn + +# Secrets +.env + +# Gradio run artifacts +*.db +*.sqlite3 +gradio/launches.json + +# Tests +.coverage +coverage.xml +test.txt + +# Demos +demo/tmp.zip +demo/flagged +demo/files/*.avi +demo/files/*.mp4 + +# Etc +.idea/* +.DS_Store +*.bak +workspace.code-workspace \ No newline at end of file diff --git a/testbed/gradio-app__gradio/.editorconfig b/testbed/gradio-app__gradio/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..f4e045fdb9a018c79ddf8e8c52de8b01eecd38ce --- /dev/null +++ b/testbed/gradio-app__gradio/.editorconfig @@ -0,0 +1,8 @@ + +root = true + +[{js/**,client/js/**}] +end_of_line = lf +insert_final_newline = true +indent_style = tab +tab_width = 2 diff --git a/testbed/gradio-app__gradio/.git-blame-ignore-revs b/testbed/gradio-app__gradio/.git-blame-ignore-revs new file mode 100644 index 0000000000000000000000000000000000000000..4873f58fe2929ea859a7de8024fbb7afd238b160 --- /dev/null +++ b/testbed/gradio-app__gradio/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# https://github.com/gradio-app/gradio/pull/4487 - refactor components.py to separate files +69f36f98535c904e7cac2b4942cecc747ed7443c diff --git a/testbed/gradio-app__gradio/.gitignore b/testbed/gradio-app__gradio/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..29f6cd4ee03babac8abe1408b16a4a767e1df289 --- /dev/null +++ b/testbed/gradio-app__gradio/.gitignore @@ -0,0 +1,78 @@ +# Python build +.eggs/ +gradio.egg-info +dist/ +*.pyc +__pycache__/ +*.py[cod] +*$py.class +build/ +__tmp/* + +# JS build +gradio/templates/cdn +gradio/templates/frontend + +# Secrets +.env + +# Gradio run artifacts +*.db +*.sqlite3 +gradio/launches.json +flagged/ +gradio_cached_examples/ +tmp.zip + +# Tests +.coverage +coverage.xml +test.txt +**/snapshots/**/*.png 
+playwright-report/ + +# Demos +demo/tmp.zip +demo/files/*.avi +demo/files/*.mp4 +demo/all_demos/demos/* +demo/all_demos/requirements.txt +demo/*/config.json +demo/annotatedimage_component/*.png + +# Etc +.idea/* +.DS_Store +*.bak +workspace.code-workspace +*.h5 + +# dev containers +.pnpm-store/ + +# log files +.pnpm-debug.log + +# Local virtualenv for devs +.venv* + +# FRP +gradio/frpc_* +.vercel + +# js +node_modules +public/build/ +test-results +client/js/test.js +.config/test.py + +# storybook +storybook-static +build-storybook.log +js/storybook/theme.css + +# playwright +.config/playwright +!.config/playwright/index.html +!.config/playwright/index.ts diff --git a/testbed/gradio-app__gradio/CHANGELOG.md b/testbed/gradio-app__gradio/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..a872a0424c2ae2c44b06eb7b7720ffee9fd95214 --- /dev/null +++ b/testbed/gradio-app__gradio/CHANGELOG.md @@ -0,0 +1,4003 @@ +# gradio + +## 3.45.1 + +### Fixes + +- [#5701](https://github.com/gradio-app/gradio/pull/5701) [`ee8eec1e5`](https://github.com/gradio-app/gradio/commit/ee8eec1e5e544a0127e0aa68c2522a7085b8ada5) - Fix for regression in rendering empty Markdown. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 3.45.0 + +### Features + +- [#5675](https://github.com/gradio-app/gradio/pull/5675) [`b619e6f6e`](https://github.com/gradio-app/gradio/commit/b619e6f6e4ca55334fb86da53790e45a8f978566) - Reorganize Docs Navbar and Fill in Gaps. Thanks [@aliabd](https://github.com/aliabd)! +- [#5669](https://github.com/gradio-app/gradio/pull/5669) [`c5e969559`](https://github.com/gradio-app/gradio/commit/c5e969559612f956afcdb0c6f7b22ab8275bc49a) - Fix small issues in docs and guides. Thanks [@aliabd](https://github.com/aliabd)! +- [#5682](https://github.com/gradio-app/gradio/pull/5682) [`c57f1b75e`](https://github.com/gradio-app/gradio/commit/c57f1b75e272c76b0af4d6bd0c7f44743ff34f26) - Fix functional tests. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5681](https://github.com/gradio-app/gradio/pull/5681) [`40de3d217`](https://github.com/gradio-app/gradio/commit/40de3d2178b61ebe424b6f6228f94c0c6f679bea) - add query parameters to the `gr.Request` object through the `query_params` attribute. Thanks [@DarhkVoyd](https://github.com/DarhkVoyd)! +- [#5653](https://github.com/gradio-app/gradio/pull/5653) [`ea0e00b20`](https://github.com/gradio-app/gradio/commit/ea0e00b207b4b90a10e9d054c4202d4e705a29ba) - Prevent Clients from accessing API endpoints that set `api_name=False`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5639](https://github.com/gradio-app/gradio/pull/5639) [`e1874aff8`](https://github.com/gradio-app/gradio/commit/e1874aff814d13b23f3e59ef239cc13e18ad3fa7) - Add `gr.on` listener method. Thanks [@aliabid94](https://github.com/aliabid94)! +- [#5652](https://github.com/gradio-app/gradio/pull/5652) [`2e25d4305`](https://github.com/gradio-app/gradio/commit/2e25d430582264945ae3316acd04c4453a25ce38) - Pause autoscrolling if a user scrolls up in a `gr.Textbox` and resume autoscrolling if they go all the way down. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5642](https://github.com/gradio-app/gradio/pull/5642) [`21c7225bd`](https://github.com/gradio-app/gradio/commit/21c7225bda057117a9d3311854323520218720b5) - Improve plot rendering. Thanks [@aliabid94](https://github.com/aliabid94)! 
+- [#5677](https://github.com/gradio-app/gradio/pull/5677) [`9f9af327c`](https://github.com/gradio-app/gradio/commit/9f9af327c9115356433ec837f349d6286730fb97) - [Refactoring] Convert async functions that don't contain `await` statements to normal functions. Thanks [@whitphx](https://github.com/whitphx)!
+- [#5660](https://github.com/gradio-app/gradio/pull/5660) [`d76555a12`](https://github.com/gradio-app/gradio/commit/d76555a122b545f0df7c9e7c1ca7bd2a6e262c86) - Fix secondary hue bug in gr.themes.builder(). Thanks [@hellofreckles](https://github.com/hellofreckles)!
+- [#5697](https://github.com/gradio-app/gradio/pull/5697) [`f4e4f82b5`](https://github.com/gradio-app/gradio/commit/f4e4f82b58a65efca9030a7e8e7c5ace60d8cc10) - Increase Slider clickable area. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)!
+- [#5671](https://github.com/gradio-app/gradio/pull/5671) [`6a36c3b78`](https://github.com/gradio-app/gradio/commit/6a36c3b786700600d3826ce1e0629cc5308ddd47) - chore(deps): update dependency @types/prismjs to v1.26.1. Thanks [@renovate](https://github.com/apps/renovate)!
+- [#5240](https://github.com/gradio-app/gradio/pull/5240) [`da05e59a5`](https://github.com/gradio-app/gradio/commit/da05e59a53bbad15e5755a47f46685da18e1031e) - Cleanup of .update and .get_config per component. Thanks [@aliabid94](https://github.com/aliabid94)! get_config is removed; the config is simply any attribute in the Block that shares a name with one of the constructor parameters. update is not removed, for backwards compatibility, but it is deprecated; instead, return the component itself. Created an updateable decorator that simply checks whether we're in an update and, if so, skips the constructor and wraps the args and kwargs in an update dictionary. easy peasy.
+- [#5635](https://github.com/gradio-app/gradio/pull/5635) [`38fafb9e2`](https://github.com/gradio-app/gradio/commit/38fafb9e2a5509b444942e1d5dd48dffa20066f4) - Fix typos in Gallery docs. Thanks [@atesgoral](https://github.com/atesgoral)!
+- [#5590](https://github.com/gradio-app/gradio/pull/5590) [`d1ad1f671`](https://github.com/gradio-app/gradio/commit/d1ad1f671caef9f226eb3965f39164c256d8615c) - Attach `elem_classes` selectors to layout elements, and an id to the Tab button (for targeting via CSS/JS). Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5554](https://github.com/gradio-app/gradio/pull/5554) [`75ddeb390`](https://github.com/gradio-app/gradio/commit/75ddeb390d665d4484667390a97442081b49a423) - Accessibility Improvements. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5598](https://github.com/gradio-app/gradio/pull/5598) [`6b1714386`](https://github.com/gradio-app/gradio/commit/6b17143868bdd2c1400af1199a01c1c0d5c27477) - Upgrade Pyodide to 0.24.0 and install the native orjson package. Thanks [@whitphx](https://github.com/whitphx)!
+
+### Fixes
+
+- [#5625](https://github.com/gradio-app/gradio/pull/5625) [`9ccc4794a`](https://github.com/gradio-app/gradio/commit/9ccc4794a72ce8319417119f6c370e7af3ffca6d) - Use ContextVar instead of threading.local(). Thanks [@cbensimon](https://github.com/cbensimon)!
+- [#5602](https://github.com/gradio-app/gradio/pull/5602) [`54d21d3f1`](https://github.com/gradio-app/gradio/commit/54d21d3f18f2ddd4e796d149a0b41461f49c711b) - Ensure `HighlightedText` with `merge_elements` loads without a value. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5636](https://github.com/gradio-app/gradio/pull/5636) [`fb5964fb8`](https://github.com/gradio-app/gradio/commit/fb5964fb88082e7b956853b543c468116811cab9) - Fix bug in example cache loading event. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5633](https://github.com/gradio-app/gradio/pull/5633) [`341402337`](https://github.com/gradio-app/gradio/commit/34140233794c29d4722020e13c2d045da642dfae) - Allow Gradio apps containing `gr.Radio()`, `gr.Checkboxgroup()`, or `gr.Dropdown()` to be loaded with `gr.load()`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5616](https://github.com/gradio-app/gradio/pull/5616) [`7c34b434a`](https://github.com/gradio-app/gradio/commit/7c34b434aae0eb85f112a1dc8d66cefc7e2296b2) - Fix width and height issues that would cut off content in `gr.DataFrame`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5604](https://github.com/gradio-app/gradio/pull/5604) [`faad01f8e`](https://github.com/gradio-app/gradio/commit/faad01f8e10ef6d18249b1a4587477c59b74adb2) - Add `render_markdown` parameter to chatbot. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5593](https://github.com/gradio-app/gradio/pull/5593) [`88d43bd12`](https://github.com/gradio-app/gradio/commit/88d43bd124792d216da445adef932a2b02f5f416) - Fixes avatar image in chatbot being squashed. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5690](https://github.com/gradio-app/gradio/pull/5690) [`6b8c8afd9`](https://github.com/gradio-app/gradio/commit/6b8c8afd981fea984da568e9a0bd8bfc2a9c06c4) - Fix incorrect behavior of `gr.load()` with `gr.Examples`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5696](https://github.com/gradio-app/gradio/pull/5696) [`e51fcd5d5`](https://github.com/gradio-app/gradio/commit/e51fcd5d54315e8b65ee40e3de4dab17579ff6d5) - setting share=True on Spaces or in wasm should warn instead of raising error. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 3.44.4 + +### Features + +- [#5514](https://github.com/gradio-app/gradio/pull/5514) [`52f783175`](https://github.com/gradio-app/gradio/commit/52f7831751b432411e109bd41add4ab286023a8e) - refactor: Use package.json for version management. Thanks [@DarhkVoyd](https://github.com/DarhkVoyd)! +- [#5535](https://github.com/gradio-app/gradio/pull/5535) [`d29b1ab74`](https://github.com/gradio-app/gradio/commit/d29b1ab740784d8c70f9ab7bc38bbbf7dd3ff737) - Makes sliders consistent across all browsers. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! + +### Fixes + +- [#5587](https://github.com/gradio-app/gradio/pull/5587) [`e0d61b8ba`](https://github.com/gradio-app/gradio/commit/e0d61b8baa0f6293f53b9bdb1647d42f9ae2583a) - Fix `.clear()` events for audio and image. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5534](https://github.com/gradio-app/gradio/pull/5534) [`d9e9ae43f`](https://github.com/gradio-app/gradio/commit/d9e9ae43f5c52c1f729af5a20e5d4f754689d429) - Guide fixes, esp. streaming audio. Thanks [@aliabid94](https://github.com/aliabid94)! +- [#5588](https://github.com/gradio-app/gradio/pull/5588) [`acdeff57e`](https://github.com/gradio-app/gradio/commit/acdeff57ece4672f943c374d537eaf47d3ec034f) - Allow multiple instances of Gradio with authentication to run on different ports. Thanks [@abidlabs](https://github.com/abidlabs)! 
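+As a minimal sketch of the multi-instance fix above (the credentials and port number are illustrative, not taken from the PR): launch one copy per process, giving each its own `server_port` and `auth`.
+
+```python
+import gradio as gr
+
+def greet(name):
+    return "Hello " + name + "!"
+
+demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+# Launch a second copy in another process with a different server_port;
+# each authenticated instance now keeps its own login session.
+demo.launch(auth=("admin", "secret"), server_port=7860)
+```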
+ +## 3.44.3 + +### Fixes + +- [#5562](https://github.com/gradio-app/gradio/pull/5562) [`50d9747d0`](https://github.com/gradio-app/gradio/commit/50d9747d061962cff7f60a8da648bb3781794102) - chore(deps): update dependency iframe-resizer to v4.3.7. Thanks [@renovate](https://github.com/apps/renovate)! +- [#5550](https://github.com/gradio-app/gradio/pull/5550) [`4ed5902e7`](https://github.com/gradio-app/gradio/commit/4ed5902e7dda2d95cd43e4ccaaef520ddd8eba57) - Adding basque language. Thanks [@EkhiAzur](https://github.com/EkhiAzur)! +- [#5547](https://github.com/gradio-app/gradio/pull/5547) [`290f51871`](https://github.com/gradio-app/gradio/commit/290f5187160cdbd7a786494fe3c19b0e70abe167) - typo in UploadButton's docstring. Thanks [@chaeheum3](https://github.com/chaeheum3)! +- [#5553](https://github.com/gradio-app/gradio/pull/5553) [`d1bf23cd2`](https://github.com/gradio-app/gradio/commit/d1bf23cd2c6da3692d7753856bfe7564d84778e0) - Modify Image examples docstring. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5563](https://github.com/gradio-app/gradio/pull/5563) [`ba64082ed`](https://github.com/gradio-app/gradio/commit/ba64082ed80c1ed9113497ae089e63f032dbcc75) - preprocess for components when type='index'. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 3.44.2 + +### Fixes + +- [#5537](https://github.com/gradio-app/gradio/pull/5537) [`301c7878`](https://github.com/gradio-app/gradio/commit/301c7878217f9fc531c0f28330b394f02955811b) - allow gr.Image() examples to take urls. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5544](https://github.com/gradio-app/gradio/pull/5544) [`a0cc9ac9`](https://github.com/gradio-app/gradio/commit/a0cc9ac931554e06dcb091158c9b9ac0cc580b6c) - Fixes dropdown breaking if a user types in invalid value and presses enter. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 3.44.1 + +### Fixes + +- [#5516](https://github.com/gradio-app/gradio/pull/5516) [`c5fe8eba`](https://github.com/gradio-app/gradio/commit/c5fe8ebadbf206e2f4199ccde4606e331a22148a) - Fix docstring of dropdown. Thanks [@hysts](https://github.com/hysts)! +- [#5529](https://github.com/gradio-app/gradio/pull/5529) [`81c9ca9a`](https://github.com/gradio-app/gradio/commit/81c9ca9a2e00d19334f632fec32081d36ad54c7f) - Fix `.update()` method in `gr.Dropdown()` to handle `choices`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5528](https://github.com/gradio-app/gradio/pull/5528) [`dc86e4a7`](https://github.com/gradio-app/gradio/commit/dc86e4a7e1c40b910c74558e6f88fddf9b3292bc) - Lazy load all images. Thanks [@aliabid94](https://github.com/aliabid94)! +- [#5525](https://github.com/gradio-app/gradio/pull/5525) [`21f1db40`](https://github.com/gradio-app/gradio/commit/21f1db40de6d1717eba97a550e11422a457ba7e9) - Ensure input value saves on dropdown blur. Thanks [@hannahblair](https://github.com/hannahblair)! + +## 3.44.0 + +### Features + +- [#5505](https://github.com/gradio-app/gradio/pull/5505) [`9ee20f49`](https://github.com/gradio-app/gradio/commit/9ee20f499f62c1fe5af6b8f84918b3a334eb1c8d) - Validate i18n file names with ISO-639x. Thanks [@hannahblair](https://github.com/hannahblair)! +- [#5475](https://github.com/gradio-app/gradio/pull/5475) [`c60b89b0`](https://github.com/gradio-app/gradio/commit/c60b89b0a54758a27277f0a6aa20d0653647c7c8) - Adding Central Kurdish. Thanks [@Hrazhan](https://github.com/Hrazhan)! 
+- [#5400](https://github.com/gradio-app/gradio/pull/5400) [`d112e261`](https://github.com/gradio-app/gradio/commit/d112e2611b0fc79ecedfaed367571f3157211387) - Allow interactive input in `gr.HighlightedText`. Thanks [@hannahblair](https://github.com/hannahblair)! +- [#5488](https://github.com/gradio-app/gradio/pull/5488) [`8909e42a`](https://github.com/gradio-app/gradio/commit/8909e42a7c6272358ad413588d27a5124d151205) - Adds `autoscroll` param to `gr.Textbox()`. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5384](https://github.com/gradio-app/gradio/pull/5384) [`ddc02268`](https://github.com/gradio-app/gradio/commit/ddc02268f731bd2ed04b7a5854accf3383f9a0da) - Allows the `gr.Dropdown` to have separate names and values, as well as enables `allow_custom_value` for multiselect dropdown. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5473](https://github.com/gradio-app/gradio/pull/5473) [`b271e738`](https://github.com/gradio-app/gradio/commit/b271e738860ca238ecdee2991f49b505c7559016) - Remove except asyncio.CancelledError which is no longer necessary due to 53d7025. Thanks [@whitphx](https://github.com/whitphx)! +- [#5474](https://github.com/gradio-app/gradio/pull/5474) [`041560f9`](https://github.com/gradio-app/gradio/commit/041560f9f11ca2560005b467bb412ee1becfc2b2) - Fix queueing.call_prediction to retrieve the default response class in the same manner as FastAPI's implementation. Thanks [@whitphx](https://github.com/whitphx)! +- [#5510](https://github.com/gradio-app/gradio/pull/5510) [`afcf3c48`](https://github.com/gradio-app/gradio/commit/afcf3c48e82712067d6d00a0caedb1562eb986f8) - Do not expose existence of files outside of working directory. Thanks [@abidlabs](https://github.com/abidlabs)! + +### Fixes + +- [#5459](https://github.com/gradio-app/gradio/pull/5459) [`bd2fda77`](https://github.com/gradio-app/gradio/commit/bd2fda77fc98d815f4fb670f535af453ebee9b80) - Dispatch `stop_recording` event in Audio. Thanks [@hannahblair](https://github.com/hannahblair)! +- [#5508](https://github.com/gradio-app/gradio/pull/5508) [`05715f55`](https://github.com/gradio-app/gradio/commit/05715f5599ae3e928d3183c7b0a7f5291f843a96) - Adds a `filterable` parameter to `gr.Dropdown` that controls whether user can type to filter choices. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5470](https://github.com/gradio-app/gradio/pull/5470) [`a4e010a9`](https://github.com/gradio-app/gradio/commit/a4e010a96f1d8a52b3ac645e03fe472b9c3cbbb1) - Fix share button position. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5496](https://github.com/gradio-app/gradio/pull/5496) [`82ec4d26`](https://github.com/gradio-app/gradio/commit/82ec4d2622a43c31b248b78e9410e2ac918f6035) - Allow interface with components to be run inside blocks. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 3.43.2 + +### Fixes + +- [#5456](https://github.com/gradio-app/gradio/pull/5456) [`6e381c4f`](https://github.com/gradio-app/gradio/commit/6e381c4f146cc8177a4e2b8e39f914f09cd7ff0c) - ensure dataframe doesn't steal focus. Thanks [@pngwn](https://github.com/pngwn)! + +## 3.43.1 + +### Fixes + +- [#5445](https://github.com/gradio-app/gradio/pull/5445) [`67bb7bcb`](https://github.com/gradio-app/gradio/commit/67bb7bcb6a95b7a00a8bdf612cf147850d919a44) - ensure dataframe doesn't scroll unless needed. Thanks [@pngwn](https://github.com/pngwn)! 
+- [#5447](https://github.com/gradio-app/gradio/pull/5447) [`7a4a89e5`](https://github.com/gradio-app/gradio/commit/7a4a89e5ca1dedb39e5366867501584b0c636bbb) - ensure iframe is correct size on spaces. Thanks [@pngwn](https://github.com/pngwn)! + +## 3.43.0 + +### Features + +- [#5165](https://github.com/gradio-app/gradio/pull/5165) [`c77f05ab`](https://github.com/gradio-app/gradio/commit/c77f05abb65b2828c9c19af4ec0a0c09412f9f6a) - Fix the Queue to call API endpoints without internal HTTP routing. Thanks [@whitphx](https://github.com/whitphx)! +- [#5427](https://github.com/gradio-app/gradio/pull/5427) [`aad7acd7`](https://github.com/gradio-app/gradio/commit/aad7acd7128dca05b227ecbba06db9f94d65b088) - Add sort to bar plot. Thanks [@Chaitanya134](https://github.com/Chaitanya134)! +- [#5342](https://github.com/gradio-app/gradio/pull/5342) [`afac0006`](https://github.com/gradio-app/gradio/commit/afac0006337ce2840cf497cd65691f2f60ee5912) - significantly improve the performance of `gr.Dataframe` for large datasets. Thanks [@pngwn](https://github.com/pngwn)! +- [#5417](https://github.com/gradio-app/gradio/pull/5417) [`d14d63e3`](https://github.com/gradio-app/gradio/commit/d14d63e30c4af3f9c2a664fd11b0a01943a8300c) - Auto scroll to bottom of textbox. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! + +### Fixes + +- [#5412](https://github.com/gradio-app/gradio/pull/5412) [`26fef8c7`](https://github.com/gradio-app/gradio/commit/26fef8c7f85a006c7e25cdbed1792df19c512d02) - Skip view_api request in js client when auth enabled. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5436](https://github.com/gradio-app/gradio/pull/5436) [`7ab4b70f`](https://github.com/gradio-app/gradio/commit/7ab4b70f6821afb4e85cef225d1235c19df8ebbf) - api_open does not take precedence over show_api. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! + +## 3.42.0 + +### Highlights + +#### Like/Dislike Button for Chatbot ([#5391](https://github.com/gradio-app/gradio/pull/5391) [`abf1c57d`](https://github.com/gradio-app/gradio/commit/abf1c57d7d85de0df233ee3b38aeb38b638477db)) + + Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! + +#### Added the ability to attach event listeners via decorators ([#5395](https://github.com/gradio-app/gradio/pull/5395) [`55fed04f`](https://github.com/gradio-app/gradio/commit/55fed04f559becb9c24f22cc6292dc572d709886)) + +e.g. + +```python +with gr.Blocks() as demo: + name = gr.Textbox(label="Name") + output = gr.Textbox(label="Output Box") + greet_btn = gr.Button("Greet") + + @greet_btn.click(inputs=name, outputs=output) + def greet(name): + return "Hello " + name + "!" +``` + + Thanks [@aliabid94](https://github.com/aliabid94)! + +### Features + +- [#5334](https://github.com/gradio-app/gradio/pull/5334) [`c5bf9138`](https://github.com/gradio-app/gradio/commit/c5bf91385a632dc9f612499ee01166ac6ae509a9) - Add chat bubble width param. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5267](https://github.com/gradio-app/gradio/pull/5267) [`119c8343`](https://github.com/gradio-app/gradio/commit/119c834331bfae60d4742c8f20e9cdecdd67e8c2) - Faster reload mode. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5373](https://github.com/gradio-app/gradio/pull/5373) [`79d8f9d8`](https://github.com/gradio-app/gradio/commit/79d8f9d891901683c5a1b7486efb44eab2478c96) - Adds `height` and `zoom_speed` parameters to `Model3D` component, as well as a button to reset the camera position. Thanks [@abidlabs](https://github.com/abidlabs)! 
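+A minimal sketch of the new `Model3D` parameters above (the model file and values are illustrative):
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    # height is in pixels; zoom_speed scales how quickly scrolling zooms the camera
+    gr.Model3D(value="duck.glb", height=400, zoom_speed=2.0)
+
+demo.launch()
+```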
+- [#5370](https://github.com/gradio-app/gradio/pull/5370) [`61803c65`](https://github.com/gradio-app/gradio/commit/61803c6545e73fce47e8740bd46721ab9bb0ba5c) - chore(deps): update dependency extendable-media-recorder to v9. Thanks [@renovate](https://github.com/apps/renovate)!
+- [#5266](https://github.com/gradio-app/gradio/pull/5266) [`4ccb9a86`](https://github.com/gradio-app/gradio/commit/4ccb9a86f194c6997f80a09880edc3c2b0554aab) - Makes it possible to set the initial camera position for the `Model3D` component as a tuple of (alpha, beta, radius). Thanks [@mbahri](https://github.com/mbahri)!
+- [#5271](https://github.com/gradio-app/gradio/pull/5271) [`97c3c7b1`](https://github.com/gradio-app/gradio/commit/97c3c7b1730407f9e80566af9ecb4ca7cccf62ff) - Move scripts from old website to CI. Thanks [@aliabd](https://github.com/aliabd)!
+- [#5369](https://github.com/gradio-app/gradio/pull/5369) [`b8968898`](https://github.com/gradio-app/gradio/commit/b89688984fa9c6be0db06e392e6935a544620764) - Fix typo in utils.py. Thanks [@eltociear](https://github.com/eltociear)!
+
+### Fixes
+
+- [#5304](https://github.com/gradio-app/gradio/pull/5304) [`05892302`](https://github.com/gradio-app/gradio/commit/05892302fb8fe2557d57834970a2b65aea97355b) - Adds kwarg to disable html sanitization in `gr.Chatbot()`. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)!
+- [#5366](https://github.com/gradio-app/gradio/pull/5366) [`0cc7e2dc`](https://github.com/gradio-app/gradio/commit/0cc7e2dcf60e216e0a30e2f85a9879ce3cb2a1bd) - Hide avatar when message is none. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)!
+- [#5393](https://github.com/gradio-app/gradio/pull/5393) [`e4e7a431`](https://github.com/gradio-app/gradio/commit/e4e7a4319924aaf51dcb18d07d0c9953d4011074) - Renders LaTeX that is added to the page in `gr.Markdown`, `gr.Chatbot`, and `gr.DataFrame`. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5394](https://github.com/gradio-app/gradio/pull/5394) [`4d94ea0a`](https://github.com/gradio-app/gradio/commit/4d94ea0a0cf2103cda19f48398a5634f8341d04d) - Adds horizontal scrolling to content that overflows in gr.Markdown. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5368](https://github.com/gradio-app/gradio/pull/5368) [`b27f7583`](https://github.com/gradio-app/gradio/commit/b27f7583254165b135bf1496a7d8c489a62ba96f) - Change markdown rendering to set breaks to false. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5360](https://github.com/gradio-app/gradio/pull/5360) [`64666525`](https://github.com/gradio-app/gradio/commit/6466652583e3c620df995fb865ef3511a34cb676) - Cancel Dropdown Filter. Thanks [@deckar01](https://github.com/deckar01)!
+
+## 3.41.2
+
+### Features
+
+- [#5284](https://github.com/gradio-app/gradio/pull/5284) [`5f25eb68`](https://github.com/gradio-app/gradio/commit/5f25eb6836f6a78ce6208b53495a01e1fc1a1d2f) - Minor bug fix sweep. Thanks [@aliabid94](https://github.com/aliabid94)! Our use of `__exit__` was catching errors and corrupting the traceback of any component that failed to instantiate (try running blocks_kitchen_sink off main for an example). Now `__exit__` exits immediately if there's been an exception, so the original exception can be printed cleanly. HighlightedText was rendering oddly; cleaned it up.
+
+### Fixes
+
+- [#5319](https://github.com/gradio-app/gradio/pull/5319) [`3341148c`](https://github.com/gradio-app/gradio/commit/3341148c109b5458cc88435d27eb154210efc472) - Fix: wrap avatar-image in a div to clip its shape.
Thanks [@Keldos-Li](https://github.com/Keldos-Li)!
+- [#5340](https://github.com/gradio-app/gradio/pull/5340) [`df090e89`](https://github.com/gradio-app/gradio/commit/df090e89f74a16e4cb2b700a1e3263cabd2bdd91) - Fix Checkbox select dispatch. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
+
+## 3.41.1
+
+### Fixes
+
+- [#5324](https://github.com/gradio-app/gradio/pull/5324) [`31996c99`](https://github.com/gradio-app/gradio/commit/31996c991d6bfca8cef975eb8e3c9f61a7aced19) - ensure login form has correct styles. Thanks [@pngwn](https://github.com/pngwn)!
+- [#5323](https://github.com/gradio-app/gradio/pull/5323) [`e32b0928`](https://github.com/gradio-app/gradio/commit/e32b0928d2d00342ca917ebb10c379ffc2ec200d) - ensure dropdown stays open when identical data is passed in. Thanks [@pngwn](https://github.com/pngwn)!
+
+## 3.41.0
+
+### Highlights
+
+#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec))
+
+##### Improved markdown support
+
+We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`, including syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling.
+
+##### Various performance improvements
+
+These improvements will be particularly beneficial to large applications.
+
+- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast.
+- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%).
+- Corrected an issue that was causing markdown to re-render infinitely.
+- Ensured that the `gr.3DModel` does not re-render prematurely.
+
+ Thanks [@pngwn](https://github.com/pngwn)!
+
+#### Enable streaming audio in python client ([#5248](https://github.com/gradio-app/gradio/pull/5248) [`390624d8`](https://github.com/gradio-app/gradio/commit/390624d8ad2b1308a5bf8384435fd0db98d8e29e))
+
+The `gradio_client` now supports streaming file outputs 🌊
+
+No new syntax! Connect to a gradio demo that supports streaming file outputs and call `predict` or `submit` as you normally would.
+
+```python
+import gradio_client as grc
+client = grc.Client("gradio/stream_audio_out")
+
+# Get the entire generated audio as a local file
+client.predict("/Users/freddy/Pictures/bark_demo.mp4", api_name="/predict")
+
+job = client.submit("/Users/freddy/Pictures/bark_demo.mp4", api_name="/predict")
+
+# Get the entire generated audio as a local file
+job.result()
+
+# Each individual chunk
+job.outputs()
+```
+
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
+
+#### Add `render` function to `<gradio-app>` ([#5158](https://github.com/gradio-app/gradio/pull/5158) [`804fcc05`](https://github.com/gradio-app/gradio/commit/804fcc058e147f283ece67f1f353874e26235535))
+
+We now have an event `render` on the web component, which is triggered once the embedded space has finished rendering.
+
+```html
+<!-- The original snippet was not preserved; this is a reconstructed sketch, and the space URL is illustrative. -->
+<gradio-app src="https://gradio-hello-world.hf.space"></gradio-app>
+<script>
+  const app = document.querySelector("gradio-app");
+  app.addEventListener("render", () => console.log("Embedded space has finished rendering"));
+</script>
+```
+
+ Thanks [@hannahblair](https://github.com/hannahblair)!
+
+### Features
+
+- [#5268](https://github.com/gradio-app/gradio/pull/5268) [`f49028cf`](https://github.com/gradio-app/gradio/commit/f49028cfe3e21097001ddbda71c560b3d8b42e1c) - Move markdown & latex processing to the frontend for the gr.Markdown and gr.DataFrame components.
Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5215](https://github.com/gradio-app/gradio/pull/5215) [`fbdad78a`](https://github.com/gradio-app/gradio/commit/fbdad78af4c47454cbb570f88cc14bf4479bbceb) - Lazy load interactive or static variants of a component individually, rather than loading both variants regardless. This change will improve performance for many applications. Thanks [@pngwn](https://github.com/pngwn)! +- [#5216](https://github.com/gradio-app/gradio/pull/5216) [`4b58ea6d`](https://github.com/gradio-app/gradio/commit/4b58ea6d98e7a43b3f30d8a4cb6f379bc2eca6a8) - Update i18n tokens and locale files. Thanks [@hannahblair](https://github.com/hannahblair)! +- [#5283](https://github.com/gradio-app/gradio/pull/5283) [`a7460557`](https://github.com/gradio-app/gradio/commit/a74605572dd0d6bb41df6b38b120d656370dd67d) - Add height parameter and scrolling to `gr.Dataframe`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5232](https://github.com/gradio-app/gradio/pull/5232) [`c57d4c23`](https://github.com/gradio-app/gradio/commit/c57d4c232a97e03b4671f9e9edc3af456438fe89) - `gr.Radio` and `gr.CheckboxGroup` can now accept different names and values. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5219](https://github.com/gradio-app/gradio/pull/5219) [`e8fd4e4e`](https://github.com/gradio-app/gradio/commit/e8fd4e4ec68a6c974bc8c84b61f4a0ec50a85bc6) - Add `api_name` parameter to `gr.Interface`. Additionally, completely hide api page if show_api=False. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5280](https://github.com/gradio-app/gradio/pull/5280) [`a2f42e28`](https://github.com/gradio-app/gradio/commit/a2f42e28bd793bce4bed6d54164bb2a327a46fd5) - Allow updating the label of `gr.UpdateButton`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5112](https://github.com/gradio-app/gradio/pull/5112) [`1cefee7f`](https://github.com/gradio-app/gradio/commit/1cefee7fc05175aca23ba04b3a3fda7b97f49bf0) - chore(deps): update dependency marked to v7. Thanks [@renovate](https://github.com/apps/renovate)! +- [#5260](https://github.com/gradio-app/gradio/pull/5260) [`a773eaf7`](https://github.com/gradio-app/gradio/commit/a773eaf7504abb53b99885b3454dc1e027adbb42) - Stop passing inputs and preprocessing on iterators. Thanks [@aliabid94](https://github.com/aliabid94)! +- [#4943](https://github.com/gradio-app/gradio/pull/4943) [`947d615d`](https://github.com/gradio-app/gradio/commit/947d615db6f76519d0e8bc0d1a0d7edf89df267b) - Sign in with Hugging Face (OAuth support). Thanks [@Wauplin](https://github.com/Wauplin)! +- [#5298](https://github.com/gradio-app/gradio/pull/5298) [`cf167cd1`](https://github.com/gradio-app/gradio/commit/cf167cd1dd4acd9aee225ff1cb6fac0e849806ba) - Create event listener table for components on docs. Thanks [@aliabd](https://github.com/aliabd)! +- [#5173](https://github.com/gradio-app/gradio/pull/5173) [`730f0c1d`](https://github.com/gradio-app/gradio/commit/730f0c1d54792eb11359e40c9f2326e8a6e39203) - Ensure gradio client works as expected for functions that return nothing. Thanks [@raymondtri](https://github.com/raymondtri)! +- [#5188](https://github.com/gradio-app/gradio/pull/5188) [`b22e1888`](https://github.com/gradio-app/gradio/commit/b22e1888fcf0843520525c1e4b7e1fe73fdeb948) - Fix the images in the theme builder to use permanent URI. Thanks [@abidlabs](https://github.com/abidlabs)! 
+- [#5221](https://github.com/gradio-app/gradio/pull/5221) [`f344592a`](https://github.com/gradio-app/gradio/commit/f344592aeb1658013235ded154107f72d86f24e7) - Allows setting a height to `gr.File` and improves the UI of the component. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5265](https://github.com/gradio-app/gradio/pull/5265) [`06982212`](https://github.com/gradio-app/gradio/commit/06982212dfbd613853133d5d0eebd75577967027) - Removes scrollbar from File preview when not needed. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5305](https://github.com/gradio-app/gradio/pull/5305) [`15075241`](https://github.com/gradio-app/gradio/commit/15075241fa7ad3f7fd9ae2a91e54faf8f19a46f9) - Rotate axes labels on LinePlot, BarPlot, and ScatterPlot. Thanks [@Faiga91](https://github.com/Faiga91)! +- [#5258](https://github.com/gradio-app/gradio/pull/5258) [`92282cea`](https://github.com/gradio-app/gradio/commit/92282cea6afdf7e9930ece1046d8a63be34b3cea) - Chatbot Avatar Images. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! +- [#5244](https://github.com/gradio-app/gradio/pull/5244) [`b3e50db9`](https://github.com/gradio-app/gradio/commit/b3e50db92f452f376aa2cc081326d40bb69d6dd7) - Remove aiohttp dependency. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5264](https://github.com/gradio-app/gradio/pull/5264) [`46a2b600`](https://github.com/gradio-app/gradio/commit/46a2b600a7ff030a9ea1560b882b3bf3ad266bbc) - ensure translations for audio work correctly. Thanks [@hannahblair](https://github.com/hannahblair)! + +### Fixes + +- [#5256](https://github.com/gradio-app/gradio/pull/5256) [`933db53e`](https://github.com/gradio-app/gradio/commit/933db53e93a1229fdf149556d61da5c4c7e1a331) - Better handling of empty dataframe in `gr.DataFrame`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5242](https://github.com/gradio-app/gradio/pull/5242) [`2b397791`](https://github.com/gradio-app/gradio/commit/2b397791fe2059e4beb72937ff0436f2d4d28b4b) - Fix message text overflow onto copy button in `gr.Chatbot`. Thanks [@hannahblair](https://github.com/hannahblair)! +- [#5253](https://github.com/gradio-app/gradio/pull/5253) [`ddac7e4d`](https://github.com/gradio-app/gradio/commit/ddac7e4d0f55c3bdc6c3e9a9e24588b2563e4049) - Ensure File component uploads files to the server. Thanks [@pngwn](https://github.com/pngwn)! +- [#5179](https://github.com/gradio-app/gradio/pull/5179) [`6fb92b48`](https://github.com/gradio-app/gradio/commit/6fb92b48a916104db573602011a448b904d42e5e) - Fixes audio streaming issues. Thanks [@aliabid94](https://github.com/aliabid94)! +- [#5295](https://github.com/gradio-app/gradio/pull/5295) [`7b8fa8aa`](https://github.com/gradio-app/gradio/commit/7b8fa8aa58f95f5046b9add64b40368bd3f1b700) - Allow caching examples with streamed output. Thanks [@aliabid94](https://github.com/aliabid94)! +- [#5285](https://github.com/gradio-app/gradio/pull/5285) [`cdfd4217`](https://github.com/gradio-app/gradio/commit/cdfd42174a9c777eaee9c1209bf8e90d8c7791f2) - Tweaks to `icon` parameter in `gr.Button()`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5122](https://github.com/gradio-app/gradio/pull/5122) [`3b805346`](https://github.com/gradio-app/gradio/commit/3b8053469aca6c7a86a6731e641e4400fc34d7d3) - Allows code block in chatbot to scroll horizontally. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! 
+- [#5312](https://github.com/gradio-app/gradio/pull/5312) [`f769cb67`](https://github.com/gradio-app/gradio/commit/f769cb67149d8e209091508f06d87014acaed965) - only start listening for events after the components are mounted. Thanks [@pngwn](https://github.com/pngwn)!
+- [#5254](https://github.com/gradio-app/gradio/pull/5254) [`c39f06e1`](https://github.com/gradio-app/gradio/commit/c39f06e16b9feea97984e4822df35a99c807461c) - Fix `.update()` for `gr.Radio()` and `gr.CheckboxGroup()`. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5231](https://github.com/gradio-app/gradio/pull/5231) [`87f1c2b4`](https://github.com/gradio-app/gradio/commit/87f1c2b4ac7c685c43477215fa5b96b6cbeffa05) - Allow `gr.Interface.from_pipeline()` and `gr.load()` to work within `gr.Blocks()`. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5238](https://github.com/gradio-app/gradio/pull/5238) [`de23e9f7`](https://github.com/gradio-app/gradio/commit/de23e9f7d67e685e791faf48a21f34121f6d094a) - Improve audio streaming. Thanks [@aliabid94](https://github.com/aliabid94)! Proper audio streaming with WAV files: we now do the proper processing to stream out WAV files as a single stream of audio without any cracks in the seams. Audio streaming with bytes: stream any audio type by yielding out bytes, and it should work flawlessly.
+- [#5313](https://github.com/gradio-app/gradio/pull/5313) [`54bcb724`](https://github.com/gradio-app/gradio/commit/54bcb72417b2781ad9d7500ea0f89aa9d80f7d8f) - Restores missing part of bottom border on file component. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5235](https://github.com/gradio-app/gradio/pull/5235) [`1ecf88ac`](https://github.com/gradio-app/gradio/commit/1ecf88ac5f20bc5a1c91792d1a68559575e6afd7) - fix #5229. Thanks [@breengles](https://github.com/breengles)!
+- [#5276](https://github.com/gradio-app/gradio/pull/5276) [`502f1015`](https://github.com/gradio-app/gradio/commit/502f1015bf23b365bc32446dd2e549b0c5d0dc72) - Ensure `Blocks` translation copy renders correctly. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5296](https://github.com/gradio-app/gradio/pull/5296) [`a0f22626`](https://github.com/gradio-app/gradio/commit/a0f22626f2aff297754414bbc83d5c4cfe086ea0) - `make_waveform()` twitter video resolution fix. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)!
+
+## 3.40.0
+
+### Highlights
+
+#### Client.predict will now return the final output for streaming endpoints ([#5057](https://github.com/gradio-app/gradio/pull/5057) [`35856f8b`](https://github.com/gradio-app/gradio/commit/35856f8b54548cae7bd3b8d6a4de69e1748283b2))
+
+### This is a breaking change (for gradio_client only)!
+
+Previously, `Client.predict` would only return the first output of an endpoint that streamed results. This was causing confusion for developers who wanted to call these streaming demos via the client.
+
+We realize that developers using the client don't know the internals of whether a demo streams or not, so we're changing the behavior of predict to match developer expectations.
+
+`Client.predict` will now return the final output of a streaming endpoint. This will make it even easier to use gradio apps via the client.
+
+ Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
+
+#### Gradio now supports streaming audio outputs
+
+Allows users to use generators to stream audio out, yielding consecutive chunks of audio. Requires `streaming=True` to be set on the output audio.
+
+```python
+import gradio as gr
+from pydub import AudioSegment
+
+def stream_audio(audio_file):
+    audio = AudioSegment.from_mp3(audio_file)
+    i = 0
+    chunk_size = 3000
+
+    while chunk_size*i < len(audio):
+        chunk = audio[chunk_size*i:chunk_size*(i+1)]
+        i += 1
+        if chunk:
+            file = f"/tmp/{i}.mp3"
+            chunk.export(file, format="mp3")
+            yield file
+
+demo = gr.Interface(
+    fn=stream_audio,
+    inputs=gr.Audio(type="filepath", label="Audio file to stream"),
+    outputs=gr.Audio(autoplay=True, streaming=True),
+)
+
+demo.queue().launch()
+```
+
+From the backend, streamed outputs are served from the `/stream/` endpoint instead of the `/file/` endpoint. Currently this is just used to serve streaming audio output. The output JSON will have `is_stream`: `true`, instead of `is_file`: `true`, in the file data object. Thanks [@aliabid94](https://github.com/aliabid94)!
+
+### Features
+
+- [#5081](https://github.com/gradio-app/gradio/pull/5081) [`d7f83823`](https://github.com/gradio-app/gradio/commit/d7f83823fbd7604456b0127d689a63eed759807d) - Solve "how can I config root_path dynamically?" (#4968). Thanks [@eastonsuo](https://github.com/eastonsuo)!
+- [#5025](https://github.com/gradio-app/gradio/pull/5025) [`6693660a`](https://github.com/gradio-app/gradio/commit/6693660a790996f8f481feaf22a8c49130d52d89) - Add download button to selected images in `Gallery`. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5133](https://github.com/gradio-app/gradio/pull/5133) [`61129052`](https://github.com/gradio-app/gradio/commit/61129052ed1391a75c825c891d57fa0ad6c09fc8) - Update dependency esbuild to ^0.19.0. Thanks [@renovate](https://github.com/apps/renovate)!
+- [#5125](https://github.com/gradio-app/gradio/pull/5125) [`80be7a1c`](https://github.com/gradio-app/gradio/commit/80be7a1ca44c0adef1668367b2cf36b65e52e576) - chatbot conversation nodes can contain a copy button. Thanks [@fazpu](https://github.com/fazpu)!
+- [#5048](https://github.com/gradio-app/gradio/pull/5048) [`0b74a159`](https://github.com/gradio-app/gradio/commit/0b74a1595b30df744e32a2c358c07acb7fd1cfe5) - Use `importlib` in favor of deprecated `pkg_resources`. Thanks [@jayceslesar](https://github.com/jayceslesar)!
+- [#5045](https://github.com/gradio-app/gradio/pull/5045) [`3b9494f5`](https://github.com/gradio-app/gradio/commit/3b9494f5c57e6b52e6a040ce8d6b5141f780e84d) - Lite: Fix the analytics module to use asyncio to work in the Wasm env. Thanks [@whitphx](https://github.com/whitphx)!
+- [#5046](https://github.com/gradio-app/gradio/pull/5046) [`5244c587`](https://github.com/gradio-app/gradio/commit/5244c5873c355cf3e2f0acb7d67fda3177ef8b0b) - Allow new lines in `HighlightedText` with `\n` and preserve whitespace. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5076](https://github.com/gradio-app/gradio/pull/5076) [`2745075a`](https://github.com/gradio-app/gradio/commit/2745075a26f80e0e16863d483401ff1b6c5ada7a) - Add deploy_discord to docs. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
+- [#5116](https://github.com/gradio-app/gradio/pull/5116) [`0dc49b4c`](https://github.com/gradio-app/gradio/commit/0dc49b4c517706f572240f285313a881089ced79) - Add support for async functions and async generators to `gr.ChatInterface`. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5047](https://github.com/gradio-app/gradio/pull/5047) [`883ac364`](https://github.com/gradio-app/gradio/commit/883ac364f69d92128774ac446ce49bdf8415fd7b) - Add `step` param to `Number`. Thanks [@hannahblair](https://github.com/hannahblair)!
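+A minimal sketch of the new `step` parameter above (the label and values are illustrative):
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    # the number input's spinner arrows now change the value by 0.25 at a time
+    gr.Number(value=1.0, step=0.25, label="Amount")
+
+demo.launch()
+```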
+- [#5137](https://github.com/gradio-app/gradio/pull/5137) [`22aa5eba`](https://github.com/gradio-app/gradio/commit/22aa5eba3fee3f14473e4b0fac29cf72fe31ef04) - Use font size `--text-md` for `<code>` in Chatbot messages. Thanks [@jaywonchung](https://github.com/jaywonchung)!
+- [#5005](https://github.com/gradio-app/gradio/pull/5005) [`f5539c76`](https://github.com/gradio-app/gradio/commit/f5539c7618e31451420bd3228754774da14dc65f) - Enhancement: Add focus event to textbox and number component. Thanks [@JodyZ0203](https://github.com/JodyZ0203)!
+- [#5104](https://github.com/gradio-app/gradio/pull/5104) [`34f6b22e`](https://github.com/gradio-app/gradio/commit/34f6b22efbfedfa569d452f3f99ed2e6593e3c21) - Strip leading and trailing spaces from username in login route. Thanks [@sweep-ai](https://github.com/apps/sweep-ai)!
+- [#5149](https://github.com/gradio-app/gradio/pull/5149) [`144df459`](https://github.com/gradio-app/gradio/commit/144df459a3b7895e524defcfc4c03fbb8b083aca) - Add `show_edit_button` param to `gr.Audio`. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5136](https://github.com/gradio-app/gradio/pull/5136) [`eaa1ce14`](https://github.com/gradio-app/gradio/commit/eaa1ce14ac41de1c23321e93f11f1b03a2f3c7f4) - Enhancing Tamil Translation: Language Refinement 🌟. Thanks [@sanjaiyan-dev](https://github.com/sanjaiyan-dev)!
+- [#5035](https://github.com/gradio-app/gradio/pull/5035) [`8b4eb8ca`](https://github.com/gradio-app/gradio/commit/8b4eb8cac9ea07bde31b44e2006ca2b7b5f4de36) - JS Client: Fixes cannot read properties of null (reading 'is_file'). Thanks [@raymondtri](https://github.com/raymondtri)!
+- [#5023](https://github.com/gradio-app/gradio/pull/5023) [`e6317d77`](https://github.com/gradio-app/gradio/commit/e6317d77f87d3dad638acca3dbc4a9228570e63c) - Update dependency extendable-media-recorder to v8. Thanks [@renovate](https://github.com/apps/renovate)!
+- [#5085](https://github.com/gradio-app/gradio/pull/5085) [`13e47835`](https://github.com/gradio-app/gradio/commit/13e478353532c4af18cfa50772f8b6fb3c6c9818) - chore(deps): update dependency extendable-media-recorder to v8. Thanks [@renovate](https://github.com/apps/renovate)!
+- [#5080](https://github.com/gradio-app/gradio/pull/5080) [`37caa2e0`](https://github.com/gradio-app/gradio/commit/37caa2e0fe95d6cab8beb174580fb557904f137f) - Add icon and link params to `gr.Button`. Thanks [@hannahblair](https://github.com/hannahblair)!
+
+### Fixes
+
+- [#5062](https://github.com/gradio-app/gradio/pull/5062) [`7d897165`](https://github.com/gradio-app/gradio/commit/7d89716519d0751072792c9bbda668ffeb597296) - `gr.Dropdown` now has correct behavior in static mode as well as when an option is selected. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5077](https://github.com/gradio-app/gradio/pull/5077) [`667875b2`](https://github.com/gradio-app/gradio/commit/667875b2441753e74d25bd9d3c8adedd8ede11cd) - Live audio streaming output
+- [#5118](https://github.com/gradio-app/gradio/pull/5118) [`1b017e68`](https://github.com/gradio-app/gradio/commit/1b017e68f6a9623cc2ec085bd20e056229552028) - Add `interactive` args to `gr.ColorPicker`. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5114](https://github.com/gradio-app/gradio/pull/5114) [`56d2609d`](https://github.com/gradio-app/gradio/commit/56d2609de93387a75dc82b1c06c1240c5b28c0b8) - Reset textbox value to empty string when value is None. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#5075](https://github.com/gradio-app/gradio/pull/5075) [`67265a58`](https://github.com/gradio-app/gradio/commit/67265a58027ef1f9e4c0eb849a532f72eaebde48) - Allow supporting >1000 files in `gr.File()` and `gr.UploadButton()`. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5135](https://github.com/gradio-app/gradio/pull/5135) [`80727bbe`](https://github.com/gradio-app/gradio/commit/80727bbe2c6d631022054edf01515017691b3bdd) - Fix dataset features and dataset preview for HuggingFaceDatasetSaver. Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
+- [#5039](https://github.com/gradio-app/gradio/pull/5039) [`620e4645`](https://github.com/gradio-app/gradio/commit/620e46452729d6d4877b3fab84a65daf2f2b7bc6) - `gr.Dropdown()` now supports values with arbitrary characters and doesn't clear value when re-focused. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5061](https://github.com/gradio-app/gradio/pull/5061) [`136adc9c`](https://github.com/gradio-app/gradio/commit/136adc9ccb23e5cb4d02d2e88f23f0b850041f98) - Ensure `gradio_client` is backwards compatible with `gradio==3.24.1`. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5129](https://github.com/gradio-app/gradio/pull/5129) [`97d804c7`](https://github.com/gradio-app/gradio/commit/97d804c748be9acfe27b8369dd2d64d61f43c2e7) - [Spaces] ZeroGPU Queue fix. Thanks [@cbensimon](https://github.com/cbensimon)!
+- [#5140](https://github.com/gradio-app/gradio/pull/5140) [`cd1353fa`](https://github.com/gradio-app/gradio/commit/cd1353fa3eb1b015f5860ca5d5a8e8d1aa4a831c) - Fixes the display of minutes in the video player. Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#5111](https://github.com/gradio-app/gradio/pull/5111) [`b84a35b7`](https://github.com/gradio-app/gradio/commit/b84a35b7b91eca947f787648ceb361b1d023427b) - Add icon and link to DuplicateButton. Thanks [@aliabd](https://github.com/aliabd)!
+- [#5030](https://github.com/gradio-app/gradio/pull/5030) [`f6c491b0`](https://github.com/gradio-app/gradio/commit/f6c491b079d335af633dd854c68eb26f9e61c552) - highlightedtext throws an error based on model. Thanks [@rajeunoia](https://github.com/rajeunoia)!
+
+## 3.39.0
+
+### Highlights
+
+#### Create Discord Bots from Gradio Apps 🤖 ([#4960](https://github.com/gradio-app/gradio/pull/4960) [`46e4ef67`](https://github.com/gradio-app/gradio/commit/46e4ef67d287dd68a91473b73172b29cbad064bc))
+
+We're excited to announce that Gradio can now automatically create a discord bot from any `gr.ChatInterface` app.
+
+It's as easy as importing `gradio_client`, connecting to the app, and calling `deploy_discord`!
+
+_🦙 Turning Llama 2 70b into a discord bot 🦙_
+
+```python
+import gradio_client as grc
+grc.Client("ysharma/Explore_llamav2_with_TGI").deploy_discord(to_id="llama2-70b-discord-bot")
+```
+
+#### Getting started with template spaces
+
+To help get you started, we have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) with template spaces you can use to turn state-of-the-art LLMs powered by Gradio into discord bots.
+
+Currently we have template spaces for:
+
+- [Llama-2-70b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) powered by a FREE Hugging Face Inference Endpoint!
+- [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.
+- [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.
+- [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.
+- [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by OpenAI. Requires an OpenAI key.
+
+But once again, you can deploy ANY `gr.ChatInterface` app exposed on the internet! So don't hesitate to try it on your own chatbots.
+
+❗️ Additional Note ❗️: Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. But `gr.ChatInterface` apps naturally lend themselves to discord's chat functionality, so we suggest you start with those.
+
+Thanks [@freddyaboulton](https://github.com/freddyaboulton)!
+
+### Features
+
+- [#4995](https://github.com/gradio-app/gradio/pull/4995) [`3f8c210b`](https://github.com/gradio-app/gradio/commit/3f8c210b01ef1ceaaf8ee73be4bf246b5b745bbf) - Implement left and right click in `Gallery` component and show implicit images in `Gallery` grid. Thanks [@hannahblair](https://github.com/hannahblair)!
+- [#4993](https://github.com/gradio-app/gradio/pull/4993) [`dc07a9f9`](https://github.com/gradio-app/gradio/commit/dc07a9f947de44b419d8384987a02dcf94977851) - Bringing back the "Add download button for audio" PR by [@leuryr](https://github.com/leuryr). Thanks [@abidlabs](https://github.com/abidlabs)!
+- [#4979](https://github.com/gradio-app/gradio/pull/4979) [`44ac8ad0`](https://github.com/gradio-app/gradio/commit/44ac8ad08d82ea12c503dde5c78f999eb0452de2) - Allow setting sketch color default. Thanks [@aliabid94](https://github.com/aliabid94)!
+- [#4985](https://github.com/gradio-app/gradio/pull/4985) [`b74f8453`](https://github.com/gradio-app/gradio/commit/b74f8453034328f0e42da8e41785f5eb039b45d7) - Adds `additional_inputs` to `gr.ChatInterface`. Thanks [@abidlabs](https://github.com/abidlabs)!
+
+### Fixes
+
+- [#4997](https://github.com/gradio-app/gradio/pull/4997) [`41c83070`](https://github.com/gradio-app/gradio/commit/41c83070b01632084e7d29123048a96c1e261407) - Add CSS resets and specifiers to play nice with HF blog. Thanks [@aliabid94](https://github.com/aliabid94)!
+
+## 3.38
+
+### New Features:
+
+- Provide a parameter `animate` (`False` by default) in `gr.make_waveform()` which animates the overlayed waveform by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4918](https://github.com/gradio-app/gradio/pull/4918)
+- Add `show_download_button` param to allow the download button in static Image components to be hidden by [@hannahblair](https://github.com/hannahblair) in [PR 4959](https://github.com/gradio-app/gradio/pull/4959)
+- Added autofocus argument to Textbox by [@aliabid94](https://github.com/aliabid94) in [PR 4978](https://github.com/gradio-app/gradio/pull/4978)
+- The `gr.ChatInterface` UI now converts the "Submit" button to a "Stop" button while streaming, which can be used to pause generation. By [@abidlabs](https://github.com/abidlabs) in [PR 4971](https://github.com/gradio-app/gradio/pull/4971).
+- Add a `border_color_accent_subdued` theme variable to add a subdued border color to accented items. This is used by chatbot user messages. Set the value of this variable in `Default` theme to `*primary_200`.
By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4989](https://github.com/gradio-app/gradio/pull/4989)
+- Add default sketch color argument `brush_color`. Also, masks drawn on images are now slightly translucent (and mask color can also be set via brush_color). By [@aliabid94](https://github.com/aliabid94) in [PR 4979](https://github.com/gradio-app/gradio/pull/4979)
+
+### Bug Fixes:
+
+- Fixes `cancels` for generators so that if a generator is canceled before it is complete, subsequent runs of the event do not continue from the previous iteration, but rather start from the beginning. By [@abidlabs](https://github.com/abidlabs) in [PR 4969](https://github.com/gradio-app/gradio/pull/4969).
+- Use `gr.State` in `gr.ChatInterface` to reduce latency by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4976](https://github.com/gradio-app/gradio/pull/4976)
+- Fix bug with `gr.Interface` where component labels inferred from handler parameters were including special args like `gr.Request` or `gr.EventData`. By [@cbensimon](https://github.com/cbensimon) in [PR 4956](https://github.com/gradio-app/gradio/pull/4956)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Other Changes:
+
+- Apply pyright to the `components` directory by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4948](https://github.com/gradio-app/gradio/pull/4948)
+- Improved look of ChatInterface by [@aliabid94](https://github.com/aliabid94) in [PR 4978](https://github.com/gradio-app/gradio/pull/4978)
+
+## 3.37
+
+### New Features:
+
+Introducing a new `gr.ChatInterface` abstraction, which allows Gradio users to build fully functioning Chat interfaces very easily. The only required parameter is a chat function `fn`, which accepts a (string) user input `message` and a (list of lists) chat `history` and returns a (string) response. Here's a toy example:
+
+```py
+import gradio as gr
+
+def echo(message, history):
+    return message
+
+demo = gr.ChatInterface(fn=echo, examples=["hello", "hola", "merhaba"], title="Echo Bot")
+demo.launch()
+```
+
+Which produces: *(screenshot of the rendered chat interface)*
+
+And a corresponding easy-to-use API at `/chat`: *(screenshot of the auto-generated API docs)*
+
+The `gr.ChatInterface` abstraction works nicely with various LLM libraries, such as `langchain`. See the [dedicated guide](https://gradio.app/guides/creating-a-chatbot-fast) for more examples using `gr.ChatInterface`.
+Collective team effort in [PR 4869](https://github.com/gradio-app/gradio/pull/4869)
+
+- Chatbot messages now show hyperlinks to download files uploaded to `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4848](https://github.com/gradio-app/gradio/pull/4848)
+- Cached examples now work with generators and async generators by [@abidlabs](https://github.com/abidlabs) in [PR 4927](https://github.com/gradio-app/gradio/pull/4927)
+- Add RTL support to `gr.Markdown`, `gr.Chatbot`, `gr.Textbox` (via the `rtl` boolean parameter) and text alignment to `gr.Textbox` (via the string `text_align` parameter) by [@abidlabs](https://github.com/abidlabs) in [PR 4933](https://github.com/gradio-app/gradio/pull/4933)
+
+Examples of usage:
+
+```py
+with gr.Blocks() as demo:
+    gr.Textbox(interactive=True, text_align="right")
+demo.launch()
+```
+
+```py
+with gr.Blocks() as demo:
+    gr.Markdown("سلام", rtl=True)
+demo.launch()
+```
+
+- The `get_api_info` method of `Blocks` now supports layout output components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4871](https://github.com/gradio-app/gradio/pull/4871)
+
+- Added support for the new `gradio environment` command, which makes it easier for people to file bug reports by listing the OS, Gradio version, and the versions of the gradio/gradio-client dependencies. By [@varshneydevansh](https://github.com/varshneydevansh) in [PR 4915](https://github.com/gradio-app/gradio/pull/4915).
+
+### Bug Fixes:
+
+- The `.change()` event is fixed in `Video` and `Image` so that it only fires once by [@abidlabs](https://github.com/abidlabs) in [PR 4793](https://github.com/gradio-app/gradio/pull/4793)
+- The `.change()` event is fixed in `Audio` so that it fires when the component value is programmatically updated by [@abidlabs](https://github.com/abidlabs) in [PR 4793](https://github.com/gradio-app/gradio/pull/4793)
+- Add missing `display: flex` property to `Row` so that flex styling is applied to children by [@hannahblair](https://github.com/hannahblair) in [PR 4896](https://github.com/gradio-app/gradio/pull/4896)
+- Fixed bug where `gr.Video` could not preprocess URLs by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4904](https://github.com/gradio-app/gradio/pull/4904)
+- Fixed copy button rendering in API page on Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4924](https://github.com/gradio-app/gradio/pull/4924)
+- Fixed `gr.Group` and `container=False`. The `container` parameter is now only available for `Textbox`, `Number`, and `Dropdown`, the only elements where it makes sense. By [@aliabid94](https://github.com/aliabid94) in [PR 4916](https://github.com/gradio-app/gradio/pull/4916)
+- Fixed broken image link in auto-generated `app.py` from `ThemeClass.push_to_hub` by [@deepkyu](https://github.com/deepkyu) in [PR 4944](https://github.com/gradio-app/gradio/pull/4944)
+
+### Other Changes:
+
+- Added a warning on mobile that the websocket connection may break if the user leaves the tab. On a broken connection, Gradio tries to rejoin the queue and displays an error conveying that the connection broke. By [@aliabid94](https://github.com/aliabid94) in [PR 4742](https://github.com/gradio-app/gradio/pull/4742)
+- Remove blocking network calls made before the local URL gets printed - these slow down the display of the local URL, especially when no internet is available. By [@aliabid94](https://github.com/aliabid94) in [PR 4905](https://github.com/gradio-app/gradio/pull/4905).
+
+- Pinned dependencies to major versions to reduce the likelihood of a broken `gradio` due to changes in downstream dependencies by [@abidlabs](https://github.com/abidlabs) in [PR 4885](https://github.com/gradio-app/gradio/pull/4885)
+- Queue `max_size` defaults to parent Blocks `max_thread` when running on Spaces with ZeroGPU hardware. By [@cbensimon](https://github.com/cbensimon) in [PR 4937](https://github.com/gradio-app/gradio/pull/4937)
+
+### Breaking Changes:
+
+Motivated by the release of `pydantic==2.0`, which included breaking changes that broke a large number of Gradio apps, we've pinned many gradio dependencies. Note that pinned dependencies can cause downstream conflicts, so this may be a breaking change. That being said, we've kept the pins pretty loose, and we're expecting the change to be better for the long-term stability of Gradio apps.
+
+## 3.36.1
+
+### New Features:
+
+- Hotfix to support pydantic v1 and v2 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4835](https://github.com/gradio-app/gradio/pull/4835)
+
+### Bug Fixes:
+
+- Fix bug where `gr.File` change event was not triggered when the value was changed by another event by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4811](https://github.com/gradio-app/gradio/pull/4811)
+
+### Other Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.36.0
+
+### New Features:
+
+- The `gr.Video`, `gr.Audio`, `gr.Image`, `gr.Chatbot`, and `gr.Gallery` components now include a share icon when deployed on Spaces. This behavior can be modified by setting the `show_share_button` parameter in the component classes, by [@aliabid94](https://github.com/aliabid94) in [PR 4651](https://github.com/gradio-app/gradio/pull/4651)
+- Allow the web component `space`, `src`, and `host` attributes to be updated dynamically by [@pngwn](https://github.com/pngwn) in [PR 4461](https://github.com/gradio-app/gradio/pull/4461)
+- Suggestion for Spaces Duplication built into Gradio, by [@aliabid94](https://github.com/aliabid94) in [PR 4458](https://github.com/gradio-app/gradio/pull/4458)
+- The `api_name` parameter now accepts `False` as a value, which means it does not show up in named or unnamed endpoints. By [@abidlabs](https://github.com/abidlabs) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)
+- Added support for `pathlib.Path` in `gr.Video`, `gr.Gallery`, and `gr.Chatbot` by [@sunilkumardash9](https://github.com/sunilkumardash9) in [PR 4581](https://github.com/gradio-app/gradio/pull/4581).
+
+### Bug Fixes:
+
+- Updated components with an `info` attribute to update when `update()` is called on them, by [@jebarpg](https://github.com/jebarpg) in [PR 4715](https://github.com/gradio-app/gradio/pull/4715).
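+
+Example usage (a minimal sketch of the now-working behavior; the helper text and handler names are illustrative):
+
+```py
+import gradio as gr
+
+def update_info():
+    # Returning an update dict now also refreshes the `info` text in the UI
+    return gr.Textbox.update(info="New helper text")
+
+with gr.Blocks() as demo:
+    box = gr.Textbox(info="Original helper text")
+    btn = gr.Button("Update info")
+    btn.click(update_info, None, box)
+
+demo.launch()
+```
+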
+- Ensure the `Image` component's undo button works when mode is `mask` or `color-sketch` by [@amyorz](https://github.com/AmyOrz) in [PR 4692](https://github.com/gradio-app/gradio/pull/4692)
+- Load the iframe resizer external asset asynchronously, by [@akx](https://github.com/akx) in [PR 4336](https://github.com/gradio-app/gradio/pull/4336)
+- Restored missing imports in `gr.components` by [@abidlabs](https://github.com/abidlabs) in [PR 4566](https://github.com/gradio-app/gradio/pull/4566)
+- Fix bug where `select` event was not triggered in `gr.Gallery` if `height` was set to be large with `allow_preview=False` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4551](https://github.com/gradio-app/gradio/pull/4551)
+- Fix bug where setting `visible=False` in `gr.Group` event did not work by [@abidlabs](https://github.com/abidlabs) in [PR 4567](https://github.com/gradio-app/gradio/pull/4567)
+- Fix `make_waveform` to work with paths that contain spaces by [@akx](https://github.com/akx) in [PR 4570](https://github.com/gradio-app/gradio/pull/4570) & [PR 4578](https://github.com/gradio-app/gradio/pull/4578)
+- Send captured data in `stop_recording` event for `gr.Audio` and `gr.Video` components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4554](https://github.com/gradio-app/gradio/pull/4554)
+- Fix bug in `gr.Gallery` where `height` and `object_fit` parameters were being ignored by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4576](https://github.com/gradio-app/gradio/pull/4576)
+- Fixes an HTML sanitization issue in DOMPurify where links in markdown were not opening in a new window by [@hannahblair](https://github.com/hannahblair) in [PR 4577](https://github.com/gradio-app/gradio/pull/4577)
+- Fixed Dropdown height rendering in Columns by [@aliabid94](https://github.com/aliabid94) in [PR 4584](https://github.com/gradio-app/gradio/pull/4584)
+- Fixed bug where `AnnotatedImage` css styling was causing the annotation masks to not be displayed correctly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4628](https://github.com/gradio-app/gradio/pull/4628)
+- Ensure that Gradio does not silently fail when running on a port that is occupied by [@abidlabs](https://github.com/abidlabs) in [PR 4624](https://github.com/gradio-app/gradio/pull/4624).
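+
+For instance (a minimal sketch; the port number is illustrative), launching on a busy port now surfaces an error instead of failing silently:
+
+```py
+import gradio as gr
+
+demo = gr.Interface(lambda x: x, "textbox", "textbox")
+
+try:
+    # If port 7860 is already taken, launch() raises an OSError
+    # rather than appearing to start while serving nothing.
+    demo.launch(server_port=7860)
+except OSError as err:
+    print(f"Port unavailable: {err}")
+```
+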
+- Fix double upload bug that caused lag in file uploads by [@aliabid94](https://github.com/aliabid94) in [PR 4661](https://github.com/gradio-app/gradio/pull/4661)
+- `Progress` component now appears even when no `iterable` is specified in the `tqdm` constructor by [@itrushkin](https://github.com/itrushkin) in [PR 4475](https://github.com/gradio-app/gradio/pull/4475) (see the sketch below)
+- Deprecation warnings now point at the user code using those deprecated features, instead of Gradio internals, by [@akx](https://github.com/akx) in [PR 4694](https://github.com/gradio-app/gradio/pull/4694)
+- Adapt column widths in gr.Examples based on content by [@pngwn](https://github.com/pngwn) & [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4700](https://github.com/gradio-app/gradio/pull/4700)
+- The `plot` parameter deprecation warnings should now only be emitted for `Image` components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4709](https://github.com/gradio-app/gradio/pull/4709)
+- Removed unnecessary `type` deprecation warning by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4709](https://github.com/gradio-app/gradio/pull/4709)
+- Ensure Audio autoplay works when `autoplay=True` and the source is dynamically updated by [@pngwn](https://github.com/pngwn) in [PR 4705](https://github.com/gradio-app/gradio/pull/4705)
+- When an error modal is shown in spaces, ensure we scroll to the top so it can be seen by [@pngwn](https://github.com/pngwn) in [PR 4712](https://github.com/gradio-app/gradio/pull/4712)
+- Update dependencies by [@pngwn](https://github.com/pngwn) in [PR 4675](https://github.com/gradio-app/gradio/pull/4675)
+- Fixes `gr.Dropdown` being cut off at the bottom by [@abidlabs](https://github.com/abidlabs) in [PR 4691](https://github.com/gradio-app/gradio/pull/4691).
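+
+A minimal sketch of the `tqdm` behavior referenced above (assumes `gr.Progress(track_tqdm=True)`; names are illustrative):
+
+```py
+import time
+
+import gradio as gr
+from tqdm import tqdm
+
+def process(text, progress=gr.Progress(track_tqdm=True)):
+    # A tqdm bar constructed without an iterable (manual updates)
+    # is now reflected in the UI as well
+    bar = tqdm(total=10)
+    for _ in range(10):
+        time.sleep(0.1)
+        bar.update(1)
+    bar.close()
+    return text
+
+demo = gr.Interface(process, "textbox", "textbox")
+demo.launch()
+```
+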
+- Scroll to top when clicking "View API" in spaces by [@pngwn](https://github.com/pngwn) in [PR 4714](https://github.com/gradio-app/gradio/pull/4714)
+- Fix bug where `show_label` was hiding the entire component for `gr.Label` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4713](https://github.com/gradio-app/gradio/pull/4713)
+- Don't crash when uploaded image has broken EXIF data, by [@akx](https://github.com/akx) in [PR 4764](https://github.com/gradio-app/gradio/pull/4764)
+- Place toast messages at the top of the screen by [@pngwn](https://github.com/pngwn) in [PR 4796](https://github.com/gradio-app/gradio/pull/4796)
+- Fix regressed styling of Login page when auth is enabled by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4797](https://github.com/gradio-app/gradio/pull/4797)
+- Prevent broken scrolling to output on Spaces by [@aliabid94](https://github.com/aliabid94) in [PR 4822](https://github.com/gradio-app/gradio/pull/4822)
+
+### Other Changes:
+
+- Add `.git-blame-ignore-revs` by [@akx](https://github.com/akx) in [PR 4586](https://github.com/gradio-app/gradio/pull/4586)
+- Update frontend dependencies in [PR 4601](https://github.com/gradio-app/gradio/pull/4601)
+- Use `typing.Literal` where possible in gradio library and client by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4608](https://github.com/gradio-app/gradio/pull/4608)
+- Remove unnecessary mock json files for frontend E2E tests by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4625](https://github.com/gradio-app/gradio/pull/4625)
+- Update dependencies by [@pngwn](https://github.com/pngwn) in [PR 4643](https://github.com/gradio-app/gradio/pull/4643)
+- The theme builder now launches successfully, and the API docs are cleaned up. By [@abidlabs](https://github.com/abidlabs) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)
+- Remove `cleared_value` from some components as it's no longer used internally by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4685](https://github.com/gradio-app/gradio/pull/4685)
+- Better errors when you define two Blocks apps and reference components in one from events in the other, by [@abidlabs](https://github.com/abidlabs) in [PR 4738](https://github.com/gradio-app/gradio/pull/4738).
+- Better message when share link is not created by [@abidlabs](https://github.com/abidlabs) in [PR 4773](https://github.com/gradio-app/gradio/pull/4773).
+- Improve accessibility around selected images in gr.Gallery component by [@hannahblair](https://github.com/hannahblair) in [PR 4790](https://github.com/gradio-app/gradio/pull/4790)
+
+### Breaking Changes:
+
+[PR 4683](https://github.com/gradio-app/gradio/pull/4683) removes the explicit named endpoint "load_examples" from gr.Interface that was introduced in [PR 4456](https://github.com/gradio-app/gradio/pull/4456).
+
+## 3.35.2
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Fix chatbot streaming by [@aliabid94](https://github.com/aliabid94) in [PR 4537](https://github.com/gradio-app/gradio/pull/4537)
+- Fix chatbot height and scrolling by [@aliabid94](https://github.com/aliabid94) in [PR 4540](https://github.com/gradio-app/gradio/pull/4540)
+
+### Other Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.35.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Fix chatbot streaming by [@aliabid94](https://github.com/aliabid94) in [PR 4537](https://github.com/gradio-app/gradio/pull/4537)
+- Fix error modal position and text size by [@pngwn](https://github.com/pngwn) in [PR 4538](https://github.com/gradio-app/gradio/pull/4538).
+
+### Other Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.35.0
+
+### New Features:
+
+- A `gr.ClearButton` which allows users to easily clear the values of components by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456)
+
+Example usage:
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot([("Hello", "How are you?")])
+    with gr.Row():
+        textbox = gr.Textbox(scale=3, interactive=True)
+        gr.ClearButton([textbox, chatbot], scale=1)
+
+demo.launch()
+```
+
+- Min and max value for gr.Number by [@artegoser](https://github.com/artegoser) and [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3991](https://github.com/gradio-app/gradio/pull/3991)
+- Add `start_recording` and `stop_recording` events to `Video` and `Audio` components by [@pngwn](https://github.com/pngwn) in [PR 4422](https://github.com/gradio-app/gradio/pull/4422)
+- Allow any function to generate an error message and allow multiple messages to appear at a time. Other error modal improvements such as auto dismiss after a time limit and a new layout on mobile by [@pngwn](https://github.com/pngwn) in [PR 4459](https://github.com/gradio-app/gradio/pull/4459).
+- Add `autoplay` kwarg to `Video` and `Audio` components by [@pngwn](https://github.com/pngwn) in [PR 4453](https://github.com/gradio-app/gradio/pull/4453)
+- Add `allow_preview` parameter to `Gallery` to control whether a detailed preview is displayed on click by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4470](https://github.com/gradio-app/gradio/pull/4470)
+- Add `latex_delimiters` parameter to `Chatbot` to control the delimiters used for LaTeX and to disable LaTeX in the `Chatbot` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4516](https://github.com/gradio-app/gradio/pull/4516) (see the sketch below)
+- Can now issue `gr.Warning` and `gr.Info` modals. Simply put the code `gr.Warning("Your warning message")` or `gr.Info("Your info message")` as a standalone line in your function. By [@aliabid94](https://github.com/aliabid94) in [PR 4518](https://github.com/gradio-app/gradio/pull/4518).
+
+Example:
+
+```python
+def start_process(name):
+    gr.Info("Starting process")
+    if name is None:
+        gr.Warning("Name is empty")
+    ...
+    if not success:
+        raise gr.Error("Process failed")
+```
+
+### Bug Fixes:
+
+- Add support for PAUSED state in the JS client by [@abidlabs](https://github.com/abidlabs) in [PR 4438](https://github.com/gradio-app/gradio/pull/4438)
+- Ensure Tabs only occupy the space required by [@pngwn](https://github.com/pngwn) in [PR 4419](https://github.com/gradio-app/gradio/pull/4419)
+- Ensure components have the correct empty sizes to prevent empty containers from collapsing by [@pngwn](https://github.com/pngwn) in [PR 4447](https://github.com/gradio-app/gradio/pull/4447).
+- Frontend code no longer crashes when there is a relative URL in an `<a>` element, by [@akx](https://github.com/akx) in [PR 4449](https://github.com/gradio-app/gradio/pull/4449).
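+
+A sketch of the new `latex_delimiters` parameter mentioned above (the delimiter choice is illustrative):
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    # Only render LaTeX that is wrapped in $$ ... $$ as display math
+    chatbot = gr.Chatbot(
+        latex_delimiters=[{"left": "$$", "right": "$$", "display": True}]
+    )
+
+demo.launch()
+```
+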
+- Fix bug where setting `format='mp4'` on a video component would cause the function to error out if the uploaded video was not playable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4467](https://github.com/gradio-app/gradio/pull/4467)
+- Fix `_js` parameter to work even without backend function, by [@aliabid94](https://github.com/aliabid94) in [PR 4486](https://github.com/gradio-app/gradio/pull/4486).
+- Fix new line issue with `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4491](https://github.com/gradio-app/gradio/pull/4491)
+- Fixes issue with Clear button not working for `Label` component by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456)
+- Restores the ability to pass in a tuple (sample rate, audio array) to gr.Audio() by [@abidlabs](https://github.com/abidlabs) in [PR 4525](https://github.com/gradio-app/gradio/pull/4525)
+- Ensure code is correctly formatted and copy button is always present in Chatbot by [@pngwn](https://github.com/pngwn) in [PR 4527](https://github.com/gradio-app/gradio/pull/4527)
+- `show_label` will not automatically be set to `True` in `gr.BarPlot.update` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4531](https://github.com/gradio-app/gradio/pull/4531)
+- `gr.BarPlot` group text now respects darkmode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4531](https://github.com/gradio-app/gradio/pull/4531)
+- Fix dispatched errors from within components by [@aliabid94](https://github.com/aliabid94) in [PR 4786](https://github.com/gradio-app/gradio/pull/4786)
+
+### Other Changes:
+
+- Change styling of status and toast error components by [@hannahblair](https://github.com/hannahblair) in [PR 4454](https://github.com/gradio-app/gradio/pull/4454).
+- Clean up unnecessary `new Promise()`s by [@akx](https://github.com/akx) in [PR 4442](https://github.com/gradio-app/gradio/pull/4442).
+- Minor UI cleanup for Examples and Dataframe components by [@aliabid94](https://github.com/aliabid94) in [PR 4455](https://github.com/gradio-app/gradio/pull/4455).
+- Add Catalan translation by [@jordimas](https://github.com/jordimas) in [PR 4483](https://github.com/gradio-app/gradio/pull/4483).
+- The API endpoint that loads examples upon click has been given an explicit name ("/load_examples") by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456).
+- Allows configuration of FastAPI app when calling `mount_gradio_app`, by [@charlesfrye](https://github.com/charlesfrye) in [PR 4519](https://github.com/gradio-app/gradio/pull/4519).
+
+### Breaking Changes:
+
+- The behavior of the `Clear` button has been changed for `Slider`, `CheckboxGroup`, `Radio`, `Dropdown` components by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456). The Clear button now sets the value of these components to be empty as opposed to the original default set by the developer. This brings them in line with the rest of the Gradio components.
+- Python 3.7 end of life is June 27, 2023.
+Gradio will no longer support Python 3.7, by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4484](https://github.com/gradio-app/gradio/pull/4484)
+- Removed `$` as a default LaTeX delimiter for the `Chatbot` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4516](https://github.com/gradio-app/gradio/pull/4516). The specific LaTeX delimiters can be set using the new `latex_delimiters` parameter in `Chatbot`.
+
+## 3.34.0
+
+### New Features:
+
+- The `gr.UploadButton` component now supports the `variant` and `interactive` parameters by [@abidlabs](https://github.com/abidlabs) in [PR 4436](https://github.com/gradio-app/gradio/pull/4436).
+
+### Bug Fixes:
+
+- Remove target="\_blank" override on anchor tags with internal targets by [@hannahblair](https://github.com/hannahblair) in [PR 4405](https://github.com/gradio-app/gradio/pull/4405)
+- Fixed bug where `gr.File(file_count='multiple')` could not be cached as output by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4421](https://github.com/gradio-app/gradio/pull/4421)
+- Restricts the domains that can be proxied via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4406](https://github.com/gradio-app/gradio/pull/4406).
+- Fixes issue where `gr.UploadButton` could not be used to upload the same file twice by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4437](https://github.com/gradio-app/gradio/pull/4437)
+- Fixes bug where `/proxy` route was being incorrectly constructed by the frontend by [@abidlabs](https://github.com/abidlabs) in [PR 4430](https://github.com/gradio-app/gradio/pull/4430).
+- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)
+- Fix video rendering in Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4433](https://github.com/gradio-app/gradio/pull/4433).
+- The output directory for files downloaded when calling Blocks as a function is now set to a temporary directory by default (instead of the working directory in some cases) by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)
+
+### Other Changes:
+
+- When running on Spaces, handler functions will be transformed by the [PySpaces](https://pypi.org/project/spaces/) library in order to make them work with specific hardware. It will have no effect on standalone Gradio apps or regular Gradio Spaces and can be globally deactivated as follows: `import spaces; spaces.disable_gradio_auto_wrap()` by [@cbensimon](https://github.com/cbensimon) in [PR 4389](https://github.com/gradio-app/gradio/pull/4389).
+- Deprecated `.style` parameter and moved arguments to constructor. Added support for `.update()` to all arguments initially in style. Added `scale` and `min_width` support to every Component. By [@aliabid94](https://github.com/aliabid94) in [PR 4374](https://github.com/gradio-app/gradio/pull/4374)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.33.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Allow `every` to work with generators by [@dkjshk](https://github.com/dkjshk) in [PR 4434](https://github.com/gradio-app/gradio/pull/4434)
+- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)
+- Allow gradio to work offline, by [@aliabid94](https://github.com/aliabid94) in [PR 4398](https://github.com/gradio-app/gradio/pull/4398).
+- Fixed `validate_url` to check for 403 errors and use a GET request in place of a HEAD request by [@alvindaiyan](https://github.com/alvindaiyan) in [PR 4388](https://github.com/gradio-app/gradio/pull/4388).
+
+### Other Changes:
+
+- More explicit error message when share link binary is blocked by antivirus by [@abidlabs](https://github.com/abidlabs) in [PR 4380](https://github.com/gradio-app/gradio/pull/4380).
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.33.0
+
+### New Features:
+
+- Introduced `gradio deploy` to deploy a Gradio app to Spaces directly from your terminal. By [@aliabid94](https://github.com/aliabid94) in [PR 4033](https://github.com/gradio-app/gradio/pull/4033).
+- Introduce `show_progress='corner'` argument to event listeners, which will not cover the output components with the progress animation, but instead show it in the corner of the components (see the sketch below). By [@aliabid94](https://github.com/aliabid94) in [PR 4396](https://github.com/gradio-app/gradio/pull/4396).
+
+### Bug Fixes:
+
+- Fix bug where Label change event was triggering itself by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4371](https://github.com/gradio-app/gradio/pull/4371)
+- Make `Blocks.load` behave like other event listeners (allows chaining `then` off of it) by [@anentropic](https://github.com/anentropic/) in [PR 4304](https://github.com/gradio-app/gradio/pull/4304)
+- Respect `interactive=True` in output components of a `gr.Interface` by [@abidlabs](https://github.com/abidlabs) in [PR 4356](https://github.com/gradio-app/gradio/pull/4356).
+- Remove unused frontend code by [@akx](https://github.com/akx) in [PR 4275](https://github.com/gradio-app/gradio/pull/4275)
+- Fixes favicon path on Windows by [@abidlabs](https://github.com/abidlabs) in [PR 4369](https://github.com/gradio-app/gradio/pull/4369).
+- Prevent path traversal in `/file` routes by [@abidlabs](https://github.com/abidlabs) in [PR 4370](https://github.com/gradio-app/gradio/pull/4370).
+- Do not send HF token to other domains via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4368](https://github.com/gradio-app/gradio/pull/4368).
+- Replace default `markedjs` sanitize function with DOMPurify sanitizer for `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4360](https://github.com/gradio-app/gradio/pull/4360)
+- Prevent the creation of duplicate copy buttons in the chatbot and ensure copy buttons work in non-secure contexts by [@binary-husky](https://github.com/binary-husky) in [PR 4350](https://github.com/gradio-app/gradio/pull/4350).
+
+### Other Changes:
+
+- Remove flicker of loading bar by adding opacity transition, by [@aliabid94](https://github.com/aliabid94) in [PR 4349](https://github.com/gradio-app/gradio/pull/4349).
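+
+A sketch of the `show_progress='corner'` option noted above (component names are illustrative):
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    inp = gr.Textbox(label="Input")
+    out = gr.Textbox(label="Output")
+    btn = gr.Button("Run")
+    # 'corner' keeps the output visible and shows progress in its corner
+    btn.click(lambda x: x[::-1], inp, out, show_progress="corner")
+
+demo.launch()
+```
+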
+- Performance optimization in the frontend's Blocks code by [@akx](https://github.com/akx) in [PR 4334](https://github.com/gradio-app/gradio/pull/4334)
+- Upgrade the pnpm lock file format version from v6.0 to v6.1 by [@whitphx](https://github.com/whitphx) in [PR 4393](https://github.com/gradio-app/gradio/pull/4393)
+
+### Breaking Changes:
+
+- The `/file=` route no longer allows accessing dotfiles or files in "dot directories" by [@akx](https://github.com/akx) in [PR 4303](https://github.com/gradio-app/gradio/pull/4303)
+
+## 3.32.0
+
+### New Features:
+
+- `Interface.launch()` and `Blocks.launch()` now accept an `app_kwargs` argument to allow customizing the configuration of the underlying FastAPI app, by [@akx](https://github.com/akx) in [PR 4282](https://github.com/gradio-app/gradio/pull/4282)
+
+### Bug Fixes:
+
+- Fixed Gallery/AnnotatedImage components not respecting GRADIO_DEFAULT_DIR variable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)
+- Fixed Gallery/AnnotatedImage components resaving identical images by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)
+- Fixed Audio/Video/File components creating empty tempfiles on each run by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)
+- Fixed the behavior of the `run_on_click` parameter in `gr.Examples` by [@abidlabs](https://github.com/abidlabs) in [PR 4258](https://github.com/gradio-app/gradio/pull/4258).
+- Ensure error modal displays when the queue is enabled by [@pngwn](https://github.com/pngwn) in [PR 4273](https://github.com/gradio-app/gradio/pull/4273)
+- Ensure js client respects the full root when making requests to the server by [@pngwn](https://github.com/pngwn) in [PR 4271](https://github.com/gradio-app/gradio/pull/4271)
+
+### Other Changes:
+
+- Refactor web component `initial_height` attribute by [@whitphx](https://github.com/whitphx) in [PR 4223](https://github.com/gradio-app/gradio/pull/4223)
+- Relocate `mount_css` fn to remove circular dependency by [@whitphx](https://github.com/whitphx) in [PR 4222](https://github.com/gradio-app/gradio/pull/4222)
+- Upgrade Black to 23.3 by [@akx](https://github.com/akx) in [PR 4259](https://github.com/gradio-app/gradio/pull/4259)
+- Add frontend LaTeX support in `gr.Chatbot()` using `KaTeX` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4285](https://github.com/gradio-app/gradio/pull/4285).
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.31.0
+
+### New Features:
+
+- The reloader command (`gradio app.py`) can now accept command line arguments by [@micky2be](https://github.com/micky2be) in [PR 4119](https://github.com/gradio-app/gradio/pull/4119) (see the sketch below)
+- Added `format` argument to `Audio` component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4178](https://github.com/gradio-app/gradio/pull/4178)
+- Add JS client code snippets to use via api page by [@aliabd](https://github.com/aliabd) in [PR 3927](https://github.com/gradio-app/gradio/pull/3927).
+- Update to the JS client by [@pngwn](https://github.com/pngwn) in [PR 4202](https://github.com/gradio-app/gradio/pull/4202)
+
+### Bug Fixes:
+
+- Fix "TypeError: issubclass() arg 1 must be a class" when using `Optional` type hints by [@lingfengchencn](https://github.com/lingfengchencn) in [PR 4200](https://github.com/gradio-app/gradio/pull/4200).
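+
+A sketch of the reloader accepting command-line arguments, as referenced above (the `--greeting` flag is hypothetical and parsed by the script itself):
+
+```py
+# app.py -- run with: gradio app.py --greeting "Howdy"
+import argparse
+
+import gradio as gr
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--greeting", default="Hello")  # hypothetical flag
+args, _ = parser.parse_known_args()
+
+demo = gr.Interface(lambda name: f"{args.greeting}, {name}!", "textbox", "textbox")
+demo.launch()
+```
+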
+- Gradio will no longer send any analytics or call home if analytics are disabled with the GRADIO_ANALYTICS_ENABLED environment variable. By [@akx](https://github.com/akx) in [PR 4194](https://github.com/gradio-app/gradio/pull/4194) and [PR 4236](https://github.com/gradio-app/gradio/pull/4236)
+- The deprecation warnings for kwargs now show the actual stack level for the invocation, by [@akx](https://github.com/akx) in [PR 4203](https://github.com/gradio-app/gradio/pull/4203).
+- Ensure cancelling functions work correctly by [@pngwn](https://github.com/pngwn) in [PR 4225](https://github.com/gradio-app/gradio/pull/4225)
+- Fixes a bug with typing.get_type_hints() on Python 3.9 by [@abidlabs](https://github.com/abidlabs) in [PR 4228](https://github.com/gradio-app/gradio/pull/4228).
+- Fixes JSONDecodeError by [@davidai](https://github.com/davidai) in [PR 4241](https://github.com/gradio-app/gradio/pull/4241)
+- Fix `chatbot_dialogpt` demo by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4238](https://github.com/gradio-app/gradio/pull/4238).
+
+### Other Changes:
+
+- Change `gr.Chatbot()` markdown parsing to frontend using `marked` library and `prism` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4150](https://github.com/gradio-app/gradio/pull/4150)
+- Update the js client by [@pngwn](https://github.com/pngwn) in [PR 3899](https://github.com/gradio-app/gradio/pull/3899)
+- Fix documentation for the shape of the numpy array produced by the `Image` component by [@der3318](https://github.com/der3318) in [PR 4204](https://github.com/gradio-app/gradio/pull/4204).
+- Updates the timeout for websocket messaging from 1 second to 5 seconds by [@abidlabs](https://github.com/abidlabs) in [PR 4235](https://github.com/gradio-app/gradio/pull/4235)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+## 3.30.0
+
+### New Features:
+
+- Adds a `root_path` parameter to `launch()` that allows running Gradio applications on subpaths (e.g. www.example.com/app) behind a proxy, by [@abidlabs](https://github.com/abidlabs) in [PR 4133](https://github.com/gradio-app/gradio/pull/4133) (see the sketch at the end of this section)
+- Fix dropdown change listener to trigger on change when updated as an output by [@aliabid94](https://github.com/aliabid94) in [PR 4128](https://github.com/gradio-app/gradio/pull/4128).
+- Add `.input` event listener, which is only triggered when a user changes the component value (as compared to `.change`, which is also triggered when a component updates as the result of a function trigger), by [@aliabid94](https://github.com/aliabid94) in [PR 4157](https://github.com/gradio-app/gradio/pull/4157).
+
+### Bug Fixes:
+
+- Records username when flagging by [@abidlabs](https://github.com/abidlabs) in [PR 4135](https://github.com/gradio-app/gradio/pull/4135)
+- Fix website build issue by [@aliabd](https://github.com/aliabd) in [PR 4142](https://github.com/gradio-app/gradio/pull/4142)
+- Fix lang agnostic type info for `gr.File(file_count='multiple')` output components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4153](https://github.com/gradio-app/gradio/pull/4153)
+
+### Other Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
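+
+Minimal sketches of the new `root_path` parameter and `.input` listener from this release (paths and component names are illustrative):
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    box = gr.Textbox(label="Type here")
+    out = gr.Textbox(label="Echo")
+    # .input fires only on direct user edits, unlike .change, which also
+    # fires when the value is updated as the result of a function trigger
+    box.input(lambda x: x.upper(), box, out)
+
+# Serve behind a proxy at e.g. https://www.example.com/app
+demo.launch(root_path="/app")
+```
+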
+
+## 3.29.0
+
+### New Features:
+
+- Returning language agnostic types in the `/info` route by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4039](https://github.com/gradio-app/gradio/pull/4039)
+
+### Bug Fixes:
+
+- Allow users to upload audio files in Audio component on iOS by [@aliabid94](https://github.com/aliabid94) in [PR 4071](https://github.com/gradio-app/gradio/pull/4071).
+- Fixes the gradio theme builder error that appeared on launch by [@aliabid94](https://github.com/aliabid94) and [@abidlabs](https://github.com/abidlabs) in [PR 4080](https://github.com/gradio-app/gradio/pull/4080)
+- Keep Accordion content in DOM by [@aliabid94](https://github.com/aliabid94) in [PR 4073](https://github.com/gradio-app/gradio/pull/4073)
+- Fixed bug where type hints in functions caused the event handler to crash by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4068](https://github.com/gradio-app/gradio/pull/4068)
+- Fix dropdown default value not appearing by [@aliabid94](https://github.com/aliabid94) in [PR 4072](https://github.com/gradio-app/gradio/pull/4072).
+- Soft theme label color fix by [@aliabid94](https://github.com/aliabid94) in [PR 4070](https://github.com/gradio-app/gradio/pull/4070)
+- Fix `gr.Slider` `release` event not triggering on mobile by [@space-nuko](https://github.com/space-nuko) in [PR 4098](https://github.com/gradio-app/gradio/pull/4098)
+- Removes extraneous `State` component info from the `/info` route by [@abidlabs](https://github.com/abidlabs) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)
+- Make .then() work even if first event fails by [@aliabid94](https://github.com/aliabid94) in [PR 4115](https://github.com/gradio-app/gradio/pull/4115).
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Allow users to submit with enter in Interfaces with textbox / number inputs by [@aliabid94](https://github.com/aliabid94) in [PR 4090](https://github.com/gradio-app/gradio/pull/4090).
+- Updates gradio's requirements.txt to require uvicorn>=0.14.0 by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)
+- Updates some error messaging by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)
+- Renames simplified Chinese translation file from `zh-cn.json` to `zh-CN.json` by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.28.3
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Fixes issue with indentation in `gr.Code()` component with streaming by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4043](https://github.com/gradio-app/gradio/pull/4043)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.28.2
+
+### New Features:
+
+- Add support for `visual-question-answering`, `document-question-answering`, and `image-to-text` using `gr.Interface.load("models/...")` and `gr.Interface.from_pipeline` by [@osanseviero](https://github.com/osanseviero) in [PR 3887](https://github.com/gradio-app/gradio/pull/3887)
+- Add code block support in `gr.Chatbot()`, by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4048](https://github.com/gradio-app/gradio/pull/4048)
+- Adds the ability to blocklist filepaths (and also improves the allowlist mechanism) by [@abidlabs](https://github.com/abidlabs) in [PR 4047](https://github.com/gradio-app/gradio/pull/4047).
+- Adds the ability to specify the upload directory via an environment variable by [@abidlabs](https://github.com/abidlabs) in [PR 4047](https://github.com/gradio-app/gradio/pull/4047).
+
+### Bug Fixes:
+
+- Fixes issue with `matplotlib` not rendering correctly if the backend was not set to `Agg` by [@abidlabs](https://github.com/abidlabs) in [PR 4029](https://github.com/gradio-app/gradio/pull/4029)
+- Fixes bug where rendering the same `gr.State` across different Interfaces/Blocks within larger Blocks would not work by [@abidlabs](https://github.com/abidlabs) in [PR 4030](https://github.com/gradio-app/gradio/pull/4030)
+- Code component visual updates by [@pngwn](https://github.com/pngwn) in [PR 4051](https://github.com/gradio-app/gradio/pull/4051)
+
+### Documentation Changes:
+
+- Adds a Guide on how to use the Python Client within a FastAPI app, by [@abidlabs](https://github.com/abidlabs) in [PR 3892](https://github.com/gradio-app/gradio/pull/3892)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+- `gr.HuggingFaceDatasetSaver` behavior changed internally. The `flagging/` folder is no longer a `.git/` folder when using it. The `organization` parameter is now ignored in favor of passing a full dataset id as `dataset_name` (e.g. `"username/my-dataset"`).
+- New lines (`\n`) are not automatically converted to `<br>` in `gr.Markdown()` or `gr.Chatbot()`. For multiple new lines, a developer must add multiple `<br>` tags.
+
+### Full Changelog:
+
+- Safer version of `gr.HuggingFaceDatasetSaver` using HTTP methods instead of git pull/push by [@Wauplin](https://github.com/Wauplin) in [PR 3973](https://github.com/gradio-app/gradio/pull/3973)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.28.1
+
+### New Features:
+
+- Add a "clear mask" button to `gr.Image` sketch modes, by [@space-nuko](https://github.com/space-nuko) in [PR 3615](https://github.com/gradio-app/gradio/pull/3615)
+
+### Bug Fixes:
+
+- Fix dropdown default value not appearing by [@aliabid94](https://github.com/aliabid94) in [PR 3996](https://github.com/gradio-app/gradio/pull/3996).
+- Fix faded coloring of output textboxes in iOS / Safari by [@aliabid94](https://github.com/aliabid94) in [PR 3993](https://github.com/gradio-app/gradio/pull/3993)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+- CI: Simplified Python CI workflow by [@akx](https://github.com/akx) in [PR 3982](https://github.com/gradio-app/gradio/pull/3982)
+- Upgrade pyright to 1.1.305 by [@akx](https://github.com/akx) in [PR 4042](https://github.com/gradio-app/gradio/pull/4042)
+- More Ruff rules are enabled and lint errors fixed by [@akx](https://github.com/akx) in [PR 4038](https://github.com/gradio-app/gradio/pull/4038)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.28.0
+
+### Bug Fixes:
+
+- Fix duplicate play commands in full-screen mode of 'video' by [@tomchang25](https://github.com/tomchang25) in [PR 3968](https://github.com/gradio-app/gradio/pull/3968).
+- Fix an issue where the UI would get stuck because the DataFrame's 'selected' state was not being reset, by [@tomchang25](https://github.com/tomchang25) in [PR 3916](https://github.com/gradio-app/gradio/pull/3916).
+- Fix issue where `gr.Video()` would not work inside a `gr.Tab()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3891](https://github.com/gradio-app/gradio/pull/3891)
+- Fixed issue with the `old_value` check in File, by [@tomchang25](https://github.com/tomchang25) in [PR 3859](https://github.com/gradio-app/gradio/pull/3859).
+- Fixed bug where all bokeh plots appeared in the same div by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3896](https://github.com/gradio-app/gradio/pull/3896)
+- Fixed image outputs to automatically take full output image height, unless explicitly set, by [@aliabid94](https://github.com/aliabid94) in [PR 3905](https://github.com/gradio-app/gradio/pull/3905)
+- Fix issue in `gr.Gallery()` where setting height causes aspect ratio of images to collapse by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3830](https://github.com/gradio-app/gradio/pull/3830)
+- Fix issue where requesting a non-existing file would trigger a 500 error by [@micky2be](https://github.com/micky2be) in [PR 3895](https://github.com/gradio-app/gradio/pull/3895).
+- Fix bugs with `abspath` handling of symlinks and unresolvable paths on Windows by [@micky2be](https://github.com/micky2be) in [PR 3895](https://github.com/gradio-app/gradio/pull/3895).
+- Fixes type in client `Status` enum by [@10zinten](https://github.com/10zinten) in [PR 3931](https://github.com/gradio-app/gradio/pull/3931)
+- Fix `gr.ChatBot` to handle image URLs by [@tye-singwa](https://github.com/tye-signwa) in [PR 3953](https://github.com/gradio-app/gradio/pull/3953)
+- Move Google Tag Manager related initialization code to analytics-enabled block by [@akx](https://github.com/akx) in [PR 3956](https://github.com/gradio-app/gradio/pull/3956)
+- Fix bug where port was not reused if the demo was closed and then re-launched by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3959](https://github.com/gradio-app/gradio/pull/3959)
+- Fixes issue where dropdown does not position itself at selected element when opened by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3639](https://github.com/gradio-app/gradio/pull/3639)
+
+### Documentation Changes:
+
+- Make use of `gr` consistent across the docs by [@duerrsimon](https://github.com/duerrsimon) in [PR 3901](https://github.com/gradio-app/gradio/pull/3901)
+- Fixed typo in theming-guide.md by [@eltociear](https://github.com/eltociear) in [PR 3952](https://github.com/gradio-app/gradio/pull/3952)
+
+### Testing and Infrastructure Changes:
+
+- CI: Python backend lint is only run once, by [@akx](https://github.com/akx) in [PR 3960](https://github.com/gradio-app/gradio/pull/3960)
+- Format invocations and concatenations were replaced by f-strings where possible by [@akx](https://github.com/akx) in [PR 3984](https://github.com/gradio-app/gradio/pull/3984)
+- Linting rules were made more strict and issues fixed by [@akx](https://github.com/akx) in [PR 3979](https://github.com/gradio-app/gradio/pull/3979).
+
+### Breaking Changes:
+
+- Some re-exports in `gradio.themes` utilities (introduced in 3.24.0) have been eradicated.
+  By [@akx](https://github.com/akx) in [PR 3958](https://github.com/gradio-app/gradio/pull/3958)
+
+### Full Changelog:
+
+- Add DESCRIPTION.md to image_segmentation demo by [@aliabd](https://github.com/aliabd) in [PR 3866](https://github.com/gradio-app/gradio/pull/3866)
+- Fix error in running `gr.themes.builder()` by [@deepkyu](https://github.com/deepkyu) in [PR 3869](https://github.com/gradio-app/gradio/pull/3869)
+- Fixed a JavaScript TypeError when loading custom JS with `_js` and setting `outputs` to `None` in `gradio.Blocks()` by [@DavG25](https://github.com/DavG25) in [PR 3883](https://github.com/gradio-app/gradio/pull/3883)
+- Fixed `bg_background_fill` theme property to expand to whole background, `block_radius` to affect form elements as well, and added `block_label_shadow` theme property by [@aliabid94](https://github.com/aliabid94) in [PR 3590](https://github.com/gradio-app/gradio/pull/3590)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.27.0
+
+### New Features:
+
+###### AnnotatedImage Component
+
+New AnnotatedImage component allows users to highlight regions of an image, either by providing bounding boxes, or 0-1 pixel masks. This component is useful for tasks such as image segmentation, object detection, and image captioning.
+
+![AnnotatedImage screenshot](https://user-images.githubusercontent.com/7870876/232142720-86e0020f-beaf-47b9-a843-689c9621f09c.gif)
+
+Example usage:
+
+```python
+import gradio as gr
+import numpy as np
+
+with gr.Blocks() as demo:
+    img = gr.Image()
+    img_section = gr.AnnotatedImage()
+
+    def mask(img):
+        top_left_corner = [0, 0, 20, 20]
+        random_mask = np.random.randint(0, 2, img.shape[:2])
+        return (img, [(top_left_corner, "left corner"), (random_mask, "random")])
+
+    img.change(mask, img, img_section)
+```
+
+See the [image_segmentation demo](https://github.com/gradio-app/gradio/tree/main/demo/image_segmentation) for a full example. By [@aliabid94](https://github.com/aliabid94) in [PR 3836](https://github.com/gradio-app/gradio/pull/3836)
+
+### Bug Fixes:
+
+No changes to highlight.
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.26.0
+
+### New Features:
+
+###### `Video` component supports subtitles
+
+- Allow the video component to accept subtitles as input, by [@tomchang25](https://github.com/tomchang25) in [PR 3673](https://github.com/gradio-app/gradio/pull/3673). To provide subtitles, simply return a tuple consisting of `(path_to_video, path_to_subtitles)` from your function. Both `.srt` and `.vtt` formats are supported:
+
+```py
+with gr.Blocks() as demo:
+    gr.Video(("video.mp4", "captions.srt"))
+```
+
+### Bug Fixes:
+
+- Fix code markdown support in `gr.Chatbot()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3816](https://github.com/gradio-app/gradio/pull/3816)
+
+### Documentation Changes:
+
+- Updates the "view API" page in Gradio apps to use the `gradio_client` library by [@aliabd](https://github.com/aliabd) in [PR 3765](https://github.com/gradio-app/gradio/pull/3765)
+
+- Read more about how to use the `gradio_client` library here: https://gradio.app/getting-started-with-the-python-client/
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.25.0
+
+### New Features:
+
+- Improve error messages when number of inputs/outputs to event handlers mismatch, by [@space-nuko](https://github.com/space-nuko) in [PR 3519](https://github.com/gradio-app/gradio/pull/3519)
+
+- Add `select` listener to Images, allowing users to click on any part of an image and get the coordinates of the click by [@aliabid94](https://github.com/aliabid94) in [PR 3786](https://github.com/gradio-app/gradio/pull/3786).
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    img = gr.Image()
+    textbox = gr.Textbox()
+
+    def select_handler(img, evt: gr.SelectData):
+        selected_pixel = img[evt.index[1], evt.index[0]]
+        return f"Selected pixel: {selected_pixel}"
+
+    img.select(select_handler, img, textbox)
+```
+
+![Recording 2023-04-08 at 17 44 39](https://user-images.githubusercontent.com/7870876/230748572-90a2a8d5-116d-4769-bb53-5516555fbd0f.gif)
+
+### Bug Fixes:
+
+- Increase timeout for sending analytics data by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3647](https://github.com/gradio-app/gradio/pull/3647)
+- Fix bug where http token was not accessed over websocket connections by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3735](https://github.com/gradio-app/gradio/pull/3735)
+- Add ability to specify `rows`, `columns` and `object-fit` in `style()` for `gr.Gallery()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3586](https://github.com/gradio-app/gradio/pull/3586)
+- Fix bug where recording an audio file through the microphone resulted in a corrupted file name by [@abidlabs](https://github.com/abidlabs) in [PR 3770](https://github.com/gradio-app/gradio/pull/3770)
+- Added `ssl_verify` to the `Blocks.launch` method to allow use of self-signed certs by [@garrettsutula](https://github.com/garrettsutula) in [PR 3873](https://github.com/gradio-app/gradio/pull/3873)
+- Fix bug where iterators were not being reset for processes that terminated early by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3777](https://github.com/gradio-app/gradio/pull/3777)
+- Fix bug where the upload button was not properly handling the `file_count='multiple'` case by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3782](https://github.com/gradio-app/gradio/pull/3782)
+- Fix bug where the "Use via API" button was giving an error by [@Devang-C](https://github.com/Devang-C) in [PR 3783](https://github.com/gradio-app/gradio/pull/3783)
+
+### Documentation Changes:
+
+- Fix invalid argument docstrings, by [@akx](https://github.com/akx) in [PR 3740](https://github.com/gradio-app/gradio/pull/3740)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Fixed IPv6 listening to work with bracket [::1] notation, by [@dsully](https://github.com/dsully) in [PR 3695](https://github.com/gradio-app/gradio/pull/3695)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.24.1
+
+### New Features:
+
+- No changes to highlight.
+
+### Bug Fixes:
+
+- Fixes Chatbot issue where new lines were being created every time a message was sent back and forth by [@aliabid94](https://github.com/aliabid94) in [PR 3717](https://github.com/gradio-app/gradio/pull/3717).
+- Fixes bug where updating data in a DataFrame invoked a `select` event once the dataframe had been selected, by [@yiyuezhuo](https://github.com/yiyuezhuo) in [PR 3861](https://github.com/gradio-app/gradio/pull/3861)
+- Fixes false positive warning caused by overly strict type checking by [@yiyuezhuo](https://github.com/yiyuezhuo) in [PR 3837](https://github.com/gradio-app/gradio/pull/3837).
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.24.0
+
+### New Features:
+
+- Trigger the release event when Slider number input is released or unfocused by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3589](https://github.com/gradio-app/gradio/pull/3589)
+- Created Theme Builder, which allows users to create themes without writing any code, by [@aliabid94](https://github.com/aliabid94) in [PR 3664](https://github.com/gradio-app/gradio/pull/3664). Launch by:
+
+  ```python
+  import gradio as gr
+  gr.themes.builder()
+  ```
+
+  ![Theme Builder](https://user-images.githubusercontent.com/7870876/228204929-d71cbba5-69c2-45b3-bd20-e3a201d98b12.png)
+
+- The `Dropdown` component now has an `allow_custom_value` parameter that lets users type in custom values not in the original list of choices.
+- The `Colorpicker` component now has a `.blur()` event.
+
+###### Added a download button for videos! 📥
+
+![download_video](https://user-images.githubusercontent.com/41651716/227009612-9bc5fb72-2a44-4c55-9b7b-a0fa098e7f25.gif)
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3581](https://github.com/gradio-app/gradio/pull/3581).
+
+### Bug Fixes:
+
+- Fixed bug where text for altair plots was not legible in dark mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3555](https://github.com/gradio-app/gradio/pull/3555)
+- Fixes `Chatbot` and `Image` components so that files passed during processing are added to a directory where they can be served from, by [@abidlabs](https://github.com/abidlabs) in [PR 3523](https://github.com/gradio-app/gradio/pull/3523)
+- Use Gradio API server to send telemetry using `huggingface_hub` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3488](https://github.com/gradio-app/gradio/pull/3488)
+- Fixes an issue where if the Blocks scope was not exited, then State could be shared across sessions, by [@abidlabs](https://github.com/abidlabs) in [PR 3600](https://github.com/gradio-app/gradio/pull/3600)
+- Ensures that `gr.load()` loads and applies the upstream theme, by [@abidlabs](https://github.com/abidlabs) in [PR 3641](https://github.com/gradio-app/gradio/pull/3641)
+- Fixed bug where "or" was not being localized in file upload text by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3599](https://github.com/gradio-app/gradio/pull/3599)
+- Fixed bug where chatbot does not autoscroll inside of a tab, row or column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3637](https://github.com/gradio-app/gradio/pull/3637)
+- Fixed bug where textbox shrinks when `lines` set to larger than 20 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3637](https://github.com/gradio-app/gradio/pull/3637)
+- Ensure CSS has fully loaded before rendering the application, by [@pngwn](https://github.com/pngwn) in [PR 3573](https://github.com/gradio-app/gradio/pull/3573)
+- Support using an empty list as `gr.Dataframe` value, by [@space-nuko](https://github.com/space-nuko) in [PR 3646](https://github.com/gradio-app/gradio/pull/3646)
+- Fixed `gr.Image` not filling the entire element size, by [@space-nuko](https://github.com/space-nuko) in [PR 3649](https://github.com/gradio-app/gradio/pull/3649)
+- Make `gr.Code` support the `lines` property, by [@space-nuko](https://github.com/space-nuko) in [PR 3651](https://github.com/gradio-app/gradio/pull/3651)
+- Fixes certain `_js` return values being double-wrapped in an array, by [@space-nuko](https://github.com/space-nuko) in [PR 3594](https://github.com/gradio-app/gradio/pull/3594)
+- Correct the documentation of `gr.File` component to state that its preprocessing method converts the uploaded file to a temporary file, by [@RussellLuo](https://github.com/RussellLuo) in [PR 3660](https://github.com/gradio-app/gradio/pull/3660)
+- Fixed bug in Serializer ValueError text by [@osanseviero](https://github.com/osanseviero) in [PR 3669](https://github.com/gradio-app/gradio/pull/3669)
+- Fix default parameter argument and `gr.Progress` used in same function, by [@space-nuko](https://github.com/space-nuko) in [PR 3671](https://github.com/gradio-app/gradio/pull/3671)
+- Hide `Remove All` button in `gr.Dropdown` single-select mode by [@space-nuko](https://github.com/space-nuko) in [PR 3678](https://github.com/gradio-app/gradio/pull/3678)
+- Fix broken spaces in docs by [@aliabd](https://github.com/aliabd) in [PR 3698](https://github.com/gradio-app/gradio/pull/3698)
+- Fix items in `gr.Dropdown` besides the selected item receiving a checkmark, by [@space-nuko](https://github.com/space-nuko) in [PR 3644](https://github.com/gradio-app/gradio/pull/3644)
+- Fix several `gr.Dropdown` issues and improve usability, by [@space-nuko](https://github.com/space-nuko) in [PR 3705](https://github.com/gradio-app/gradio/pull/3705)
+
+### Documentation Changes:
+
+- Makes some fixes to the Theme Guide related to naming of variables, by [@abidlabs](https://github.com/abidlabs) in [PR 3561](https://github.com/gradio-app/gradio/pull/3561)
+- Documented `HuggingFaceDatasetJSONSaver` by [@osanseviero](https://github.com/osanseviero) in [PR 3604](https://github.com/gradio-app/gradio/pull/3604)
+- Makes some additions to documentation of `Audio` and `State` components, and fixes the `pictionary` demo by [@abidlabs](https://github.com/abidlabs) in [PR 3611](https://github.com/gradio-app/gradio/pull/3611)
+- Fix outdated sharing your app guide by [@aliabd](https://github.com/aliabd) in [PR 3699](https://github.com/gradio-app/gradio/pull/3699)
+
+### Testing and Infrastructure Changes:
+
+- Removed heavily-mocked tests related to comet_ml, wandb, and mlflow as they added a significant amount of test dependencies that prevented installation of test dependencies on Windows environments. By [@abidlabs](https://github.com/abidlabs) in [PR 3608](https://github.com/gradio-app/gradio/pull/3608)
+- Added Windows continuous integration, by [@space-nuko](https://github.com/space-nuko) in [PR 3628](https://github.com/gradio-app/gradio/pull/3628)
+- Switched linting from flake8 + isort to `ruff`, by [@akx](https://github.com/akx) in [PR 3710](https://github.com/gradio-app/gradio/pull/3710)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Mobile responsive iframes in themes guide by [@aliabd](https://github.com/aliabd) in [PR 3562](https://github.com/gradio-app/gradio/pull/3562)
+- Remove extra $demo from theme guide by [@aliabd](https://github.com/aliabd) in [PR 3563](https://github.com/gradio-app/gradio/pull/3563)
+- Set the theme name to be the upstream repo name when loading from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3595](https://github.com/gradio-app/gradio/pull/3595)
+- Copy everything in website Dockerfile, fix build issues by [@aliabd](https://github.com/aliabd) in [PR 3659](https://github.com/gradio-app/gradio/pull/3659)
+- Raise error when an event is queued but the queue is not configured by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3640](https://github.com/gradio-app/gradio/pull/3640)
+- Allows users to pass in a string name for a built-in theme, by [@abidlabs](https://github.com/abidlabs) in [PR 3641](https://github.com/gradio-app/gradio/pull/3641)
+- Added `orig_name` to Video output in the backend so that the front end can set the right name for downloaded video files by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3700](https://github.com/gradio-app/gradio/pull/3700)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.23.0
+
+### New Features:
+
+###### Theme Sharing!
+
+Once you have created a theme, you can upload it to the Hugging Face Hub to let others view it, use it, and build off of it! You can also download, reuse, and remix other people's themes. See https://gradio.app/theming-guide/ for more details.
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3428](https://github.com/gradio-app/gradio/pull/3428)
+
+### Bug Fixes:
+
+- Removes leading spaces from all lines of code uniformly in the `gr.Code()` component. By [@abidlabs](https://github.com/abidlabs) in [PR 3556](https://github.com/gradio-app/gradio/pull/3556)
+- Fixed broken login page, by [@aliabid94](https://github.com/aliabid94) in [PR 3529](https://github.com/gradio-app/gradio/pull/3529)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Fix rendering of dropdowns to take more space, and related bugs, by [@aliabid94](https://github.com/aliabid94) in [PR 3549](https://github.com/gradio-app/gradio/pull/3549)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.22.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Restore label bars by [@aliabid94](https://github.com/aliabid94) in [PR 3507](https://github.com/gradio-app/gradio/pull/3507)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.22.0
+
+### New Features:
+
+###### Official Theme release
+
+Gradio now supports a new theme system, which allows you to customize the look and feel of your app. You can now use the `theme=` kwarg to pass in a prebuilt theme, or customize your own! See https://gradio.app/theming-guide/ for more details.
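+
+For instance, a minimal sketch of the new kwarg (assuming the built-in `gr.themes.Soft` theme; any other prebuilt theme or a customized theme object can be passed the same way):
+
+```python
+import gradio as gr
+
+# A minimal sketch: pass a prebuilt theme object via the new `theme=` kwarg.
+# `gr.themes.Soft()` is assumed here; a customized theme works the same way.
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    name = gr.Textbox(label="Name")
+    greeting = gr.Textbox(label="Greeting")
+    gr.Button("Greet").click(lambda n: f"Hello {n}!", name, greeting)
+
+demo.launch()
+```
+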
By [@aliabid94](https://github.com/aliabid94) in [PR 3470](https://github.com/gradio-app/gradio/pull/3470) and [PR 3497](https://github.com/gradio-app/gradio/pull/3497)
+
+###### `elem_classes`
+
+Add keyword argument `elem_classes` to Components to control class names of components, in the same manner as the existing `elem_id`.
+By [@aliabid94](https://github.com/aliabid94) in [PR 3466](https://github.com/gradio-app/gradio/pull/3466)
+
+### Bug Fixes:
+
+- Fixes the `File.upload()` event trigger, which broke as part of the change in how we uploaded files, by [@abidlabs](https://github.com/abidlabs) in [PR 3462](https://github.com/gradio-app/gradio/pull/3462)
+- Fixed issue with `gr.Request` object failing to handle dictionaries when nested keys couldn't be converted to variable names [#3454](https://github.com/gradio-app/gradio/issues/3454) by [@radames](https://github.com/radames) in [PR 3459](https://github.com/gradio-app/gradio/pull/3459)
+- Fixed bug where CSS and the client API were not working properly when mounted in a subpath by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3482](https://github.com/gradio-app/gradio/pull/3482)
+
+### Documentation Changes:
+
+- Document `gr.Error` in the docs by [@aliabd](https://github.com/aliabd) in [PR 3465](https://github.com/gradio-app/gradio/pull/3465)
+
+### Testing and Infrastructure Changes:
+
+- Pinned `pyright==1.1.298` for stability by [@abidlabs](https://github.com/abidlabs) in [PR 3475](https://github.com/gradio-app/gradio/pull/3475)
+- Removed `IOComponent.add_interactive_to_config()` by [@space-nuko](https://github.com/space-nuko) in [PR 3476](https://github.com/gradio-app/gradio/pull/3476)
+- Removed `IOComponent.generate_sample()` by [@space-nuko](https://github.com/space-nuko) in [PR 3483](https://github.com/gradio-app/gradio/pull/3483)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Revert primary button background color in dark mode by [@aliabid94](https://github.com/aliabid94) in [PR 3468](https://github.com/gradio-app/gradio/pull/3468)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.21.0
+
+### New Features:
+
+###### Theme Sharing 🎨 🤝
+
+You can now share your Gradio themes with the world!
+
+After creating a theme, you can upload it to the Hugging Face Hub to let others view it, use it, and build off of it!
+
+###### Uploading
+
+There are two ways to upload a theme: via the theme class instance or the command line.
+
+1. Via the class instance
+
+```python
+my_theme.push_to_hub(repo_name="my_theme",
+                     version="0.2.0",
+                     hf_token="...")
+```
+
+2. Via the command line
+
+First save the theme to disk
+
+```python
+my_theme.dump(filename="my_theme.json")
+```
+
+Then use the `upload_theme` command:
+
+```bash
+upload_theme \
+  "my_theme.json" \
+  "my_theme" \
+  "0.2.0" \
+  ""
+```
+
+The `version` must be a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string.
+
+This creates a space on the Hugging Face Hub to host the theme files and show potential users a preview of your theme.
+
+An example theme space is here: https://huggingface.co/spaces/freddyaboulton/dracula_revamped
+
+###### Downloading
+
+To use a theme from the hub, use the `from_hub` method on the `ThemeClass` and pass it to your app:
+
+```python
+my_theme = gr.Theme.from_hub("freddyaboulton/my_theme")
+
+with gr.Blocks(theme=my_theme) as demo:
+    ....
+```
+
+You can also pass the theme string directly to `Blocks` or `Interface` (`gr.Blocks(theme="freddyaboulton/my_theme")`).
+
+You can pin your app to an upstream theme version by using semantic versioning expressions.
+
+For example, the following would ensure the theme we load from the `my_theme` repo was between versions `0.1.0` and `0.2.0`:
+
+```python
+with gr.Blocks(theme="freddyaboulton/my_theme@>=0.1.0,<0.2.0") as demo:
+    ....
+```
+
+by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3428](https://github.com/gradio-app/gradio/pull/3428)
+
+###### Code component 🦾
+
+New Code component allows you to enter, edit, and display code with full syntax highlighting by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)
+
+![](https://user-images.githubusercontent.com/12937446/224116643-5cfb94b3-93ce-43ee-bb7b-c25c3b66e0a1.png)
+
+###### The `Chatbot` component now supports audio, video, and images
+
+The `Chatbot` component now supports audio, video, and images with a simple syntax: simply
+pass in a tuple with the URL or filepath (the optional second element of the tuple is alt text), and the image/audio/video will be displayed:
+
+```python
+gr.Chatbot([
+    (("driving.mp4",), "cool video"),
+    (("cantina.wav",), "cool audio"),
+    (("lion.jpg", "A lion"), "cool pic"),
+]).style(height=800)
+```
+
+image
+
+Note: images were previously supported via Markdown syntax and that is still supported for backwards compatibility. By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3413](https://github.com/gradio-app/gradio/pull/3413)
+
+- Allow consecutive function triggers with `.then` and `.success` by [@aliabid94](https://github.com/aliabid94) in [PR 3430](https://github.com/gradio-app/gradio/pull/3430)
+
+- Added the `.select()` event listener, which also includes event data that can be passed as an argument to a function with type hint `gr.SelectData`. The following components support the `.select()` event listener: Chatbot, CheckboxGroup, Dataframe, Dropdown, File, Gallery, HighlightedText, Label, Radio, TabItem, Tab, Textbox.
Example usage:
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    gallery = gr.Gallery(["images/1.jpg", "images/2.jpg", "images/3.jpg"])
+    selected_index = gr.Textbox()
+
+    def on_select(evt: gr.SelectData):
+        return evt.index
+
+    gallery.select(on_select, None, selected_index)
+```
+
+By [@aliabid94](https://github.com/aliabid94) in [PR 3399](https://github.com/gradio-app/gradio/pull/3399)
+
+- The `Textbox` component now includes a copy button by [@abidlabs](https://github.com/abidlabs) in [PR 3452](https://github.com/gradio-app/gradio/pull/3452)
+
+### Bug Fixes:
+
+- Use `huggingface_hub` to send telemetry on `interface` and `blocks`; eventually to replace Segment by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3342](https://github.com/gradio-app/gradio/pull/3342)
+- Ensure load events created by components (randomize for slider, callable values) are never queued unless `every` is passed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3391](https://github.com/gradio-app/gradio/pull/3391)
+- Prevent in-place updates of `generic_update` by shallow copying by [@gitgithan](https://github.com/gitgithan) in [PR 3405](https://github.com/gradio-app/gradio/pull/3405) to fix [#3282](https://github.com/gradio-app/gradio/issues/3282)
+- Fix bug caused by not importing `BlockContext` in `utils.py` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3424](https://github.com/gradio-app/gradio/pull/3424)
+- Ensure dropdown does not highlight partial matches by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)
+- Fix mic button display by [@aliabid94](https://github.com/aliabid94) in [PR 3456](https://github.com/gradio-app/gradio/pull/3456)
+
+### Documentation Changes:
+
+- Added a section on security and access when sharing Gradio apps by [@abidlabs](https://github.com/abidlabs) in [PR 3408](https://github.com/gradio-app/gradio/pull/3408)
+- Add Chinese README by [@uanu2002](https://github.com/uanu2002) in [PR 3394](https://github.com/gradio-app/gradio/pull/3394)
+- Adds documentation for web components by [@abidlabs](https://github.com/abidlabs) in [PR 3407](https://github.com/gradio-app/gradio/pull/3407)
+- Fixed link in Chinese readme by [@eltociear](https://github.com/eltociear) in [PR 3417](https://github.com/gradio-app/gradio/pull/3417)
+- Document Blocks methods by [@aliabd](https://github.com/aliabd) in [PR 3427](https://github.com/gradio-app/gradio/pull/3427)
+- Fixed bug where event handlers were not showing up in documentation by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3434](https://github.com/gradio-app/gradio/pull/3434)
+
+### Testing and Infrastructure Changes:
+
+- Fixes tests that were failing locally but passing on CI by [@abidlabs](https://github.com/abidlabs) in [PR 3411](https://github.com/gradio-app/gradio/pull/3411)
+- Remove codecov from the repo by [@aliabd](https://github.com/aliabd) in [PR 3415](https://github.com/gradio-app/gradio/pull/3415)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Persist file names of files uploaded through any Gradio component by [@abidlabs](https://github.com/abidlabs) in [PR 3412](https://github.com/gradio-app/gradio/pull/3412)
+- Fix markdown embedded component in docs by [@aliabd](https://github.com/aliabd) in [PR 3410](https://github.com/gradio-app/gradio/pull/3410)
+- Clean up event listeners code by [@aliabid94](https://github.com/aliabid94) in [PR 3420](https://github.com/gradio-app/gradio/pull/3420)
+- Fix CSS issue with Spaces logo by [@aliabd](https://github.com/aliabd) in [PR 3422](https://github.com/gradio-app/gradio/pull/3422)
+- Makes a few fixes to the `JSON` component (show_label parameter, icons) by [@abidlabs](https://github.com/abidlabs) in [PR 3451](https://github.com/gradio-app/gradio/pull/3451)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.20.1
+
+### New Features:
+
+- Add `height` kwarg to style in `gr.Chatbot()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3369](https://github.com/gradio-app/gradio/pull/3369)
+
+```python
+chatbot = gr.Chatbot().style(height=500)
+```
+
+### Bug Fixes:
+
+- Ensure uploaded images are always shown in the sketch tool by [@pngwn](https://github.com/pngwn) in [PR 3386](https://github.com/gradio-app/gradio/pull/3386)
+- Fixes bug where `self` was not ignored as the first parameter when `fn` is a non-static class member, by [@or25](https://github.com/or25) in [PR 3227](https://github.com/gradio-app/gradio/pull/3227)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.20.0
+
+### New Features:
+
+###### Release event for Slider
+
+Now you can trigger your Python function to run when the slider is released, as opposed to on every change of the slider's value!
+
+Simply use the `release` method on the slider:
+
+```python
+slider.release(function, inputs=[...], outputs=[...], api_name="predict")
+```
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3353](https://github.com/gradio-app/gradio/pull/3353)
+
+###### Dropdown Component Updates
+
+The standard dropdown component now supports searching for choices. Also, when `multiselect` is `True`, you can specify `max_choices` to set the maximum number of choices you want the user to be able to select from the dropdown component.
+
+```python
+gr.Dropdown(label="Choose your favorite colors", choices=["red", "blue", "green", "yellow", "orange"], multiselect=True, max_choices=2)
+```
+
+by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3211](https://github.com/gradio-app/gradio/pull/3211)
+
+###### Download button for images 🖼️
+
+Output images will now automatically have a download button displayed to make it easier to save and share
+the results of Machine Learning art models.
+
+![download_sketch](https://user-images.githubusercontent.com/41651716/221025113-e693bf41-eabd-42b3-a4f2-26f2708d98fe.gif)
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3297](https://github.com/gradio-app/gradio/pull/3297)
+
+- Updated image upload component to accept all image formats, including lossless formats like .webp by [@fienestar](https://github.com/fienestar) in [PR 3225](https://github.com/gradio-app/gradio/pull/3225)
+- Adds a disabled mode to the `gr.Button` component by setting `interactive=False` by [@abidlabs](https://github.com/abidlabs) in [PR 3266](https://github.com/gradio-app/gradio/pull/3266) and [PR 3288](https://github.com/gradio-app/gradio/pull/3288)
+- Adds visual feedback when the Flag button is clicked, by [@abidlabs](https://github.com/abidlabs) in [PR 3289](https://github.com/gradio-app/gradio/pull/3289)
+- Adds ability to set `flagging_options` display text and saved flag separately by [@abidlabs](https://github.com/abidlabs) in [PR 3289](https://github.com/gradio-app/gradio/pull/3289)
+- Allow the setting of `brush_radius` for the `Image` component both as a default and via `Image.update()` by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)
+- Added `info=` argument to form components to provide extra context to users, by [@aliabid94](https://github.com/aliabid94) in [PR 3291](https://github.com/gradio-app/gradio/pull/3291)
+- Allow developers to access the username of a logged-in user from the `gr.Request()` object using the `.username` attribute by [@abidlabs](https://github.com/abidlabs) in [PR 3296](https://github.com/gradio-app/gradio/pull/3296)
+- Add `preview` option to `Gallery.style` that launches the gallery in preview mode when first loaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3345](https://github.com/gradio-app/gradio/pull/3345)
+
+### Bug Fixes:
+
+- Ensure `mirror_webcam` is always respected by [@pngwn](https://github.com/pngwn) in [PR 3245](https://github.com/gradio-app/gradio/pull/3245)
+- Fix issue where updated markdown links were not being opened in a new tab by [@gante](https://github.com/gante) in [PR 3236](https://github.com/gradio-app/gradio/pull/3236)
+- API Docs Fixes by [@aliabd](https://github.com/aliabd) in [PR 3287](https://github.com/gradio-app/gradio/pull/3287)
+- Added a timeout to queue messages as some demos were experiencing infinitely growing queues from active jobs waiting forever for clients to respond by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3196](https://github.com/gradio-app/gradio/pull/3196)
+- Fixes the height of rendered LaTeX images so that they match the height of surrounding text by [@abidlabs](https://github.com/abidlabs) in [PR 3258](https://github.com/gradio-app/gradio/pull/3258) and in [PR 3276](https://github.com/gradio-app/gradio/pull/3276)
+- Fix bug where matplotlib images were always too small on the front end by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3274](https://github.com/gradio-app/gradio/pull/3274)
+- Remove embed's `initial_height` when loading is complete so the embed finds its natural height once it is loaded, by [@pngwn](https://github.com/pngwn) in [PR 3292](https://github.com/gradio-app/gradio/pull/3292)
+- Prevent Sketch from crashing when a default image is provided by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)
+- Respect the `shape` argument on the front end when creating Image Sketches
by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)
+- Fix infinite loop caused by setting `Dropdown`'s value to be `[]` and adding a change event on the dropdown by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3295](https://github.com/gradio-app/gradio/pull/3295)
+- Fix change event listed twice in image docs by [@aliabd](https://github.com/aliabd) in [PR 3318](https://github.com/gradio-app/gradio/pull/3318)
+- Fix bug that caused the UI to be vertically centered at all times by [@pngwn](https://github.com/pngwn) in [PR 3336](https://github.com/gradio-app/gradio/pull/3336)
+- Fix bug where `height` set in `Gallery.style` was not respected by the front-end by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3343](https://github.com/gradio-app/gradio/pull/3343)
+- Ensure markdown lists are rendered correctly by [@pngwn](https://github.com/pngwn) in [PR 3341](https://github.com/gradio-app/gradio/pull/3341)
+- Ensure that the initial empty value for `gr.Dropdown(multiselect=True)` is an empty list and the initial value for `gr.Dropdown(multiselect=False)` is an empty string by [@pngwn](https://github.com/pngwn) in [PR 3338](https://github.com/gradio-app/gradio/pull/3338)
+- Ensure uploaded images respect the shape property when the canvas is also enabled by [@pngwn](https://github.com/pngwn) in [PR 3351](https://github.com/gradio-app/gradio/pull/3351)
+- Ensure that Google Analytics works correctly when Gradio apps are created with `analytics_enabled=True` by [@abidlabs](https://github.com/abidlabs) in [PR 3349](https://github.com/gradio-app/gradio/pull/3349)
+- Fix bug where files were being re-uploaded after updates by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3375](https://github.com/gradio-app/gradio/pull/3375)
+- Fix error when using `backend_fn` and custom JS at the same time by [@jialeicui](https://github.com/jialeicui) in [PR 3358](https://github.com/gradio-app/gradio/pull/3358)
+- Support new embeds for Hugging Face Spaces subdomains by [@pngwn](https://github.com/pngwn) in [PR 3367](https://github.com/gradio-app/gradio/pull/3367)
+
+### Documentation Changes:
+
+- Added the `types` field to the dependency field in the config by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3315](https://github.com/gradio-app/gradio/pull/3315)
+- Gradio Status Page by [@aliabd](https://github.com/aliabd) in [PR 3331](https://github.com/gradio-app/gradio/pull/3331)
+- Adds a Guide on setting up a dashboard from Supabase data using the `gr.BarPlot`
+  component by [@abidlabs](https://github.com/abidlabs) in [PR 3275](https://github.com/gradio-app/gradio/pull/3275)
+
+### Testing and Infrastructure Changes:
+
+- Adds a script to benchmark the performance of the queue and adds some instructions on how to use it.
By [@freddyaboulton](https://github.com/freddyaboulton) and [@abidlabs](https://github.com/abidlabs) in [PR 3272](https://github.com/gradio-app/gradio/pull/3272)
+- Flaky Python tests no longer cancel non-flaky tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3344](https://github.com/gradio-app/gradio/pull/3344)
+
+### Breaking Changes:
+
+- Chatbot bubble colors can no longer be set by `chatbot.style(color_map=)` by [@aliabid94](https://github.com/aliabid94) in [PR 3370](https://github.com/gradio-app/gradio/pull/3370)
+
+### Full Changelog:
+
+- Fixed comment typo in components.py by [@eltociear](https://github.com/eltociear) in [PR 3235](https://github.com/gradio-app/gradio/pull/3235)
+- Cleaned up chatbot UI look and feel by [@aliabid94](https://github.com/aliabid94) in [PR 3370](https://github.com/gradio-app/gradio/pull/3370)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.19.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- UI fixes including footer and API docs by [@aliabid94](https://github.com/aliabid94) in [PR 3242](https://github.com/gradio-app/gradio/pull/3242)
+- Updated image upload component to accept all image formats, including lossless formats like .webp by [@fienestar](https://github.com/fienestar) in [PR 3225](https://github.com/gradio-app/gradio/pull/3225)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Added backend support for themes by [@aliabid94](https://github.com/aliabid94) in [PR 2931](https://github.com/gradio-app/gradio/pull/2931)
+- Added support for button sizes "lg" (default) and "sm".
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.19.0
+
+### New Features:
+
+###### Improved embedding experience
+
+When embedding a Spaces-hosted Gradio app as a web component, you now get an improved UI linking back to the original Space, better error handling and more intelligent load performance. No changes are required to your code to benefit from this enhanced experience; simply upgrade your Gradio SDK to the latest version.
+
+![](https://user-images.githubusercontent.com/12937446/219653294-86937632-72c1-4e93-a77c-af705d49382a.png)
+
+This behaviour is configurable. You can disable the info panel at the bottom by passing `info="false"`. You can disable the container entirely by passing `container="false"`.
+
+Error statuses are reported in the UI with an easy way for end-users to report problems to the original Space author via the community tab of that Hugging Face Space:
+
+![](https://user-images.githubusercontent.com/12937446/219655499-88019443-d694-44e7-9e6d-242e19d10a5c.png)
+
+By default, Gradio apps are lazy-loaded, vastly improving performance when there are several demos on the page. Metadata is loaded ahead of time, but the Space will only be loaded and rendered when it is in view.
+
+This behaviour is configurable. You can pass `eager="true"` to load and render the Space regardless of whether or not it is currently on the screen.
+
+by [@pngwn](https://github.com/pngwn) in [PR 3205](https://github.com/gradio-app/gradio/pull/3205)
+
+###### New `gr.BarPlot` component! 📊
+
+Create interactive bar plots from a high-level interface with `gr.BarPlot`.
+No need to remember matplotlib syntax anymore!
+
+Example usage:
+
+```python
+import gradio as gr
+import pandas as pd
+
+simple = pd.DataFrame({
+    'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
+    'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]
+})
+
+with gr.Blocks() as demo:
+    gr.BarPlot(
+        simple,
+        x="a",
+        y="b",
+        title="Simple Bar Plot with made up data",
+        tooltip=['a', 'b'],
+    )
+
+demo.launch()
+```
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3157](https://github.com/gradio-app/gradio/pull/3157)
+
+###### Bokeh plots are back! 🌠
+
+Fixed a bug that prevented Bokeh plots from being displayed on the front end and extended support for both 2.x and 3.x versions of Bokeh!
+
+![image](https://user-images.githubusercontent.com/41651716/219468324-0d82e07f-8fb4-4ff9-b40c-8250b29e45f7.png)
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3212](https://github.com/gradio-app/gradio/pull/3212)
+
+### Bug Fixes:
+
+- Adds ability to add a single message from the bot or user side. Ex: specify `None` as the second value in the tuple, to add a single message in the chatbot from the "bot" side.
+
+```python
+gr.Chatbot([("Hi, I'm DialoGPT. Try asking me a question.", None)])
+```
+
+By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3165](https://github.com/gradio-app/gradio/pull/3165)
+
+- Fixes `gr.utils.delete_none` to only remove props whose values are `None` from the config by [@abidlabs](https://github.com/abidlabs) in [PR 3188](https://github.com/gradio-app/gradio/pull/3188)
+- Fix bug where embedded demos were not loading files properly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3177](https://github.com/gradio-app/gradio/pull/3177)
+- The `change` event is now triggered when users click the 'Clear All' button of the multiselect Dropdown component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3195](https://github.com/gradio-app/gradio/pull/3195)
+- Stops File component from freezing when a large file is uploaded by [@aliabid94](https://github.com/aliabid94) in [PR 3191](https://github.com/gradio-app/gradio/pull/3191)
+- Support Chinese pinyin in Dataframe by [@aliabid94](https://github.com/aliabid94) in [PR 3206](https://github.com/gradio-app/gradio/pull/3206)
+- The `clear` event is now triggered when images are cleared by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3218](https://github.com/gradio-app/gradio/pull/3218)
+- Fix bug where auth cookies were not sent when connecting to an app via HTTP by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3223](https://github.com/gradio-app/gradio/pull/3223)
+- Ensure LaTeX CSS is always applied in light and dark mode by [@pngwn](https://github.com/pngwn) in [PR 3233](https://github.com/gradio-app/gradio/pull/3233)
+
+### Documentation Changes:
+
+- Sort components in docs by alphabetic order by [@aliabd](https://github.com/aliabd) in [PR 3152](https://github.com/gradio-app/gradio/pull/3152)
+- Changes to W&B guide by [@scottire](https://github.com/scottire) in [PR 3153](https://github.com/gradio-app/gradio/pull/3153)
+- Keep pnginfo metadata for gallery by [@wfng92](https://github.com/wfng92) in [PR 3150](https://github.com/gradio-app/gradio/pull/3150)
+- Add a section on how to run a Gradio app locally, by [@osanseviero](https://github.com/osanseviero) in [PR 3170](https://github.com/gradio-app/gradio/pull/3170)
+- Fixed typos in gradio events function documentation by [@vidalmaxime](https://github.com/vidalmaxime) in [PR 3168](https://github.com/gradio-app/gradio/pull/3168)
+- Added
an example using Gradio's batch mode with the diffusers library by [@abidlabs](https://github.com/abidlabs) in [PR 3224](https://github.com/gradio-app/gradio/pull/3224)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Fix demos page CSS and add close demos button by [@aliabd](https://github.com/aliabd) in [PR 3151](https://github.com/gradio-app/gradio/pull/3151)
+- Caches temp files from base64 input data by giving them a deterministic path based on the contents of data by [@abidlabs](https://github.com/abidlabs) in [PR 3197](https://github.com/gradio-app/gradio/pull/3197)
+- Better warnings (when there is a mismatch between the number of output components and values returned by a function, or when the `File` component or `UploadButton` component includes a `file_types` parameter along with `file_count=="dir"`) by [@abidlabs](https://github.com/abidlabs) in [PR 3194](https://github.com/gradio-app/gradio/pull/3194)
+- Raises a `gr.Error` instead of a regular Python error when you use `gr.Interface.load()` to load a model and there's an error querying the HF API by [@abidlabs](https://github.com/abidlabs) in [PR 3194](https://github.com/gradio-app/gradio/pull/3194)
+- Fixed gradio share links so that they are persistent and do not reset if network
+  connection is disrupted, by [XciD](https://github.com/XciD), [Wauplin](https://github.com/Wauplin), and [@abidlabs](https://github.com/abidlabs) in [PR 3149](https://github.com/gradio-app/gradio/pull/3149) and a follow-up to allow it to work for users upgrading from a previous Gradio version in [PR 3221](https://github.com/gradio-app/gradio/pull/3221)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.18.0
+
+### New Features:
+
+###### Revamped Stop Button for Interfaces 🛑
+
+If your Interface function is a generator, there used to be a separate `Stop` button displayed next
+to the `Submit` button.
+
+We've revamped the `Submit` button so that it turns into a `Stop` button during the generation process.
+Clicking on the `Stop` button will cancel the generation and turn it back into a `Submit` button.
+The `Stop` button will automatically turn back into a `Submit` button at the end of the generation if you don't use it!
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3124](https://github.com/gradio-app/gradio/pull/3124)
+
+###### Queue now works with reload mode!
+
+You can now call `queue` on your `demo` outside of the `if __name__ == "__main__"` block and
+run the script in reload mode with the `gradio` command.
+
+Any changes to the `app.py` file will be reflected in the webpage automatically and the queue will work
+properly!
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)
+
+###### Allow serving files from additional directories
+
+```python
+demo = gr.Interface(...)
+demo.launch(
+    file_directories=["/var/lib/demo/path/to/resources"]
+)
+```
+
+By [@maxaudron](https://github.com/maxaudron) in [PR 3075](https://github.com/gradio-app/gradio/pull/3075)
+
+### Bug Fixes:
+
+- Fixes URL resolution on Windows by [@abidlabs](https://github.com/abidlabs) in [PR 3108](https://github.com/gradio-app/gradio/pull/3108)
+- Example caching now works with components without a label attribute (e.g.
`Column`) by [@abidlabs](https://github.com/abidlabs) in [PR 3123](https://github.com/gradio-app/gradio/pull/3123)
+- Ensure the Video component correctly resets the UI state when a new video source is loaded and reduce choppiness of UI by [@pngwn](https://github.com/pngwn) in [PR 3117](https://github.com/gradio-app/gradio/pull/3117)
+- Fixes loading private Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 3068](https://github.com/gradio-app/gradio/pull/3068)
+- Added a warning when attempting to launch an `Interface` via the `%%blocks` Jupyter notebook magic command by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3126](https://github.com/gradio-app/gradio/pull/3126)
+- Fixes bug where interactive output image could not be set when in edit mode by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3135](https://github.com/gradio-app/gradio/pull/3135)
+- A share link will automatically be created when running on SageMaker notebooks so that the front-end is properly displayed by [@abidlabs](https://github.com/abidlabs) in [PR 3137](https://github.com/gradio-app/gradio/pull/3137)
+- Fixes a few dropdown component issues; the checkmark next to options is hidden as expected, and keyboard hover is visible, by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3145](https://github.com/gradio-app/gradio/pull/3145)
+- Fixed bug where example pagination buttons were not visible in dark mode or displayed under the examples table. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3144](https://github.com/gradio-app/gradio/pull/3144)
+- Fixed bug where the font color of axis labels and titles for native plots did not respond to dark mode preferences. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3146](https://github.com/gradio-app/gradio/pull/3146)
+
+### Documentation Changes:
+
+- Added a guide on the 4 kinds of Gradio Interfaces by [@yvrjsharma](https://github.com/yvrjsharma) and [@abidlabs](https://github.com/abidlabs) in [PR 3003](https://github.com/gradio-app/gradio/pull/3003)
+- Explained that the parameters in `launch` will not be respected when using reload mode, e.g. the `gradio` command, by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)
+- Added a demo to show how to set up variable numbers of outputs in Gradio by [@abidlabs](https://github.com/abidlabs) in [PR 3127](https://github.com/gradio-app/gradio/pull/3127)
+- Updated docs to reflect that the `equal_height` parameter should be passed to the `.style()` method of `gr.Row()` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3125](https://github.com/gradio-app/gradio/pull/3125)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Changed URL of final image for `fake_diffusion` demos by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3120](https://github.com/gradio-app/gradio/pull/3120)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.17.1
+
+### New Features:
+
+###### iOS image rotation fixed 🔄
+
+Previously photos uploaded via iOS would be rotated after processing.
This has been fixed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3091](https://github.com/gradio-app/gradio/pull/3091)
+
+**Before**
+
+![image](https://user-images.githubusercontent.com/41651716/215846507-a36e9d05-1ac2-4867-8ab3-ce045a9415d9.png)
+
+**After**
+
+![image](https://user-images.githubusercontent.com/41651716/215846554-e41773ed-70f0-491a-9952-6a18babf91ef.png)
+
+###### Run on Kaggle kernels 🧪
+
+A share link will automatically be created when running on Kaggle kernels (notebooks) so that the front-end is properly displayed.
+
+![image](https://user-images.githubusercontent.com/41651716/216104254-2cf55599-449c-436c-b57e-40f6a83f9eee.png)
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3101](https://github.com/gradio-app/gradio/pull/3101)
+
+### Bug Fixes:
+
+- Fix bug where examples were not rendered correctly for demos created with the Blocks API that had multiple input components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3090](https://github.com/gradio-app/gradio/pull/3090)
+- Fix change event listener for JSON, HighlightedText, Chatbot by [@aliabid94](https://github.com/aliabid94) in [PR 3095](https://github.com/gradio-app/gradio/pull/3095)
+- Fixes bug where video and file change events were not working, by [@tomchang25](https://github.com/tomchang25) in [PR 3098](https://github.com/gradio-app/gradio/pull/3098)
+- Fixes bug where `static_video` play and pause events were not working, by [@tomchang25](https://github.com/tomchang25) in [PR 3098](https://github.com/gradio-app/gradio/pull/3098)
+- Fixed `Gallery.style(grid=...)` by [@aliabd](https://github.com/aliabd) in [PR 3107](https://github.com/gradio-app/gradio/pull/3107)
+
+### Documentation Changes:
+
+- Update chatbot guide to include blocks demo and markdown support section by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3023](https://github.com/gradio-app/gradio/pull/3023)
+- Fix a broken link in the Quick Start guide, by [@cakiki](https://github.com/cakiki) in [PR 3109](https://github.com/gradio-app/gradio/pull/3109)
+- Better docs navigation on mobile by [@aliabd](https://github.com/aliabd) in [PR 3112](https://github.com/gradio-app/gradio/pull/3112)
+- Add a guide on using Gradio with [Comet](https://comet.com/), by [@DN6](https://github.com/DN6/) in [PR 3058](https://github.com/gradio-app/gradio/pull/3058)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Set minimum `markdown-it-py` version to `2.0.0` so that the dollar math plugin is compatible by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3102](https://github.com/gradio-app/gradio/pull/3102)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.17.0
+
+### New Features:
+
+###### Extended support for Interface.load! 🏗️
+
+You can now load `image-to-text` and `conversational` pipelines from the hub!
+
+###### Image-to-text Demo
+
+```python
+io = gr.Interface.load("models/nlpconnect/vit-gpt2-image-captioning",
+                       api_key="")
+io.launch()
+```
+
+image
+
+###### Conversational Demo
+
+```python
+chatbot = gr.Interface.load("models/microsoft/DialoGPT-medium",
+                            api_key="")
+chatbot.launch()
+```
+
+![chatbot_load](https://user-images.githubusercontent.com/41651716/213260220-3eaa25b7-a38b-48c6-adeb-2718bdf297a2.gif)
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3011](https://github.com/gradio-app/gradio/pull/3011)
+
+###### Download Button added to Model3D Output Component 📥
+
+No need for an additional file output component to enable Model3D file downloads anymore. We have now added a download button to the Model3D component itself.
+
+Screenshot 2023-01-18 at 3 52 45 PM
+
+By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3014](https://github.com/gradio-app/gradio/pull/3014)
+
+###### Fixing Auth on Spaces 🔑
+
+Authentication on Spaces works now! Third-party cookies must be enabled on your browser to be able
+to log in. Some browsers disable third-party cookies by default (Safari, Chrome Incognito).
+
+![auth_spaces](https://user-images.githubusercontent.com/41651716/215528417-09538933-0576-4d1d-b3b9-1e877ab01905.gif)
+
+### Bug Fixes:
+
+- Fixes bug where interpretation event was not configured correctly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2993](https://github.com/gradio-app/gradio/pull/2993)
+- Fix relative import bug in reload mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2992](https://github.com/gradio-app/gradio/pull/2992)
+- Fixes bug where png files were not being recognized when uploading images by [@abidlabs](https://github.com/abidlabs) in [PR 3002](https://github.com/gradio-app/gradio/pull/3002)
+- Fixes bug where external Spaces could not be loaded and used as functions if they returned files by [@abidlabs](https://github.com/abidlabs) in [PR 3004](https://github.com/gradio-app/gradio/pull/3004)
+- Fix bug where file serialization output was not JSON serializable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2999](https://github.com/gradio-app/gradio/pull/2999)
+- Fixes bug where temporary uploaded files were not being added to temp sets by [@abidlabs](https://github.com/abidlabs) in [PR 3005](https://github.com/gradio-app/gradio/pull/3005)
+- Fixes issue where markdown support in chatbot broke older demos, by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3006](https://github.com/gradio-app/gradio/pull/3006)
+- Fixes the `/file/` route that was broken in a recent change in [PR 3010](https://github.com/gradio-app/gradio/pull/3010)
+- Fix bug where the Image component could not serialize image URLs by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2957](https://github.com/gradio-app/gradio/pull/2957)
+- Fix forwarding for guides after SEO renaming by [@aliabd](https://github.com/aliabd) in [PR 3017](https://github.com/gradio-app/gradio/pull/3017)
+- Switch all pages on the website to use the latest stable Gradio by [@aliabd](https://github.com/aliabd) in [PR 3016](https://github.com/gradio-app/gradio/pull/3016)
+- Fix bug related to deprecated parameters in `huggingface_hub` for the HuggingFaceDatasetSaver in [PR 3025](https://github.com/gradio-app/gradio/pull/3025)
+- Added better support for
symlinks in the way absolute paths are resolved by [@abidlabs](https://github.com/abidlabs) in [PR 3037](https://github.com/gradio-app/gradio/pull/3037)
+- Fix several minor frontend bugs (loading animation, examples as gallery) by [@aliabid94](https://github.com/aliabid94) in [PR 3026](https://github.com/gradio-app/gradio/pull/3026).
+- Fixed bug where the chatbot sample code did not work with certain input values, by [@petrov826](https://github.com/petrov826) in [PR 3039](https://github.com/gradio-app/gradio/pull/3039).
+- Fix shadows for form elements and ensure focus styles are more visible in dark mode, by [@pngwn](https://github.com/pngwn) in [PR 3042](https://github.com/gradio-app/gradio/pull/3042).
+- Fixed bug where the Checkbox and Dropdown change events were not triggered in response to other component changes by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3045](https://github.com/gradio-app/gradio/pull/3045)
+- Fix bug where the queue was not properly restarted after launching a `closed` app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3022](https://github.com/gradio-app/gradio/pull/3022)
+- Added missing embedded components to the docs by [@aliabd](https://github.com/aliabd) in [PR 3027](https://github.com/gradio-app/gradio/pull/3027)
+- Fixes bug where app would crash if the `file_types` parameter of `gr.File` or `gr.UploadButton` was not a list by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3048](https://github.com/gradio-app/gradio/pull/3048)
+- Ensure CSS mounts correctly regardless of how many Gradio instances are on the page, by [@pngwn](https://github.com/pngwn) in [PR 3059](https://github.com/gradio-app/gradio/pull/3059).
+- Fix bug where input component was not hidden in the frontend for `UploadButton` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3053](https://github.com/gradio-app/gradio/pull/3053)
+- Fixes issue where after clicking submit or undo, the sketch output wouldn't clear.
[@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3047](https://github.com/gradio-app/gradio/pull/3047)
+- Ensure Spaces embedded via the web component always use the correct URLs for server requests and change ports for testing to avoid strange collisions when users are working with embedded apps locally by [@pngwn](https://github.com/pngwn) in [PR 3065](https://github.com/gradio-app/gradio/pull/3065)
+- Preserve selected image of Gallery through updates by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3061](https://github.com/gradio-app/gradio/pull/3061)
+- Fix bug where auth was not respected on HF Spaces by [@freddyaboulton](https://github.com/freddyaboulton) and [@aliabid94](https://github.com/aliabid94) in [PR 3049](https://github.com/gradio-app/gradio/pull/3049)
+- Fixes bug where the Tabs `selected` attribute was not respected when manually changing tabs, by [@tomchang25](https://github.com/tomchang25) in [PR 3055](https://github.com/gradio-app/gradio/pull/3055)
+- Change chatbot to show dots on progress, and fix bug where chatbot would not stick to bottom in the case of images by [@aliabid94](https://github.com/aliabid94) in [PR 3079](https://github.com/gradio-app/gradio/pull/3079)
+
+### Documentation Changes:
+
+- SEO improvements to guides by [@aliabd](https://github.com/aliabd) in [PR 2915](https://github.com/gradio-app/gradio/pull/2915)
+- Use `gr.LinePlot` for the `blocks_kinematics` demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2998](https://github.com/gradio-app/gradio/pull/2998)
+- Updated the `interface_series_load` to include some inline markdown code by [@abidlabs](https://github.com/abidlabs) in [PR 3051](https://github.com/gradio-app/gradio/pull/3051)
+
+### Testing and Infrastructure Changes:
+
+- Adds a GitHub action to test if any large files (> 5MB) are present by [@abidlabs](https://github.com/abidlabs) in [PR 3013](https://github.com/gradio-app/gradio/pull/3013)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Rewrote frontend using CSS variables for themes by [@pngwn](https://github.com/pngwn) in [PR 2840](https://github.com/gradio-app/gradio/pull/2840)
+- Moved telemetry requests to run on background threads by [@abidlabs](https://github.com/abidlabs) in [PR 3054](https://github.com/gradio-app/gradio/pull/3054)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.16.2
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Fixed bug where file upload would fail for files with zero size by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2923](https://github.com/gradio-app/gradio/pull/2923)
+- Fixed bug where `mount_gradio_app` would not launch if the queue was enabled in a gradio app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2939](https://github.com/gradio-app/gradio/pull/2939)
+- Fix custom long CSS handling in Blocks by [@anton-l](https://github.com/anton-l) in [PR 2953](https://github.com/gradio-app/gradio/pull/2953)
+- Recovers the dropdown change event by [@abidlabs](https://github.com/abidlabs) in [PR 2954](https://github.com/gradio-app/gradio/pull/2954).
+- Fix audio file output by [@aliabid94](https://github.com/aliabid94) in [PR 2961](https://github.com/gradio-app/gradio/pull/2961).
+
+- Fixed bug where file extensions of really long files were not kept after download by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2929](https://github.com/gradio-app/gradio/pull/2929)
+- Fix bug where outputs for examples were not being returned by the backend by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2955](https://github.com/gradio-app/gradio/pull/2955)
+- Fix bug in `blocks_plug` demo that prevented switching tabs programmatically with Python, by [@TashaSkyUp](https://github.com/TashaSkyUp) in [PR 2971](https://github.com/gradio-app/gradio/pull/2971).
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.16.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Fix audio file output by [@aliabid94](https://github.com/aliabid94) in [PR 2950](https://github.com/gradio-app/gradio/pull/2950).
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.16.0
+
+### New Features:
+
+###### Send custom progress updates by adding a `gr.Progress` argument after the input arguments to any function. Example:
+
+```python
+import time
+import gradio as gr
+
+def reverse(word, progress=gr.Progress()):
+    progress(0, desc="Starting")
+    time.sleep(1)
+    new_string = ""
+    for letter in progress.tqdm(word, desc="Reversing"):
+        time.sleep(0.25)
+        new_string = letter + new_string
+    return new_string
+
+demo = gr.Interface(reverse, gr.Text(), gr.Text())
+```
+
+Progress indicator bar by [@aliabid94](https://github.com/aliabid94) in [PR 2750](https://github.com/gradio-app/gradio/pull/2750).
+
+- Added `title` argument to `TabbedInterface` by [@MohamedAliRashad](https://github.com/MohamedAliRashad) in [#2888](https://github.com/gradio-app/gradio/pull/2888)
+- Add support for specifying file extensions for `gr.File` and `gr.UploadButton`, using the `file_types` parameter (e.g. `gr.File(file_count="multiple", file_types=["text", ".json", ".csv"])`) by [@dawoodkhan82](https://github.com/dawoodkhan82) in [#2901](https://github.com/gradio-app/gradio/pull/2901)
+- Added `multiselect` option to `Dropdown` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [#2871](https://github.com/gradio-app/gradio/pull/2871)
+
+###### With `multiselect` set to `True` a user can now select multiple options from the `gr.Dropdown` component.
+
+```python
+gr.Dropdown(["angola", "pakistan", "canada"], multiselect=True, value=["angola"])
+```
+
+Screenshot 2023-01-03 at 4 14 36 PM
+
+### Bug Fixes:
+
+- Fixed bug where an error opening an audio file led to a crash by [@FelixDombek](https://github.com/FelixDombek) in [PR 2898](https://github.com/gradio-app/gradio/pull/2898)
+- Fixed bug where setting `default_enabled=False` made it so that the entire queue did not start by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2876](https://github.com/gradio-app/gradio/pull/2876)
+- Fixed bug where CSV preview for DataFrame examples would show filename instead of file contents by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2877](https://github.com/gradio-app/gradio/pull/2877)
+- Fixed bug where an error raised after yielding iterative output would not be displayed in the browser by
+  [@JaySmithWpg](https://github.com/JaySmithWpg) in [PR 2889](https://github.com/gradio-app/gradio/pull/2889)
+- Fixed bug in `blocks_style` demo that was preventing it from launching by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2890](https://github.com/gradio-app/gradio/pull/2890)
+- Fixed bug where files could not be downloaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2926](https://github.com/gradio-app/gradio/pull/2926)
+- Fixed bug where cached examples were not displaying properly by [@a-rogalska](https://github.com/a-rogalska) in [PR 2974](https://github.com/gradio-app/gradio/pull/2974)
+
+### Documentation Changes:
+
+- Added a Guide on using Google Sheets to create a real-time dashboard with Gradio's `DataFrame` and `LinePlot` component, by [@abidlabs](https://github.com/abidlabs) in [PR 2816](https://github.com/gradio-app/gradio/pull/2816)
+- Add a components-events matrix on the docs by [@aliabd](https://github.com/aliabd) in [PR 2921](https://github.com/gradio-app/gradio/pull/2921)
+
+### Testing and Infrastructure Changes:
+
+- Deployed PRs from forks to spaces by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2895](https://github.com/gradio-app/gradio/pull/2895)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- The `default_enabled` parameter of the `Blocks.queue` method has no effect by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2876](https://github.com/gradio-app/gradio/pull/2876)
+- Added typing to several Python files in codebase by [@abidlabs](https://github.com/abidlabs) in [PR 2887](https://github.com/gradio-app/gradio/pull/2887)
+- Excluding untracked files from demo notebook check action by [@aliabd](https://github.com/aliabd) in [PR 2897](https://github.com/gradio-app/gradio/pull/2897)
+- Optimize images and gifs by [@aliabd](https://github.com/aliabd) in [PR 2922](https://github.com/gradio-app/gradio/pull/2922)
+- Updated typing by [@1nF0rmed](https://github.com/1nF0rmed) in [PR 2904](https://github.com/gradio-app/gradio/pull/2904)
+
+### Contributors Shoutout:
+
+- [@JaySmithWpg](https://github.com/JaySmithWpg) for making their first contribution to gradio!
+- [@MohamedAliRashad](https://github.com/MohamedAliRashad) for making their first contribution to gradio!
+
+## 3.15.0
+
+### New Features:
+
+Gradio's newest plotting component `gr.LinePlot`! 📈
+
+With this component you can easily create time series visualizations with customizable
+appearance for your demos and dashboards ... all without having to learn an external plotting library.
+
+For an example of the API, see below:
+
+```python
+gr.LinePlot(stocks,
+            x="date",
+            y="price",
+            color="symbol",
+            color_legend_position="bottom",
+            width=600, height=400, title="Stock Prices")
+```
+
+![image](https://user-images.githubusercontent.com/41651716/208711646-81ae3745-149b-46a3-babd-0569aecdd409.png)
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2807](https://github.com/gradio-app/gradio/pull/2807)
+
+### Bug Fixes:
+
+- Fixed bug where the `examples_per_page` parameter of the `Examples` component was not passed to the internal `Dataset` component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2861](https://github.com/gradio-app/gradio/pull/2861)
+- Fixes loading Spaces that have components with default values by [@abidlabs](https://github.com/abidlabs) in [PR 2855](https://github.com/gradio-app/gradio/pull/2855)
+- Fixes flagging when `allow_flagging="auto"` in `gr.Interface()` by [@abidlabs](https://github.com/abidlabs) in [PR 2695](https://github.com/gradio-app/gradio/pull/2695)
+- Fixed bug where passing a non-list value to `gr.CheckboxGroup` would crash the entire app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2866](https://github.com/gradio-app/gradio/pull/2866)
+
+### Documentation Changes:
+
+- Added a Guide on using BigQuery with Gradio's `DataFrame` and `ScatterPlot` component,
+  by [@abidlabs](https://github.com/abidlabs) in [PR 2794](https://github.com/gradio-app/gradio/pull/2794)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Fixed bug where importing gradio could cause `PIL.Image.registered_extensions()` to break, by [@aliencaocao](https://github.com/aliencaocao) in [PR 2846](https://github.com/gradio-app/gradio/pull/2846)
+- Fix CSS glitch and navigation in docs by [@aliabd](https://github.com/aliabd) in [PR 2856](https://github.com/gradio-app/gradio/pull/2856)
+- Added the ability to set `x_lim`, `y_lim` and legend positions for `gr.ScatterPlot` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2807](https://github.com/gradio-app/gradio/pull/2807)
+- Remove footers and min-height the correct way by [@aliabd](https://github.com/aliabd) in [PR 2860](https://github.com/gradio-app/gradio/pull/2860)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.14.0
+
+### New Features:
+
+###### Add Waveform Visual Support to Audio
+
+Adds a `gr.make_waveform()` function that creates a waveform video by combining an audio file and an optional background image by [@dawoodkhan82](http://github.com/dawoodkhan82) and [@aliabid94](http://github.com/aliabid94) in [PR 2706](https://github.com/gradio-app/gradio/pull/2706). Helpful for making audio outputs much more shareable.
+
+![waveform screenrecording](https://user-images.githubusercontent.com/7870876/206062396-164a5e71-451a-4fe0-94a7-cbe9269d57e6.gif)
+
+###### Allows Every Component to Accept an `every` Parameter
+
+When a component's initial value is a function, the `every` parameter re-runs the function every `every` seconds. By [@abidlabs](https://github.com/abidlabs) in [PR 2806](https://github.com/gradio-app/gradio/pull/2806). Here's a code example:
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    # run_query is a user-defined function that returns a DataFrame;
+    # with `every=60*60`, it is re-run once per hour.
+    df = gr.DataFrame(run_query, every=60*60)
+
+demo.queue().launch()
+```
+
+### Bug Fixes:
+
+- Fixed issue where too many temporary files were created, all with randomly generated
+  filepaths.
Now fewer temporary files are created and are assigned a path that is a
+  hash based on the file contents by [@abidlabs](https://github.com/abidlabs) in [PR 2758](https://github.com/gradio-app/gradio/pull/2758)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.13.2
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+No changes to highlight.
+
+### Documentation Changes:
+
+- Improves documentation of several queuing-related parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2825](https://github.com/gradio-app/gradio/pull/2825)
+
+### Testing and Infrastructure Changes:
+
+- Remove h11 pinning by [@ecederstrand](https://github.com/ecederstrand) in [PR 2820](https://github.com/gradio-app/gradio/pull/2820)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+No changes to highlight.
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.13.1
+
+### New Features:
+
+###### New Shareable Links
+
+Replaces tunneling logic based on SSH port-forwarding with logic based on `frp`, by [XciD](https://github.com/XciD) and [Wauplin](https://github.com/Wauplin) in [PR 2509](https://github.com/gradio-app/gradio/pull/2509)
+
+You don't need to do anything differently, but when you set `share=True` in `launch()`,
+you'll get this message and a public link that looks a little bit different:
+
+```bash
+Setting up a public link... we have recently upgraded the way public links are generated. If you encounter any problems, please downgrade to gradio version 3.13.0
+.
+Running on public URL: https://bec81a83-5b5c-471e.gradio.live
+```
+
+These links are a more secure and scalable way to create shareable demos!
+
+### Bug Fixes:
+
+- Allows `gr.Dataframe()` to take a `pandas.DataFrame` that includes numpy arrays and other types as its initial value, by [@abidlabs](https://github.com/abidlabs) in [PR 2804](https://github.com/gradio-app/gradio/pull/2804)
+- Add `altair` to requirements.txt by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2811](https://github.com/gradio-app/gradio/pull/2811)
+- Added aria-labels to icon buttons that are built into UI components by [@emilyuhde](http://github.com/emilyuhde) in [PR 2791](https://github.com/gradio-app/gradio/pull/2791)
+
+### Documentation Changes:
+
+- Fixed some typos in the "Plot Component for Maps" guide by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2811](https://github.com/gradio-app/gradio/pull/2811)
+
+### Testing and Infrastructure Changes:
+
+- Fixed test for IP address by [@abidlabs](https://github.com/abidlabs) in [PR 2808](https://github.com/gradio-app/gradio/pull/2808)
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Fixed typo in parameter `visible` in classes in `templates.py` by [@abidlabs](https://github.com/abidlabs) in [PR 2805](https://github.com/gradio-app/gradio/pull/2805)
+- Switched external service for getting IP address from `https://api.ipify.org` to `https://checkip.amazonaws.com/` by [@abidlabs](https://github.com/abidlabs) in [PR 2810](https://github.com/gradio-app/gradio/pull/2810)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.13.0
+
+### New Features:
+
+###### Scatter plot component
+
+It is now possible to create a scatter plot natively in Gradio!
+
+The `gr.ScatterPlot` component accepts a pandas dataframe and some optional configuration parameters and will automatically create a plot for you!
+
+This is the first of many native plotting components in Gradio!
+
+For an example of how to use `gr.ScatterPlot`, see below:
+
+```python
+import gradio as gr
+from vega_datasets import data
+
+cars = data.cars()
+
+with gr.Blocks() as demo:
+    gr.ScatterPlot(show_label=False,
+                   value=cars,
+                   x="Horsepower",
+                   y="Miles_per_Gallon",
+                   color="Origin",
+                   tooltip="Name",
+                   title="Car Data",
+                   y_title="Miles per Gallon",
+                   color_legend_title="Origin of Car").style(container=False)
+
+demo.launch()
+```
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2764](https://github.com/gradio-app/gradio/pull/2764)
+
+###### Support for Altair plots
+
+The `Plot` component can now accept Altair plots as values! Simply return an Altair plot from your event listener and gradio will display it in the front-end. See the example below:
+
+```python
+import gradio as gr
+import altair as alt
+from vega_datasets import data
+
+cars = data.cars()
+chart = (
+    alt.Chart(cars)
+    .mark_point()
+    .encode(
+        x="Horsepower",
+        y="Miles_per_Gallon",
+        color="Origin",
+    )
+)
+
+with gr.Blocks() as demo:
+    gr.Plot(value=chart)
+demo.launch()
+```
+
+By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2741](https://github.com/gradio-app/gradio/pull/2741)
+
+###### Set the background color of a Label component
+
+The `Label` component now accepts a `color` argument by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2736](https://github.com/gradio-app/gradio/pull/2736). The `color` argument should either be a valid CSS color name or a hexadecimal string. You can update the color with `gr.Label.update`!
+
+This lets you create alert and warning boxes with the `Label` component. See below:
+
+```python
+import gradio as gr
+import random
+
+def update_color(value):
+    if value < 0:
+        # This is bad so use red
+        return "#FF0000"
+    elif 0 <= value <= 20:
+        # Ok but pay attention (use orange)
+        return "#ff9966"
+    else:
+        # Nothing to worry about
+        return None
+
+def update_value():
+    # Pick a random number so that it can be compared against
+    # the numeric ranges in update_color above
+    value = random.randint(-10, 40)
+    color = update_color(value)
+    return gr.Label.update(value=str(value), color=color)
+
+
+with gr.Blocks() as demo:
+    label = gr.Label(value=-10)
+    demo.load(update_value, inputs=None, outputs=[label], every=1)
+demo.queue().launch()
+```
+
+![label_bg_color_update](https://user-images.githubusercontent.com/41651716/204400372-80e53857-f26f-4a38-a1ae-1acadff75e89.gif)
+
+###### Add Brazilian Portuguese translation
+
+Add Brazilian Portuguese translation (pt-BR.json) by [@pstwh](http://github.com/pstwh) in [PR 2753](https://github.com/gradio-app/gradio/pull/2753).
+
+### Bug Fixes:
+
+- Fixed issue where image thumbnails were not showing when an example directory was provided by [@abidlabs](https://github.com/abidlabs) in [PR 2745](https://github.com/gradio-app/gradio/pull/2745)
+- Fixed bug loading audio input models from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2779](https://github.com/gradio-app/gradio/pull/2779).
+- Fixed issue where entities were not merged when highlighted text was generated from dictionary inputs by [@payoto](https://github.com/payoto) in [PR 2767](https://github.com/gradio-app/gradio/pull/2767)
+- Fixed bug where generating events did not finish running even if the websocket connection was closed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2783](https://github.com/gradio-app/gradio/pull/2783).
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Images in the chatbot component are now resized if they exceed a max width by [@abidlabs](https://github.com/abidlabs) in [PR 2748](https://github.com/gradio-app/gradio/pull/2748)
+- Missing parameters have been added to `gr.Blocks().load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2755](https://github.com/gradio-app/gradio/pull/2755)
+- Deindex share URLs from search by [@aliabd](https://github.com/aliabd) in [PR 2772](https://github.com/gradio-app/gradio/pull/2772)
+- Redirect old links and fix broken ones by [@aliabd](https://github.com/aliabd) in [PR 2774](https://github.com/gradio-app/gradio/pull/2774)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.12.0
+
+### New Features:
+
+###### The `Chatbot` component now supports a subset of Markdown (including bold, italics, code, images)
+
+You can now pass in some Markdown to the Chatbot component and it will show up, meaning that you can pass in images as well! By [@abidlabs](https://github.com/abidlabs) in [PR 2731](https://github.com/gradio-app/gradio/pull/2731)
+
+Here's a simple example that references a local image `lion.jpg` that is in the same folder as the Python script:
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    gr.Chatbot([("hi", "hello **abubakar**"), ("![](/file=lion.jpg)", "cool pic")])
+
+demo.launch()
+```
+
+![Alt text](https://user-images.githubusercontent.com/1778297/204357455-5c1a4002-eee7-479d-9a1e-ba2c12522723.png)
+
+To see a more realistic example, see the new demo `/demo/chatbot_multimodal/run.py`.
+
+###### LaTeX support
+
+Added mathtext (a subset of LaTeX) support to `gr.Markdown` by [@kashif](https://github.com/kashif) and [@aliabid94](https://github.com/aliabid94) in [PR 2696](https://github.com/gradio-app/gradio/pull/2696).
+
+Example of how it can be used:
+
+```python
+gr.Markdown(
+    r"""
+    # Hello World! $\frac{\sqrt{x + y}}{4}$ is today's lesson.
+    """)
+```
+
+###### Update Accordion properties from the backend
+
+You can now update the Accordion `label` and `open` status with `gr.Accordion.update` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2690](https://github.com/gradio-app/gradio/pull/2690)
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    with gr.Accordion(label="Open for greeting", open=False) as accordion:
+        gr.Textbox("Hello!")
+    open_btn = gr.Button(value="Open Accordion")
+    close_btn = gr.Button(value="Close Accordion")
+    open_btn.click(
+        lambda: gr.Accordion.update(open=True, label="Open Accordion"),
+        inputs=None,
+        outputs=[accordion],
+    )
+    close_btn.click(
+        lambda: gr.Accordion.update(open=False, label="Closed Accordion"),
+        inputs=None,
+        outputs=[accordion],
+    )
+demo.launch()
+```
+
+![update_accordion](https://user-images.githubusercontent.com/41651716/203164176-b102eae3-babe-4986-ae30-3ab4f400cedc.gif)
+
+### Bug Fixes:
+
+- Fixed a bug where the requests timeout was missing from `utils.version_check()` by [@yujiehecs](https://github.com/yujiehecs) in [PR 2729](https://github.com/gradio-app/gradio/pull/2729)
+- Fixed a bug so that the `File` component properly preprocesses files into the "binary" byte-string format by [CoffeeVampir3](https://github.com/CoffeeVampir3) in [PR 2727](https://github.com/gradio-app/gradio/pull/2727)
+- Fixed a bug to ensure that filenames are less than 200 characters even for non-English languages by [@SkyTNT](https://github.com/SkyTNT) in [PR 2685](https://github.com/gradio-app/gradio/pull/2685)
+
+### Documentation Changes:
+
+- Performance improvements to docs on mobile by [@aliabd](https://github.com/aliabd) in [PR 2730](https://github.com/gradio-app/gradio/pull/2730)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Make try examples button more prominent by [@aliabd](https://github.com/aliabd) in [PR 2705](https://github.com/gradio-app/gradio/pull/2705)
+- Fix id clashes in docs by [@aliabd](https://github.com/aliabd) in [PR 2713](https://github.com/gradio-app/gradio/pull/2713)
+- Fix typos in guide docs by [@andridns](https://github.com/andridns) in [PR 2722](https://github.com/gradio-app/gradio/pull/2722)
+- Add an `include_audio` option to the Video component. When `True`, for `source="webcam"` this will record audio and video, and for `source="upload"` it will retain the audio in an uploaded video by [@mandargogate](https://github.com/MandarGogate) in [PR 2721](https://github.com/gradio-app/gradio/pull/2721)
+
+### Contributors Shoutout:
+
+- [@andridns](https://github.com/andridns) made their first contribution in [PR 2722](https://github.com/gradio-app/gradio/pull/2722)!
+
+## 3.11.0
+
+### New Features:
+
+###### Upload Button
+
+There is now a new component called the `UploadButton`, which is a file upload component, but in button form! You can also specify what file types it should accept in the form of a list (ex: `image`, `video`, `audio`, `text`, or generic `file`). Added by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2591](https://github.com/gradio-app/gradio/pull/2591).
+
+Example of how it can be used:
+
+```python
+import gradio as gr
+
+def upload_file(files):
+    file_paths = [file.name for file in files]
+    return file_paths
+
+with gr.Blocks() as demo:
+    file_output = gr.File()
+    upload_button = gr.UploadButton("Click to Upload a File", file_types=["image", "video"], file_count="multiple")
+    upload_button.upload(upload_file, upload_button, file_output)
+
+demo.launch()
+```
+
+###### Revamped API documentation page
+
+New API Docs page with in-browser playground and updated aesthetics. [@gary149](https://github.com/gary149) in [PR 2652](https://github.com/gradio-app/gradio/pull/2652)
+
+###### Revamped Login page
+
+Previously our login page had its own CSS, had no dark mode, and showed an ugly JSON message when the credentials were wrong. We made the page more aesthetically consistent, added dark mode support, and improved the error message. [@aliabid94](https://github.com/aliabid94) in [PR 2684](https://github.com/gradio-app/gradio/pull/2684)
+
+###### Accessing the Requests Object Directly
+
+You can now access the Request object directly in your Python function by [@abidlabs](https://github.com/abidlabs) in [PR 2641](https://github.com/gradio-app/gradio/pull/2641). This means that you can access request headers, the client IP address, and so on. In order to use it, add a parameter to your function and set its type hint to be `gr.Request`. Here's a simple example:
+
+```py
+import gradio as gr
+
+def echo(name, request: gr.Request):
+    if request:
+        print("Request headers dictionary:", request.headers)
+        print("IP address:", request.client.host)
+    return name
+
+io = gr.Interface(echo, "textbox", "textbox").launch()
+```
+
+### Bug Fixes:
+
+- Fixed a bug that limited files sent over websockets to 16MB. The new limit is 1GB, by [@abidlabs](https://github.com/abidlabs) in [PR 2709](https://github.com/gradio-app/gradio/pull/2709)
+
+### Documentation Changes:
+
+- Updated documentation for embedding Gradio demos on Spaces as web components by [@julien-c](https://github.com/julien-c) in [PR 2698](https://github.com/gradio-app/gradio/pull/2698)
+- Updated IFrames in Guides to use the host URL instead of the Space name, to be consistent with the new method for embedding Spaces, by [@julien-c](https://github.com/julien-c) in [PR 2692](https://github.com/gradio-app/gradio/pull/2692)
+- Colab buttons on every demo on the website! Just click "Open in Colab" and run the demo there.
+
+https://user-images.githubusercontent.com/9021060/202878400-cb16ed47-f4dd-4cb0-b2f0-102a9ff64135.mov
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Better warnings and error messages for `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2694](https://github.com/gradio-app/gradio/pull/2694)
+- Add open in colab buttons to demos in docs and /demos by [@aliabd](https://github.com/aliabd) in [PR 2608](https://github.com/gradio-app/gradio/pull/2608)
+- Apply different formatting for the types in component docstrings by [@aliabd](https://github.com/aliabd) in [PR 2707](https://github.com/gradio-app/gradio/pull/2707)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.10.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Passes kwargs into `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2669](https://github.com/gradio-app/gradio/pull/2669)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Clean up printed statements in Embedded Colab Mode by [@aliabid94](https://github.com/aliabid94) in [PR 2612](https://github.com/gradio-app/gradio/pull/2612)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.10.0
+
+### New Features:
+
+- Add support for `'password'` and `'email'` types to `Textbox`. [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653)
+- The `gr.Textbox` component will now raise an exception if `type` is not "text", "email", or "password". [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653). This will cause demos using the deprecated `gr.Textbox(type="number")` to raise an exception.
+
+### Bug Fixes:
+
+- Updated the minimum FastAPI version used in tests to 0.87 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2647](https://github.com/gradio-app/gradio/pull/2647)
+- Fixed bug where interfaces with examples could not be loaded with `gr.Interface.load` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2640](https://github.com/gradio-app/gradio/pull/2640)
+- Fixed bug where the `interactive` property of a component could not be updated by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2639](https://github.com/gradio-app/gradio/pull/2639)
+- Fixed bug where some URLs were not being recognized as valid URLs and thus were not loading correctly in various components by [@abidlabs](https://github.com/abidlabs) in [PR 2659](https://github.com/gradio-app/gradio/pull/2659)
+
+### Documentation Changes:
+
+- Fix some typos in the embedded demo names in "05_using_blocks_like_functions.md" by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2656](https://github.com/gradio-app/gradio/pull/2656)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Add support for `'password'` and `'email'` types to `Textbox`. [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.9.1
+
+### New Features:
+
+No changes to highlight.
+
+### Bug Fixes:
+
+- Only set a min height on md and html when loading by [@pngwn](https://github.com/pngwn) in [PR 2623](https://github.com/gradio-app/gradio/pull/2623)
+
+### Documentation Changes:
+
+- See docs for the latest gradio commit to main as well as the latest pip release:
+
+![main-vs-pip](https://user-images.githubusercontent.com/9021060/199607887-aab1ae4e-a070-4527-966d-024397abe15b.gif)
+
+- Modified the "Connecting To a Database Guide" to use `pd.read_sql` as opposed to a low-level postgres connector by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2604](https://github.com/gradio-app/gradio/pull/2604)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Dropdown for seeing docs as latest or main by [@aliabd](https://github.com/aliabd) in [PR 2544](https://github.com/gradio-app/gradio/pull/2544)
+- Allow `gr.Templates` to accept parameters to override the defaults by [@abidlabs](https://github.com/abidlabs) in [PR 2600](https://github.com/gradio-app/gradio/pull/2600)
+- Components now throw a `ValueError()` if constructed with invalid parameters for `type` or `source` (for components that take those parameters) in [PR 2610](https://github.com/gradio-app/gradio/pull/2610)
+- Allow auth when using the queue by [@GLGDLY](https://github.com/GLGDLY) in [PR 2611](https://github.com/gradio-app/gradio/pull/2611)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.9
+
+### New Features:
+
+- Gradio is now embedded directly in Colab without requiring the share link by [@aliabid94](https://github.com/aliabid94) in [PR 2455](https://github.com/gradio-app/gradio/pull/2455)
+
+###### Calling functions by api_name in loaded apps
+
+When you load an upstream app with `gr.Blocks.load`, you can now specify which function to call with the `api_name` parameter.
+
+```python
+import gradio as gr
+english_translator = gr.Blocks.load(name="spaces/gradio/english-translator")
+german = english_translator("My name is Freddy", api_name='translate-to-german')
+```
+
+The `api_name` parameter will take precedence over the `fn_index` parameter.
+
+### Bug Fixes:
+
+- Fixed bug where None could not be used for File, Model3D, and Audio examples by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2588](https://github.com/gradio-app/gradio/pull/2588)
+- Fixed links in Plotly map guide + demo by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2578](https://github.com/gradio-app/gradio/pull/2578)
+- `gr.Blocks.load()` now correctly loads example files from Spaces [@abidlabs](https://github.com/abidlabs) in [PR 2594](https://github.com/gradio-app/gradio/pull/2594)
+- Fixed bug where clearing an image started the upload dialog [@mezotaken](https://github.com/mezotaken) in [PR 2577](https://github.com/gradio-app/gradio/pull/2577)
+
+### Documentation Changes:
+
+- Added a Guide on how to configure the queue for maximum performance by [@abidlabs](https://github.com/abidlabs) in [PR 2558](https://github.com/gradio-app/gradio/pull/2558)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Add `api_name` to `Blocks.__call__` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2593](https://github.com/gradio-app/gradio/pull/2593)
+- Update queue to use a deque & update requirements by [@GLGDLY](https://github.com/GLGDLY) in [PR 2428](https://github.com/gradio-app/gradio/pull/2428)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.8.2
+
+### Bug Fixes:
+
+- Ensure gradio apps embedded via spaces use the correct endpoint for predictions. [@pngwn](https://github.com/pngwn) in [PR 2567](https://github.com/gradio-app/gradio/pull/2567)
+- Ensure gradio apps embedded via spaces use the correct websocket protocol. [@pngwn](https://github.com/pngwn) in [PR 2571](https://github.com/gradio-app/gradio/pull/2571)
+
+### New Features:
+
+###### Running Events Continuously
+
+Gradio now supports the ability to run an event continuously on a fixed schedule. To use this feature, pass `every=# of seconds` to the event definition. This will run the event every given number of seconds!
+
+This can be used to:
+
+- Create live visualizations that show the most up to date data
+- Refresh the state of the frontend automatically in response to changes in the backend
+
+Here is an example of a live plot that refreshes every half second:
+
+```python
+import math
+import gradio as gr
+import plotly.express as px
+import numpy as np
+
+
+plot_end = 2 * math.pi
+
+
+def get_plot(period=1):
+    global plot_end
+    x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)
+    y = np.sin(2 * math.pi * period * x)
+    fig = px.line(x=x, y=y)
+    plot_end += 2 * math.pi
+    return fig
+
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("Change the value of the slider to automatically update the plot")
+            period = gr.Slider(label="Period of plot", value=1, minimum=0, maximum=10, step=1)
+            plot = gr.Plot(label="Plot (updates every half second)")
+
+    dep = demo.load(get_plot, None, plot, every=0.5)
+    period.change(get_plot, period, plot, every=0.5, cancels=[dep])
+
+demo.queue().launch()
+```
+
+![live_demo](https://user-images.githubusercontent.com/41651716/198357377-633ce460-4e31-47bd-8202-1440cdd6fe19.gif)
+
+### Bug Fixes:
+
+No changes to highlight.
+
+### Documentation Changes:
+
+- Explained how to set up `queue` and `auth` when working with reload mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Allows loading private Spaces by passing an `api_key` to `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2568](https://github.com/gradio-app/gradio/pull/2568)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.8
+
+### New Features:
+
+- Allows event listeners to accept a single dictionary as their argument, where the keys are the components and the values are the component values. This is set by passing the input components in the event listener as a set instead of a list (see the sketch at the end of this section). [@aliabid94](https://github.com/aliabid94) in [PR 2550](https://github.com/gradio-app/gradio/pull/2550)
+
+### Bug Fixes:
+
+- Fix whitespace issue when using plotly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2548](https://github.com/gradio-app/gradio/pull/2548)
+- Apply appropriate alt text to all gallery images. [@camenduru](https://github.com/camenduru) in [PR 2358](https://github.com/gradio-app/gradio/pull/2538)
+- Removed erroneous tkinter import in gradio.blocks by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2555](https://github.com/gradio-app/gradio/pull/2555)
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Added the `every` keyword to event listeners that runs events on a fixed schedule by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2512](https://github.com/gradio-app/gradio/pull/2512)
+- Fix whitespace issue when using plotly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2548](https://github.com/gradio-app/gradio/pull/2548)
+- Apply appropriate alt text to all gallery images. [@camenduru](https://github.com/camenduru) in [PR 2358](https://github.com/gradio-app/gradio/pull/2538)
+
+### Contributors Shoutout:
+
+No changes to highlight.
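+
+To make the dictionary-argument feature above concrete, here is a minimal sketch; the component names and the `add` function are illustrative, not taken from the PR:
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    a = gr.Number(label="a", value=1)
+    b = gr.Number(label="b", value=2)
+    total = gr.Number(label="a + b")
+    btn = gr.Button("Add")
+
+    def add(data):
+        # `data` is a single dict mapping each input component to its value
+        return data[a] + data[b]
+
+    # Passing the inputs as a set {a, b} (instead of a list [a, b])
+    # makes gradio call `add` with one dictionary argument
+    btn.click(add, {a, b}, total)
+
+demo.launch()
+```
+
+With a list of inputs, the listener would instead call `add(a_value, b_value)` with positional arguments, as before.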
+
+## 3.7
+
+### New Features:
+
+###### Batched Functions
+
+Gradio now supports the ability to pass _batched_ functions. Batched functions are just functions which take in a list of inputs and return a list of predictions.
+
+For example, here is a batched function that takes in two lists of inputs (a list of words and a list of ints), and returns a list of trimmed words as output:
+
+```py
+import time
+
+def trim_words(words, lens):
+    trimmed_words = []
+    time.sleep(5)
+    for w, l in zip(words, lens):
+        trimmed_words.append(w[:l])
+    # One list per output component: here, a single batched output
+    return [trimmed_words]
+```
+
+The advantage of using batched functions is that if you enable queuing, the Gradio server can automatically _batch_ incoming requests and process them in parallel, potentially speeding up your demo. Here's what the Gradio code looks like (notice the `batch=True` and `max_batch_size=16` -- both of these parameters can be passed into event triggers or into the `Interface` class):
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        word = gr.Textbox(label="word", value="abc")
+        leng = gr.Number(label="leng", precision=0, value=1)
+        output = gr.Textbox(label="Output")
+    with gr.Row():
+        run = gr.Button()
+
+    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)
+
+demo.queue()
+demo.launch()
+```
+
+In the example above, 16 requests could be processed in parallel (for a total inference time of 5 seconds), instead of each request being processed separately (for a total inference time of 80 seconds).
+
+###### Upload Event
+
+`Video`, `Audio`, `Image`, and `File` components now support an `upload()` event that is triggered when a user uploads a file into any of these components.
+
+Example usage:
+
+```py
+import gradio as gr
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        input_video = gr.Video()
+        output_video = gr.Video()
+
+    # Clears the output video when an input video is uploaded
+    input_video.upload(lambda: None, None, output_video)
+
+demo.launch()
+```
+
+### Bug Fixes:
+
+- Fixes issue where plotly animations, interactivity, titles, and legends were not working properly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2486](https://github.com/gradio-app/gradio/pull/2486)
+- Prevent requests to the `/api` endpoint from skipping the queue if the queue is enabled for that event by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2493](https://github.com/gradio-app/gradio/pull/2493)
+- Fixes a bug with `cancels` in event triggers so that it works properly if multiple Blocks are rendered by [@abidlabs](https://github.com/abidlabs) in [PR 2530](https://github.com/gradio-app/gradio/pull/2530)
+- Prevent invalid targets of events from crashing the whole application. [@pngwn](https://github.com/pngwn) in [PR 2534](https://github.com/gradio-app/gradio/pull/2534)
+- Properly dequeue cancelled events when multiple apps are rendered by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2540](https://github.com/gradio-app/gradio/pull/2540)
+- Fixes videos being cropped due to height/width params not being used [@hannahblair](https://github.com/hannahblair) in [PR 4946](https://github.com/gradio-app/gradio/pull/4946)
+
+### Documentation Changes:
+
+- Added an example interactive dashboard to the "Tabular & Plots" section of the Demos page by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2508](https://github.com/gradio-app/gradio/pull/2508)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Fixes the error message if a user builds Gradio locally and tries to use `share=True` by [@abidlabs](https://github.com/abidlabs) in [PR 2502](https://github.com/gradio-app/gradio/pull/2502)
+- Allows the render() function to return self by [@Raul9595](https://github.com/Raul9595) in [PR 2514](https://github.com/gradio-app/gradio/pull/2514)
+- Fixes issue where plotly animations, interactivity, titles, and legends were not working properly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2486](https://github.com/gradio-app/gradio/pull/2486)
+- Gradio now supports batched functions by [@abidlabs](https://github.com/abidlabs) in [PR 2218](https://github.com/gradio-app/gradio/pull/2218)
+- Add `upload` event for `Video`, `Audio`, `Image`, and `File` components [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2448](https://github.com/gradio-app/gradio/pull/2456)
+- Changes websocket path for Spaces as it is no longer necessary to have a different URL for websocket connections on Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2528](https://github.com/gradio-app/gradio/pull/2528)
+- Clearer error message when events are defined outside of a Blocks scope, and a warning if you try to use `Series` or `Parallel` with `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 2543](https://github.com/gradio-app/gradio/pull/2543)
+- Adds support for audio samples that are in `float64`, `float16`, or `uint16` formats by [@abidlabs](https://github.com/abidlabs) in [PR 2545](https://github.com/gradio-app/gradio/pull/2545)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.6
+
+### New Features:
+
+###### Cancelling Running Events
+
+Running events can be cancelled when other events are triggered! To use this feature, pass the `cancels` parameter to the event listener. For this feature to work, the queue must be enabled.
+
+![cancel_on_change_rl](https://user-images.githubusercontent.com/41651716/195952623-61a606bd-e82b-4e1a-802e-223154cb8727.gif)
+
+Code:
+
+```python
+import time
+import gradio as gr
+
+def fake_diffusion(steps):
+    for i in range(steps):
+        time.sleep(1)
+        yield str(i)
+
+def long_prediction(*args, **kwargs):
+    time.sleep(10)
+    return 42
+
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            n = gr.Slider(1, 10, value=9, step=1, label="Number Steps")
+            run = gr.Button()
+            output = gr.Textbox(label="Iterative Output")
+            stop = gr.Button(value="Stop Iterating")
+        with gr.Column():
+            prediction = gr.Number(label="Expensive Calculation")
+            run_pred = gr.Button(value="Run Expensive Calculation")
+        with gr.Column():
+            cancel_on_change = gr.Textbox(label="Cancel Iteration and Expensive Calculation on Change")
+
+    click_event = run.click(fake_diffusion, n, output)
+    stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])
+    pred_event = run_pred.click(fn=long_prediction, inputs=None, outputs=prediction)
+
+    cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])
+
+
+demo.queue(concurrency_count=1, max_size=20).launch()
+```
+
+For interfaces, a stop button will be added automatically if the function uses a `yield` statement.
+
+```python
+import gradio as gr
+import time
+
+def iteration(steps):
+    for i in range(steps):
+        time.sleep(0.5)
+        yield i
+
+gr.Interface(iteration,
+             inputs=gr.Slider(minimum=1, maximum=10, step=1, value=5),
+             outputs=gr.Number()).queue().launch()
+```
+
+![stop_interface_rl](https://user-images.githubusercontent.com/41651716/195952883-e7ca4235-aae3-4852-8f28-96d01d0c5822.gif)
+
+### Bug Fixes:
+
+- Add loading status tracker UI to HTML and Markdown components. [@pngwn](https://github.com/pngwn) in [PR 2474](https://github.com/gradio-app/gradio/pull/2474)
+- Fixed videos being mirrored in the front-end if source is not webcam by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2475](https://github.com/gradio-app/gradio/pull/2475)
+- Add clear button for timeseries component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2487](https://github.com/gradio-app/gradio/pull/2487)
+- Removes special characters from temporary filenames so that the files can be served by components [@abidlabs](https://github.com/abidlabs) in [PR 2480](https://github.com/gradio-app/gradio/pull/2480)
+- Fixed infinite reload loop when mounting gradio as a sub application by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2477](https://github.com/gradio-app/gradio/pull/2477)
+
+### Documentation Changes:
+
+- Adds a demo to show how a sound alert can be played upon completion of a prediction by [@abidlabs](https://github.com/abidlabs) in [PR 2478](https://github.com/gradio-app/gradio/pull/2478)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Enable running events to be cancelled from other events by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2433](https://github.com/gradio-app/gradio/pull/2433)
+- Small fix for version check before reuploading demos by [@aliabd](https://github.com/aliabd) in [PR 2469](https://github.com/gradio-app/gradio/pull/2469)
+- Add loading status tracker UI to HTML and Markdown components. [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2474)
+- Add clear button for timeseries component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2487](https://github.com/gradio-app/gradio/pull/2487)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.5
+
+### Bug Fixes:
+
+- Ensure that Gradio does not take control of the HTML page title when embedding a gradio app as a web component; this behaviour can be flipped by adding `control_page_title="true"` to the web component. [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2400)
+- Decreased latency in iterative-output demos by making the iteration asynchronous [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2409](https://github.com/gradio-app/gradio/pull/2409)
+- Fixed queue getting stuck under very high load by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2374](https://github.com/gradio-app/gradio/pull/2374)
+- Ensure that components always behave as if `interactive=True` were set when the following conditions are true:
+
+  - no default value is provided,
+  - they are not set as the input or output of an event,
+  - `interactive` kwarg is not set.
+
+  [@pngwn](https://github.com/pngwn) in [PR 2459](https://github.com/gradio-app/gradio/pull/2459)
+
+### New Features:
+
+- When an `Image` component is set to `source="upload"`, it is now possible to drag and drop an image to replace a previously uploaded image by [@pngwn](https://github.com/pngwn) in [PR 1711](https://github.com/gradio-app/gradio/issues/1711)
+- The `gr.Dataset` component now accepts `HTML` and `Markdown` components by [@abidlabs](https://github.com/abidlabs) in [PR 2437](https://github.com/gradio-app/gradio/pull/2437)
+
+### Documentation Changes:
+
+- Improved documentation for the `gr.Dataset` component by [@abidlabs](https://github.com/abidlabs) in [PR 2437](https://github.com/gradio-app/gradio/pull/2437)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+- The `Carousel` component is officially deprecated. Since gradio 3.0, code containing the `Carousel` component would throw warnings. As of the next release, the `Carousel` component will raise an exception.
+
+### Full Changelog:
+
+- Speeds up Gallery component by using temporary files instead of base64 representation in the front-end by [@proxyphi](https://github.com/proxyphi), [@pngwn](https://github.com/pngwn), and [@abidlabs](https://github.com/abidlabs) in [PR 2265](https://github.com/gradio-app/gradio/pull/2265)
+- Fixed some embedded demos in the guides by not loading the gradio web component in some guides by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2403](https://github.com/gradio-app/gradio/pull/2403)
+- When an `Image` component is set to `source="upload"`, it is now possible to drag and drop an image to replace a previously uploaded image by [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2410)
+- Improve documentation of the `Blocks.load()` event by [@abidlabs](https://github.com/abidlabs) in [PR 2413](https://github.com/gradio-app/gradio/pull/2413)
+- Decreased latency in iterative-output demos by making the iteration asynchronous [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2409](https://github.com/gradio-app/gradio/pull/2409)
+- Updated share link message to reference new Spaces Hardware [@abidlabs](https://github.com/abidlabs) in [PR 2423](https://github.com/gradio-app/gradio/pull/2423)
+- Automatically restart spaces if they're down by [@aliabd](https://github.com/aliabd) in [PR 2405](https://github.com/gradio-app/gradio/pull/2405)
+- Carousel component is now deprecated by [@abidlabs](https://github.com/abidlabs) in [PR 2434](https://github.com/gradio-app/gradio/pull/2434)
+- Build Gradio from source in UI tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2440](https://github.com/gradio-app/gradio/pull/2440)
+- Change "return ValueError" to "raise ValueError" by [@vzakharov](https://github.com/vzakharov) in [PR 2445](https://github.com/gradio-app/gradio/pull/2445)
+- Add guide on creating a map demo using the `gr.Plot()` component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2402](https://github.com/gradio-app/gradio/pull/2402)
+- Add blur event for `Textbox` and `Number` components [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2448](https://github.com/gradio-app/gradio/pull/2448)
+- Stops a gradio launch from hogging a port even after it's been killed [@aliabid94](https://github.com/aliabid94) in [PR 2453](https://github.com/gradio-app/gradio/pull/2453)
+- Fix embedded interfaces on touch screen devices by [@aliabd](https://github.com/aliabd) in [PR 2457](https://github.com/gradio-app/gradio/pull/2457)
+- Upload all demos to spaces by [@aliabd](https://github.com/aliabd) in [PR 2281](https://github.com/gradio-app/gradio/pull/2281)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.4.1
+
+### New Features:
+
+###### 1. See Past and Upcoming Changes in the Release History 👀
+
+You can now see gradio's release history directly on the website, and also keep track of upcoming changes. Just go [here](https://gradio.app/changelog/).
+
+![release-history](https://user-images.githubusercontent.com/9021060/193145458-3de699f7-7620-45de-aa73-a1c1b9b96257.gif)
+
+### Bug Fixes:
+
+1. Fix typo in guide image path by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2357](https://github.com/gradio-app/gradio/pull/2357)
+2. Raise error if Blocks has duplicate component with same IDs by [@abidlabs](https://github.com/abidlabs) in [PR 2359](https://github.com/gradio-app/gradio/pull/2359)
+3. Catch the permission exception on the audio component by [@Ian-GL](https://github.com/Ian-GL) in [PR 2330](https://github.com/gradio-app/gradio/pull/2330)
+4. Fix image_classifier_interface_load demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2365](https://github.com/gradio-app/gradio/pull/2365)
+5. Fix combining adjacent components without gaps by introducing `gr.Row(variant="compact")` by [@aliabid94](https://github.com/aliabid94) in [PR 2291](https://github.com/gradio-app/gradio/pull/2291). This comes with deprecation of the following arguments for `Component.style`: `round`, `margin`, `border`.
+6. Fix audio streaming, which was previously choppy, in [PR 2351](https://github.com/gradio-app/gradio/pull/2351). Big thanks to [@yannickfunk](https://github.com/yannickfunk) for the proposed solution.
+7. Fix bug where new typeable slider doesn't respect the minimum and maximum values [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2380](https://github.com/gradio-app/gradio/pull/2380)
+
+### Documentation Changes:
+
+1. New Guide: Connecting to a Database 🗄️
+
+   A new guide by [@freddyaboulton](https://github.com/freddyaboulton) that explains how you can use Gradio to connect your app to a database. Read more [here](https://gradio.app/connecting_to_a_database/).
+
+2. New Guide: Running Background Tasks 🥷
+
+   A new guide by [@freddyaboulton](https://github.com/freddyaboulton) that explains how you can run background tasks from your gradio app. Read more [here](https://gradio.app/running_background_tasks/).
+
+3. Small fixes to docs for `Image` component by [@abidlabs](https://github.com/abidlabs) in [PR 2372](https://github.com/gradio-app/gradio/pull/2372)
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Create a guide on how to connect an app to a database hosted on the cloud by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2341](https://github.com/gradio-app/gradio/pull/2341)
+- Removes `analytics` dependency by [@abidlabs](https://github.com/abidlabs) in [PR 2347](https://github.com/gradio-app/gradio/pull/2347)
+- Add guide on launching background tasks from your app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2350](https://github.com/gradio-app/gradio/pull/2350)
+- Fix typo in guide image path by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2357](https://github.com/gradio-app/gradio/pull/2357)
+- Raise error if Blocks has duplicate component with same IDs by [@abidlabs](https://github.com/abidlabs) in [PR 2359](https://github.com/gradio-app/gradio/pull/2359)
+- Hotfix: fix version back to 3.4 by [@abidlabs](https://github.com/abidlabs) in [PR 2361](https://github.com/gradio-app/gradio/pull/2361)
+- Change version.txt to 3.4 instead of 3.4.0 by [@aliabd](https://github.com/aliabd) in [PR 2363](https://github.com/gradio-app/gradio/pull/2363)
+- Catch the permission exception on the audio component by [@Ian-GL](https://github.com/Ian-GL) in [PR 2330](https://github.com/gradio-app/gradio/pull/2330)
+- Fix image_classifier_interface_load demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2365](https://github.com/gradio-app/gradio/pull/2365)
+- Small fixes to docs for `Image` component by [@abidlabs](https://github.com/abidlabs) in [PR 2372](https://github.com/gradio-app/gradio/pull/2372)
+- Automated Release Notes by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2306](https://github.com/gradio-app/gradio/pull/2306)
+- Fixed small typos in the docs [@julien-c](https://github.com/julien-c) in [PR 2373](https://github.com/gradio-app/gradio/pull/2373)
+- Adds ability to disable pre/post-processing for examples [@abidlabs](https://github.com/abidlabs) in [PR 2383](https://github.com/gradio-app/gradio/pull/2383)
+- Copy changelog file in website docker by [@aliabd](https://github.com/aliabd) in [PR 2384](https://github.com/gradio-app/gradio/pull/2384)
+- Lets users provide a `gr.update()` dictionary even if post-processing is disabled [@abidlabs](https://github.com/abidlabs) in [PR 2385](https://github.com/gradio-app/gradio/pull/2385)
+- Fix bug where errors would cause apps run in reload mode to hang forever by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2394](https://github.com/gradio-app/gradio/pull/2394)
+- Fix bug where new typeable slider doesn't respect the minimum and maximum values [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2380](https://github.com/gradio-app/gradio/pull/2380)
+
+### Contributors Shoutout:
+
+No changes to highlight.
+
+## 3.4
+
+### New Features:
+
+###### 1. Gallery Captions 🖼️
+
+You can now pass captions to images in the Gallery component. To do so, you need to pass in a list of `(image, caption)` tuples, where each caption is a string. This is optional; the component also accepts just a list of the images.
+
+Here's an example:
+
+```python
+import gradio as gr
+
+images_with_captions = [
+    ("https://images.unsplash.com/photo-1551969014-7d2c4cddf0b6", "Cheetah by David Groves"),
+    ("https://images.unsplash.com/photo-1546182990-dffeafbe841d", "Lion by Francesco"),
+    ("https://images.unsplash.com/photo-1561731216-c3a4d99437d5", "Tiger by Mike Marrah")
+]
+
+with gr.Blocks() as demo:
+    gr.Gallery(value=images_with_captions)
+
+demo.launch()
+```
+
+###### 2. Type Values into the Slider 🔢
+
+You can now type values directly on the Slider component! Here's what it looks like:
+
+![type-slider](https://user-images.githubusercontent.com/9021060/192399877-76b662a1-fede-4417-a932-fc15f0da7360.gif)
+
+###### 3. Better Sketching and Inpainting 🎨
+
+We've made a lot of changes to our Image component so that it can support better sketching and inpainting.
+
+Now supports:
+
+- A standalone black-and-white sketch
+
+```python
+import gradio as gr
+demo = gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())
+demo.launch()
+```
+
+![bw](https://user-images.githubusercontent.com/9021060/192410264-b08632b5-7b2a-4f86-afb0-5760e7b474cf.gif)
+
+- A standalone color sketch
+
+```python
+import gradio as gr
+demo = gr.Interface(lambda x: x, gr.Paint(), gr.Image())
+demo.launch()
+```
+
+![color-sketch](https://user-images.githubusercontent.com/9021060/192410500-3c8c3e64-a5fd-4df2-a991-f0a5cef93728.gif)
+
+- An uploadable image with black-and-white or color sketching
+
+```python
+import gradio as gr
+demo = gr.Interface(lambda x: x, gr.Image(source='upload', tool='color-sketch'), gr.Image())  # for black and white, tool = 'sketch'
+demo.launch()
+```
+
+![sketch-new](https://user-images.githubusercontent.com/9021060/192402422-e53cb7b6-024e-448c-87eb-d6a35a63c476.gif)
+
+- Webcam with black-and-white or color sketching
+
+```python
+import gradio as gr
+demo = gr.Interface(lambda x: x, gr.Image(source='webcam', tool='color-sketch'), gr.Image())  # for black and white, tool = 'sketch'
+demo.launch()
+```
+
+![webcam-sketch](https://user-images.githubusercontent.com/9021060/192410820-0ffaf324-776e-4e1f-9de6-0fdbbf4940fa.gif)
+
+These come along with several other fixes.
+
+### Bug Fixes:
+
+1. Fix bug where max concurrency count is not respected in queue by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2286](https://github.com/gradio-app/gradio/pull/2286)
+2. Fix: queue could be blocked by [@SkyTNT](https://github.com/SkyTNT) in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)
+3. Supports `gr.update()` in example caching by [@abidlabs](https://github.com/abidlabs) in [PR 2309](https://github.com/gradio-app/gradio/pull/2309)
+4. Clipboard fix for iframes by [@abidlabs](https://github.com/abidlabs) in [PR 2321](https://github.com/gradio-app/gradio/pull/2321)
+5. Fix: Dataframe column headers are reset when you add a new column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2318](https://github.com/gradio-app/gradio/pull/2318)
+6. Added support for URLs for Video, Audio, and Image by [@abidlabs](https://github.com/abidlabs) in [PR 2256](https://github.com/gradio-app/gradio/pull/2256)
+7. Add documentation about how to create and use the Gradio FastAPI app by [@abidlabs](https://github.com/abidlabs) in [PR 2263](https://github.com/gradio-app/gradio/pull/2263)
+
+### Documentation Changes:
+
+1. Adding a Playground Tab to the Website by [@aliabd](https://github.com/aliabd) in [PR 1860](https://github.com/gradio-app/gradio/pull/1860)
+2. Gradio for Tabular Data Science Workflows Guide by [@merveenoyan](https://github.com/merveenoyan) in [PR 2199](https://github.com/gradio-app/gradio/pull/2199)
+3. Promotes `postprocess` and `preprocess` to documented parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2293](https://github.com/gradio-app/gradio/pull/2293)
+4. Update 2)key_features.md by [@voidxd](https://github.com/voidxd) in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)
+5. Add docs to blocks context postprocessing function by [@Ian-GL](https://github.com/Ian-GL) in [PR 2332](https://github.com/gradio-app/gradio/pull/2332)
+
+### Testing and Infrastructure Changes:
+
+1. Website fixes and refactoring by [@aliabd](https://github.com/aliabd) in [PR 2280](https://github.com/gradio-app/gradio/pull/2280)
+2. Don't deploy to spaces on release by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2313](https://github.com/gradio-app/gradio/pull/2313)
+
+### Full Changelog:
+
+- Website fixes and refactoring by [@aliabd](https://github.com/aliabd) in [PR 2280](https://github.com/gradio-app/gradio/pull/2280)
+- Fix bug where max concurrency count is not respected in queue by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2286](https://github.com/gradio-app/gradio/pull/2286)
+- Promotes `postprocess` and `preprocess` to documented parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2293](https://github.com/gradio-app/gradio/pull/2293)
+- Raise warning when trying to cache examples but not all inputs have examples by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2279](https://github.com/gradio-app/gradio/pull/2279)
+- Fix: queue could be blocked by [@SkyTNT](https://github.com/SkyTNT) in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)
+- Don't deploy to spaces on release by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2313](https://github.com/gradio-app/gradio/pull/2313)
+- Supports `gr.update()` in example caching by [@abidlabs](https://github.com/abidlabs) in [PR 2309](https://github.com/gradio-app/gradio/pull/2309)
+- Respect Upstream Queue when loading interfaces/blocks from Spaces by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2294](https://github.com/gradio-app/gradio/pull/2294)
+- Clipboard fix for iframes by [@abidlabs](https://github.com/abidlabs) in [PR 2321](https://github.com/gradio-app/gradio/pull/2321)
+- Sketching + Inpainting Capabilities to Gradio by [@abidlabs](https://github.com/abidlabs) in [PR 2144](https://github.com/gradio-app/gradio/pull/2144)
+- Update 2)key_features.md by [@voidxd](https://github.com/voidxd) in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)
+- Release 3.4b3 by [@abidlabs](https://github.com/abidlabs) in [PR 2328](https://github.com/gradio-app/gradio/pull/2328)
+- Fix: Dataframe column headers are reset when you add a new column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2318](https://github.com/gradio-app/gradio/pull/2318)
+- Start queue when gradio is a sub application by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2319](https://github.com/gradio-app/gradio/pull/2319)
+- Fix Web Tracker Script by [@aliabd](https://github.com/aliabd) in [PR 2308](https://github.com/gradio-app/gradio/pull/2308)
+- Add docs to blocks context postprocessing function by [@Ian-GL](https://github.com/Ian-GL) in [PR 2332](https://github.com/gradio-app/gradio/pull/2332)
+- Fix typo in iterator variable name in run_predict function by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2340](https://github.com/gradio-app/gradio/pull/2340)
+- Add captions to galleries by [@aliabid94](https://github.com/aliabid94) in [PR 2284](https://github.com/gradio-app/gradio/pull/2284)
+- Typeable value on gradio.Slider by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2329](https://github.com/gradio-app/gradio/pull/2329)
+
+### Contributors Shoutout:
+
+- [@SkyTNT](https://github.com/SkyTNT) made their first contribution in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)
+- [@voidxd](https://github.com/voidxd) made their first contribution in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)
+
+## 3.3
+
+### New Features:
+
+###### 1. Iterative Outputs ⏳
+
+You can now create an iterative output simply by having your function return a generator!
+
+Here's (part of) an example that was used to generate the interface below it. [See full code](https://colab.research.google.com/drive/1m9bWS6B82CT7bw-m4L6AJR8za7fEK7Ov?usp=sharing).
+
+```python
+def predict(steps, seed):
+    # `torch` and `pipeline` are defined in the full notebook linked above
+    generator = torch.manual_seed(seed)
+    for i in range(1, steps):
+        yield pipeline(generator=generator, num_inference_steps=i)["sample"][0]
+```
+
+![example](https://user-images.githubusercontent.com/9021060/189086273-f5e7087d-71fa-4158-90a9-08e84da0421c.mp4)
+
+###### 2. Accordion Layout 🆕
+
+This version of Gradio introduces a new layout component to Blocks: the Accordion. Wrap your elements in a neat, expandable layout that allows users to toggle them as needed.
+
+Usage: ([Read the docs](https://gradio.app/docs/#accordion))
+
+```python
+with gr.Accordion("open up"):
+    # components here
+```
+
+![accordion](https://user-images.githubusercontent.com/9021060/189088465-f0ffd7f0-fc6a-42dc-9249-11c5e1e0529b.gif)
+
+###### 3. Skops Integration 📈
+
+Our new integration with [skops](https://huggingface.co/blog/skops) allows you to load tabular classification and regression models directly from the [hub](https://huggingface.co/models).
+
+Here's a classification example showing how quick it is to set up an interface for a [model](https://huggingface.co/scikit-learn/tabular-playground).
+
+```python
+import gradio as gr
+gr.Interface.load("models/scikit-learn/tabular-playground").launch()
+```
+
+![187936493-5c90c01d-a6dd-400f-aa42-833a096156a1](https://user-images.githubusercontent.com/9021060/189090519-328fbcb4-120b-43c8-aa54-d6fccfa6b7e8.png)
+
+### Bug Fixes:
+
+No changes to highlight.
+
+### Documentation Changes:
+
+No changes to highlight.
+
+### Testing and Infrastructure Changes:
+
+No changes to highlight.
+
+### Breaking Changes:
+
+No changes to highlight.
+
+### Full Changelog:
+
+- Safari fixes by [@pngwn](https://github.com/pngwn) in [PR 2138](https://github.com/gradio-app/gradio/pull/2138)
+- Fix roundedness and form borders by [@aliabid94](https://github.com/aliabid94) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)
+- Better processing of example data prior to creating dataset component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)
+- Show error on Connection drops by [@aliabid94](https://github.com/aliabid94) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)
+- 3.2 release!
by [@abidlabs](https://github.com/abidlabs) in [PR 2139](https://github.com/gradio-app/gradio/pull/2139) +- Fixed Named API Requests by [@abidlabs](https://github.com/abidlabs) in [PR 2151](https://github.com/gradio-app/gradio/pull/2151) +- Quick Fix: Cannot upload Model3D image after clearing it by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2168](https://github.com/gradio-app/gradio/pull/2168) +- Fixed misleading log when server_name is '0.0.0.0' by [@lamhoangtung](https://github.com/lamhoangtung) in [PR 2176](https://github.com/gradio-app/gradio/pull/2176) +- Keep embedded PngInfo metadata by [@cobryan05](https://github.com/cobryan05) in [PR 2170](https://github.com/gradio-app/gradio/pull/2170) +- Skops integration: Load tabular classification and regression models from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2126](https://github.com/gradio-app/gradio/pull/2126) +- Respect original filename when cached example files are downloaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2145](https://github.com/gradio-app/gradio/pull/2145) +- Add manual trigger to deploy to pypi by [@abidlabs](https://github.com/abidlabs) in [PR 2192](https://github.com/gradio-app/gradio/pull/2192) +- Fix bugs with gr.update by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2157](https://github.com/gradio-app/gradio/pull/2157) +- Make queue per app by [@aliabid94](https://github.com/aliabid94) in [PR 2193](https://github.com/gradio-app/gradio/pull/2193) +- Preserve Labels In Interpretation Components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2166](https://github.com/gradio-app/gradio/pull/2166) +- Quick Fix: Multiple file download not working by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2169](https://github.com/gradio-app/gradio/pull/2169) +- use correct MIME type for js-script file by [@daspartho](https://github.com/daspartho) in [PR 2200](https://github.com/gradio-app/gradio/pull/2200) +- Add accordion component by [@aliabid94](https://github.com/aliabid94) in [PR 2208](https://github.com/gradio-app/gradio/pull/2208) + +### Contributors Shoutout: + +- [@lamhoangtung](https://github.com/lamhoangtung) made their first contribution in [PR 2176](https://github.com/gradio-app/gradio/pull/2176) +- [@cobryan05](https://github.com/cobryan05) made their first contribution in [PR 2170](https://github.com/gradio-app/gradio/pull/2170) +- [@daspartho](https://github.com/daspartho) made their first contribution in [PR 2200](https://github.com/gradio-app/gradio/pull/2200) + +## 3.2 + +### New Features: + +###### 1. Improvements to Queuing 🥇 + +We've implemented a brand new queuing system based on **web sockets** instead of HTTP long polling. Among other things, this allows us to manage queue sizes better on Hugging Face Spaces. There are also additional queue-related parameters you can add: + +- Now supports concurrent workers (parallelization) + +```python +demo = gr.Interface(...) +demo.queue(concurrency_count=3) +demo.launch() +``` + +- Configure a maximum queue size + +```python +demo = gr.Interface(...) +demo.queue(max_size=100) +demo.launch() +``` + +- If a user closes their tab / browser, they leave the queue, which means the demo will run faster for everyone else + +###### 2. 
Fixes to Examples
+
+- Dataframe examples will render properly and look much clearer in the UI (thanks to PR #2125):
+
+![Screen Shot 2022-08-30 at 8 29 58 PM](https://user-images.githubusercontent.com/9021060/187586561-d915bafb-f968-4966-b9a2-ef41119692b2.png)
+
+- Image and Video thumbnails are cropped to look neater and more uniform (thanks to PR #2109):
+
+![Screen Shot 2022-08-30 at 8 32 15 PM](https://user-images.githubusercontent.com/9021060/187586890-56e1e4f0-1b84-42d9-a82f-911772c41030.png)
+
+- Other fixes in PR #2131 and #2064 make it easier to design and use Examples
+
+###### 3. Component Fixes 🧱
+
+- Specify the width and height of an image in its style tag (thanks to PR #2133)
+
+```python
+components.Image().style(height=260, width=300)
+```
+
+- Automatic conversion of videos so they are playable in the browser (thanks to PR #2003). Gradio will check if a video's format is playable in the browser and, if it isn't, will automatically convert it to a format that is (mp4).
+- Pass in a JSON filepath to the Label component (thanks to PR #2083)
+- Randomize the default value of a Slider (thanks to PR #1935)
+
+![slider-random](https://user-images.githubusercontent.com/9021060/187596230-3db9697f-9f4d-42f5-9387-d77573513448.gif)
+
+- Improvements to State in PR #2100
+
+###### 4. Ability to Randomize Input Sliders and Reload Data whenever the Page Loads
+
+- In some cases, you want to be able to show a different set of input data to every user as they load the app. For example, you might want to randomize the value of a "seed" `Slider` input. Or you might want to show a `Textbox` with the current date. We now support passing _functions_ as the default value in input components. When you pass in a function, it gets **re-evaluated** every time someone loads the demo, allowing you to reload / change data for different users.
+
+Here's an example loading the current datetime into an input Textbox:
+
+```python
+import gradio as gr
+import datetime
+
+with gr.Blocks() as demo:
+    gr.Textbox(datetime.datetime.now)
+
+demo.launch()
+```
+
+Note that we don't evaluate the function (`datetime.datetime.now()`); we pass in the function itself (`datetime.datetime.now`) to get this behavior.
+
+Because randomizing the initial value of `Slider` is a common use case, we've added a `randomize` keyword argument you can use to randomize its initial value:
+
+```python
+import gradio as gr
+demo = gr.Interface(lambda x: x, gr.Slider(0, 10, randomize=True), "number")
+demo.launch()
+```
+
+###### 5.
New Guide 🖊️ + +- [Gradio and W&B Integration](https://gradio.app/Gradio_and_Wandb_Integration/) + +### Full Changelog: + +- Reset components to original state by setting value to None by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2044](https://github.com/gradio-app/gradio/pull/2044) +- Cleaning up the way data is processed for components by [@abidlabs](https://github.com/abidlabs) in [PR 1967](https://github.com/gradio-app/gradio/pull/1967) +- version 3.1.8b by [@abidlabs](https://github.com/abidlabs) in [PR 2063](https://github.com/gradio-app/gradio/pull/2063) +- Wandb guide by [@AK391](https://github.com/AK391) in [PR 1898](https://github.com/gradio-app/gradio/pull/1898) +- Add a flagging callback to save json files to a hugging face dataset by [@chrisemezue](https://github.com/chrisemezue) in [PR 1821](https://github.com/gradio-app/gradio/pull/1821) +- Add data science demos to landing page by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2067](https://github.com/gradio-app/gradio/pull/2067) +- Hide time series + xgboost demos by default by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2079](https://github.com/gradio-app/gradio/pull/2079) +- Encourage people to keep trying when queue full by [@apolinario](https://github.com/apolinario) in [PR 2076](https://github.com/gradio-app/gradio/pull/2076) +- Updated our analytics on creation of Blocks/Interface by [@abidlabs](https://github.com/abidlabs) in [PR 2082](https://github.com/gradio-app/gradio/pull/2082) +- `Label` component now accepts file paths to `.json` files by [@abidlabs](https://github.com/abidlabs) in [PR 2083](https://github.com/gradio-app/gradio/pull/2083) +- Fix issues related to demos in Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2086](https://github.com/gradio-app/gradio/pull/2086) +- Fix TimeSeries examples not properly displayed in UI by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2064](https://github.com/gradio-app/gradio/pull/2064) +- Fix infinite requests when doing tab item select by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2070](https://github.com/gradio-app/gradio/pull/2070) +- Accept deprecated `file` route as well by [@abidlabs](https://github.com/abidlabs) in [PR 2099](https://github.com/gradio-app/gradio/pull/2099) +- Allow frontend method execution on Block.load event by [@codedealer](https://github.com/codedealer) in [PR 2108](https://github.com/gradio-app/gradio/pull/2108) +- Improvements to `State` by [@abidlabs](https://github.com/abidlabs) in [PR 2100](https://github.com/gradio-app/gradio/pull/2100) +- Catch IndexError, KeyError in video_is_playable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2113](https://github.com/gradio-app/gradio/pull/2113) +- Fix: Download button does not respect the filepath returned by the function by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2073](https://github.com/gradio-app/gradio/pull/2073) +- Refactoring Layout: Adding column widths, forms, and more. 
by [@aliabid94](https://github.com/aliabid94) in [PR 2097](https://github.com/gradio-app/gradio/pull/2097)
+- Update CONTRIBUTING.md by [@abidlabs](https://github.com/abidlabs) in [PR 2118](https://github.com/gradio-app/gradio/pull/2118)
+- 2092 df ex by [@pngwn](https://github.com/pngwn) in [PR 2125](https://github.com/gradio-app/gradio/pull/2125)
+- feat(samples table/gallery): Crop thumbs to square by [@ronvoluted](https://github.com/ronvoluted) in [PR 2109](https://github.com/gradio-app/gradio/pull/2109)
+- Some enhancements to `gr.Examples` by [@abidlabs](https://github.com/abidlabs) in [PR 2131](https://github.com/gradio-app/gradio/pull/2131)
+- Image size fix by [@aliabid94](https://github.com/aliabid94) in [PR 2133](https://github.com/gradio-app/gradio/pull/2133)
+
+### Contributors Shoutout:
+
+- [@chrisemezue](https://github.com/chrisemezue) made their first contribution in [PR 1821](https://github.com/gradio-app/gradio/pull/1821)
+- [@apolinario](https://github.com/apolinario) made their first contribution in [PR 2076](https://github.com/gradio-app/gradio/pull/2076)
+- [@codedealer](https://github.com/codedealer) made their first contribution in [PR 2108](https://github.com/gradio-app/gradio/pull/2108)
+
+## 3.1
+
+### New Features:
+
+###### 1. Embedding Demos on Any Website 💻
+
+With PR #1444, Gradio is now distributed as a web component. This means demos can be natively embedded on websites. You'll just need to add two lines: one to load the gradio javascript, and one to link to the demo's backend.
+
+Here's a simple example that embeds a demo from a Hugging Face space (the version number in the script URL and the space name below are illustrative; substitute your own):
+
+```html
+<!-- 1. Load the gradio web component script (illustrative version number) -->
+<script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.1.0/gradio.js"></script>
+<!-- 2. Point the component at a Hugging Face Space (illustrative space name) -->
+<gradio-app space="abidlabs/pytorch-image-classifier"></gradio-app>
+```
+
+But you can also embed demos that are running anywhere: you just need to link the demo via `src` instead of `space`. In fact, all the demos on the gradio website are embedded this way:
+
+Screen Shot 2022-07-14 at 2 41 44 PM
+
+Read more in the [Embedding Gradio Demos](https://gradio.app/embedding_gradio_demos) guide.
+
+###### 2. Reload Mode 👨‍💻
+
+Reload mode helps developers create gradio demos faster by automatically reloading the demo whenever the code changes. It supports development in Python IDEs (VS Code, PyCharm, etc.), the terminal, and Jupyter notebooks.
+
+If your demo code is in a script named `app.py`, instead of running `python app.py` you can now run `gradio app.py`, and that will launch the demo in reload mode:
+
+```bash
+Launching in reload mode on: http://127.0.0.1:7860 (Press CTRL+C to quit)
+Watching...
+WARNING: The --reload flag should not be used in production on Windows.
+```
+
+If you're working from a Jupyter or Colab Notebook, use these magic commands instead: `%load_ext gradio` when you import gradio, and `%%blocks` at the top of the cell with the demo code. Here's an example that shows how much faster the development becomes:
+
+![Blocks](https://user-images.githubusercontent.com/9021060/178986488-ed378cc8-5141-4330-ba41-672b676863d0.gif)
+
+###### 3. Inpainting Support on `gr.Image()` 🎨
+
+We updated the Image component to add support for inpainting demos. It works by adding `tool="sketch"` as a parameter, which passes both an image and a sketchable mask to your prediction function.
+
+Here's an example from the [LAMA space](https://huggingface.co/spaces/akhaliq/lama):
+
+![FXApVlFVsAALSD-](https://user-images.githubusercontent.com/9021060/178989479-549867c8-7fb0-436a-a97d-1e91c9f5e611.jpeg)
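+
+As a minimal, hypothetical sketch of what such a demo can look like (assuming, per the description above, that the sketch tool delivers a dict holding the uploaded image and the drawn mask; the handler below just returns the image in place of a real inpainting model):
+
+```python
+import gradio as gr
+
+def inpaint(inputs):
+    # Illustrative unpacking: with tool="sketch", the component passes the
+    # uploaded image together with the user-drawn mask.
+    image, mask = inputs["image"], inputs["mask"]
+    # A real demo would run an inpainting model on the masked region here.
+    return image
+
+demo = gr.Interface(fn=inpaint, inputs=gr.Image(tool="sketch"), outputs="image")
+demo.launch()
+```
+
+###### 4.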
Markdown and HTML support in Dataframes 🔢 + +We upgraded the Dataframe component in PR #1684 to support rendering Markdown and HTML inside the cells. + +This means you can build Dataframes that look like the following: + +![image (8)](https://user-images.githubusercontent.com/9021060/178991233-41cb07a5-e7a3-433e-89b8-319bc78eb9c2.png) + +###### 5. `gr.Examples()` for Blocks 🧱 + +We've added the `gr.Examples` component helper to allow you to add examples to any Blocks demo. This class is a wrapper over the `gr.Dataset` component. + +Screen Shot 2022-07-14 at 2 23 50 PM + +gr.Examples takes two required parameters: + +- `examples` which takes in a nested list +- `inputs` which takes in a component or list of components + +You can read more in the [Examples docs](https://gradio.app/docs/#examples) or the [Adding Examples to your Demos guide](https://gradio.app/adding_examples_to_your_app/). + +###### 6. Fixes to Audio Streaming + +With [PR 1828](https://github.com/gradio-app/gradio/pull/1828) we now hide the status loading animation, as well as remove the echo in streaming. Check out the [stream_audio](https://github.com/gradio-app/gradio/blob/main/demo/stream_audio/run.py) demo for more or read through our [Real Time Speech Recognition](https://gradio.app/real_time_speech_recognition/) guide. + +Screen Shot 2022-07-19 at 6 02 35 PM + +### Full Changelog: + +- File component: list multiple files and allow for download #1446 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1681](https://github.com/gradio-app/gradio/pull/1681) +- Add ColorPicker to docs by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1768](https://github.com/gradio-app/gradio/pull/1768) +- Mock out requests in TestRequest unit tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1794](https://github.com/gradio-app/gradio/pull/1794) +- Add requirements.txt and test_files to source dist by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1817](https://github.com/gradio-app/gradio/pull/1817) +- refactor: f-string for tunneling.py by [@nhankiet](https://github.com/nhankiet) in [PR 1819](https://github.com/gradio-app/gradio/pull/1819) +- Miscellaneous formatting improvements to website by [@aliabd](https://github.com/aliabd) in [PR 1754](https://github.com/gradio-app/gradio/pull/1754) +- `integrate()` method moved to `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 1776](https://github.com/gradio-app/gradio/pull/1776) +- Add python-3.7 tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1818](https://github.com/gradio-app/gradio/pull/1818) +- Copy test dir in website dockers by [@aliabd](https://github.com/aliabd) in [PR 1827](https://github.com/gradio-app/gradio/pull/1827) +- Add info to docs on how to set default values for components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1788](https://github.com/gradio-app/gradio/pull/1788) +- Embedding Components on Docs by [@aliabd](https://github.com/aliabd) in [PR 1726](https://github.com/gradio-app/gradio/pull/1726) +- Remove usage of deprecated gr.inputs and gr.outputs from website by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1796](https://github.com/gradio-app/gradio/pull/1796) +- Some cleanups to the docs page by [@abidlabs](https://github.com/abidlabs) in [PR 1822](https://github.com/gradio-app/gradio/pull/1822) + +### Contributors Shoutout: + +- [@nhankiet](https://github.com/nhankiet) made their first contribution in [PR 
1819](https://github.com/gradio-app/gradio/pull/1819)
+
+## 3.0
+
+###### 🔥 Gradio 3.0 is the biggest update to the library, ever.
+
+### New Features:
+
+###### 1. Blocks 🧱
+
+Blocks is a new, low-level API that allows you to have full control over the data flows and layout of your application. It allows you to build very complex, multi-step applications. For example, you might want to:
+
+- Group together related demos as multiple tabs in one web app
+- Change the layout of your demo instead of just having all of the inputs on the left and outputs on the right
+- Have multi-step interfaces, in which the output of one model becomes the input to the next model, or have more flexible data flows in general
+- Change a component's properties (for example, the choices in a Dropdown) or its visibility based on user input
+
+Here's a simple example that creates the demo below it:
+
+```python
+import gradio as gr
+
+def update(name):
+    return f"Welcome to Gradio, {name}!"
+
+demo = gr.Blocks()
+
+with demo:
+    gr.Markdown(
+        """
+    # Hello World!
+    Start typing below to see the output.
+    """)
+    inp = gr.Textbox(placeholder="What is your name?")
+    out = gr.Textbox()
+
+    inp.change(fn=update,
+               inputs=inp,
+               outputs=out)
+
+demo.launch()
+```
+
+![hello-blocks](https://user-images.githubusercontent.com/9021060/168684108-78cbd24b-e6bd-4a04-a8d9-20d535203434.gif)
+
+Read our [Introduction to Blocks](http://gradio.app/introduction_to_blocks/) guide for more, and join the 🎈 [Gradio Blocks Party](https://huggingface.co/spaces/Gradio-Blocks/README)!
+
+###### 2. Our Revamped Design 🎨
+
+We've upgraded our design across the entire library: from components and layouts all the way to dark mode.
+
+![kitchen_sink](https://user-images.githubusercontent.com/9021060/168686333-7a6e3096-3e23-4309-abf2-5cd7736e0463.gif)
+
+###### 3. A New Website 💻
+
+We've upgraded [gradio.app](https://gradio.app) to make it cleaner, faster, and easier to use. Our docs now come with components and demos embedded directly on the page, so you can quickly get up to speed with what you're looking for.
+
+![website](https://user-images.githubusercontent.com/9021060/168687191-10d6a3bd-101f-423a-8193-48f47a5e077d.gif)
+
+###### 4. New Components: Model3D, Dataset, and More...
+
+We've introduced a lot of new components in `3.0`, including `Model3D`, `Dataset`, `Markdown`, `Button` and `Gallery`. You can find all the components and play around with them [here](https://gradio.app/docs/#components). A small sketch of a few of them in action follows below.
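+
+As a quick, hypothetical sketch of some of the new components working together (the demo contents are invented for illustration; the component names are the ones listed above):
+
+```python
+import gradio as gr
+
+with gr.Blocks() as demo:
+    gr.Markdown("## Trying out the new 3.0 components")
+    btn = gr.Button("Greet")
+    out = gr.Textbox(label="Output")
+    # Clicking the new Button component runs a function and fills the Textbox.
+    btn.click(fn=lambda: "Hello from Gradio 3.0!", inputs=None, outputs=out)
+
+demo.launch()
+```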
+ +![Model3d](https://user-images.githubusercontent.com/9021060/168689062-6ad77151-8cc5-467d-916c-f7c78e52ec0c.gif) + +### Full Changelog: + +- Gradio dash fe by [@pngwn](https://github.com/pngwn) in [PR 807](https://github.com/gradio-app/gradio/pull/807) +- Blocks components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 765](https://github.com/gradio-app/gradio/pull/765) +- Blocks components V2 by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 843](https://github.com/gradio-app/gradio/pull/843) +- Blocks-Backend-Events by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 844](https://github.com/gradio-app/gradio/pull/844) +- Interfaces from Blocks by [@aliabid94](https://github.com/aliabid94) in [PR 849](https://github.com/gradio-app/gradio/pull/849) +- Blocks dev by [@aliabid94](https://github.com/aliabid94) in [PR 853](https://github.com/gradio-app/gradio/pull/853) +- Started updating demos to use the new `gradio.components` syntax by [@abidlabs](https://github.com/abidlabs) in [PR 848](https://github.com/gradio-app/gradio/pull/848) +- add test infra + add browser tests to CI by [@pngwn](https://github.com/pngwn) in [PR 852](https://github.com/gradio-app/gradio/pull/852) +- 854 textbox by [@pngwn](https://github.com/pngwn) in [PR 859](https://github.com/gradio-app/gradio/pull/859) +- Getting old Python unit tests to pass on `blocks-dev` by [@abidlabs](https://github.com/abidlabs) in [PR 861](https://github.com/gradio-app/gradio/pull/861) +- initialise chatbot with empty array of messages by [@pngwn](https://github.com/pngwn) in [PR 867](https://github.com/gradio-app/gradio/pull/867) +- add test for output to input by [@pngwn](https://github.com/pngwn) in [PR 866](https://github.com/gradio-app/gradio/pull/866) +- More Interface -> Blocks features by [@aliabid94](https://github.com/aliabid94) in [PR 864](https://github.com/gradio-app/gradio/pull/864) +- Fixing external.py in blocks-dev to reflect the new HF Spaces paths by [@abidlabs](https://github.com/abidlabs) in [PR 879](https://github.com/gradio-app/gradio/pull/879) +- backend_default_value_refactoring by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 871](https://github.com/gradio-app/gradio/pull/871) +- fix default_value by [@pngwn](https://github.com/pngwn) in [PR 869](https://github.com/gradio-app/gradio/pull/869) +- fix buttons by [@aliabid94](https://github.com/aliabid94) in [PR 883](https://github.com/gradio-app/gradio/pull/883) +- Checking and updating more demos to use 3.0 syntax by [@abidlabs](https://github.com/abidlabs) in [PR 892](https://github.com/gradio-app/gradio/pull/892) +- Blocks Tests by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 902](https://github.com/gradio-app/gradio/pull/902) +- Interface fix by [@pngwn](https://github.com/pngwn) in [PR 901](https://github.com/gradio-app/gradio/pull/901) +- Quick fix: Issue 893 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 907](https://github.com/gradio-app/gradio/pull/907) +- 3d Image Component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 775](https://github.com/gradio-app/gradio/pull/775) +- fix endpoint url in prod by [@pngwn](https://github.com/pngwn) in [PR 911](https://github.com/gradio-app/gradio/pull/911) +- rename Model3d to Image3D by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 912](https://github.com/gradio-app/gradio/pull/912) +- update pypi to 2.9.1 by [@abidlabs](https://github.com/abidlabs) in [PR 916](https://github.com/gradio-app/gradio/pull/916) +- blocks-with-fix by 
[@FarukOzderim](https://github.com/FarukOzderim) in [PR 917](https://github.com/gradio-app/gradio/pull/917) +- Restore Interpretation, Live, Auth, Queueing by [@aliabid94](https://github.com/aliabid94) in [PR 915](https://github.com/gradio-app/gradio/pull/915) +- Allow `Blocks` instances to be used like a `Block` in other `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 919](https://github.com/gradio-app/gradio/pull/919) +- Redesign 1 by [@pngwn](https://github.com/pngwn) in [PR 918](https://github.com/gradio-app/gradio/pull/918) +- blocks-components-tests by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 904](https://github.com/gradio-app/gradio/pull/904) +- fix unit + browser tests by [@pngwn](https://github.com/pngwn) in [PR 926](https://github.com/gradio-app/gradio/pull/926) +- blocks-move-test-data by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 927](https://github.com/gradio-app/gradio/pull/927) +- remove debounce from form inputs by [@pngwn](https://github.com/pngwn) in [PR 932](https://github.com/gradio-app/gradio/pull/932) +- reimplement webcam video by [@pngwn](https://github.com/pngwn) in [PR 928](https://github.com/gradio-app/gradio/pull/928) +- blocks-move-test-data by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 941](https://github.com/gradio-app/gradio/pull/941) +- allow audio components to take a string value by [@pngwn](https://github.com/pngwn) in [PR 930](https://github.com/gradio-app/gradio/pull/930) +- static mode for textbox by [@pngwn](https://github.com/pngwn) in [PR 929](https://github.com/gradio-app/gradio/pull/929) +- fix file upload text by [@pngwn](https://github.com/pngwn) in [PR 931](https://github.com/gradio-app/gradio/pull/931) +- tabbed-interface-rewritten by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 958](https://github.com/gradio-app/gradio/pull/958) +- Gan demo fix by [@abidlabs](https://github.com/abidlabs) in [PR 965](https://github.com/gradio-app/gradio/pull/965) +- Blocks analytics by [@abidlabs](https://github.com/abidlabs) in [PR 947](https://github.com/gradio-app/gradio/pull/947) +- Blocks page load by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 963](https://github.com/gradio-app/gradio/pull/963) +- add frontend for page load events by [@pngwn](https://github.com/pngwn) in [PR 967](https://github.com/gradio-app/gradio/pull/967) +- fix i18n and some tweaks by [@pngwn](https://github.com/pngwn) in [PR 966](https://github.com/gradio-app/gradio/pull/966) +- add jinja2 to reqs by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 969](https://github.com/gradio-app/gradio/pull/969) +- Cleaning up `Launchable()` by [@abidlabs](https://github.com/abidlabs) in [PR 968](https://github.com/gradio-app/gradio/pull/968) +- Fix #944 by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 971](https://github.com/gradio-app/gradio/pull/971) +- New Blocks Demo: neural instrument cloning by [@abidlabs](https://github.com/abidlabs) in [PR 975](https://github.com/gradio-app/gradio/pull/975) +- Add huggingface_hub client library by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 973](https://github.com/gradio-app/gradio/pull/973) +- State and variables by [@aliabid94](https://github.com/aliabid94) in [PR 977](https://github.com/gradio-app/gradio/pull/977) +- update-components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 986](https://github.com/gradio-app/gradio/pull/986) +- ensure dataframe updates as expected by [@pngwn](https://github.com/pngwn) in [PR 
981](https://github.com/gradio-app/gradio/pull/981) +- test-guideline by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 990](https://github.com/gradio-app/gradio/pull/990) +- Issue #785: add footer by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 972](https://github.com/gradio-app/gradio/pull/972) +- indentation fix by [@abidlabs](https://github.com/abidlabs) in [PR 993](https://github.com/gradio-app/gradio/pull/993) +- missing quote by [@aliabd](https://github.com/aliabd) in [PR 996](https://github.com/gradio-app/gradio/pull/996) +- added interactive parameter to components by [@abidlabs](https://github.com/abidlabs) in [PR 992](https://github.com/gradio-app/gradio/pull/992) +- custom-components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 985](https://github.com/gradio-app/gradio/pull/985) +- Refactor component shortcuts by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 995](https://github.com/gradio-app/gradio/pull/995) +- Plot Component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 805](https://github.com/gradio-app/gradio/pull/805) +- updated PyPi version to 2.9.2 by [@abidlabs](https://github.com/abidlabs) in [PR 1002](https://github.com/gradio-app/gradio/pull/1002) +- Release 2.9.3 by [@abidlabs](https://github.com/abidlabs) in [PR 1003](https://github.com/gradio-app/gradio/pull/1003) +- Image3D Examples Fix by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1001](https://github.com/gradio-app/gradio/pull/1001) +- release 2.9.4 by [@abidlabs](https://github.com/abidlabs) in [PR 1006](https://github.com/gradio-app/gradio/pull/1006) +- templates import hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1008](https://github.com/gradio-app/gradio/pull/1008) +- Progress indicator bar by [@aliabid94](https://github.com/aliabid94) in [PR 997](https://github.com/gradio-app/gradio/pull/997) +- Fixed image input for absolute path by [@JefferyChiang](https://github.com/JefferyChiang) in [PR 1004](https://github.com/gradio-app/gradio/pull/1004) +- Model3D + Plot Components by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1010](https://github.com/gradio-app/gradio/pull/1010) +- Gradio Guides: Creating CryptoPunks with GANs by [@NimaBoscarino](https://github.com/NimaBoscarino) in [PR 1000](https://github.com/gradio-app/gradio/pull/1000) +- [BIG PR] Gradio blocks & redesigned components by [@abidlabs](https://github.com/abidlabs) in [PR 880](https://github.com/gradio-app/gradio/pull/880) +- fixed failing test on main by [@abidlabs](https://github.com/abidlabs) in [PR 1023](https://github.com/gradio-app/gradio/pull/1023) +- Use smaller ASR model in external test by [@abidlabs](https://github.com/abidlabs) in [PR 1024](https://github.com/gradio-app/gradio/pull/1024) +- updated PyPi version to 2.9.0b by [@abidlabs](https://github.com/abidlabs) in [PR 1026](https://github.com/gradio-app/gradio/pull/1026) +- Fixing import issues so that the package successfully installs on colab notebooks by [@abidlabs](https://github.com/abidlabs) in [PR 1027](https://github.com/gradio-app/gradio/pull/1027) +- Update website tracker slackbot by [@aliabd](https://github.com/aliabd) in [PR 1037](https://github.com/gradio-app/gradio/pull/1037) +- textbox-autoheight by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1009](https://github.com/gradio-app/gradio/pull/1009) +- Model3D Examples fixes by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1035](https://github.com/gradio-app/gradio/pull/1035) +- GAN Gradio Guide: 
Adjustments to iframe heights by [@NimaBoscarino](https://github.com/NimaBoscarino) in [PR 1042](https://github.com/gradio-app/gradio/pull/1042) +- added better default labels to form components by [@abidlabs](https://github.com/abidlabs) in [PR 1040](https://github.com/gradio-app/gradio/pull/1040) +- Slackbot web tracker fix by [@aliabd](https://github.com/aliabd) in [PR 1043](https://github.com/gradio-app/gradio/pull/1043) +- Plot fixes by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1044](https://github.com/gradio-app/gradio/pull/1044) +- Small fixes to the demos by [@abidlabs](https://github.com/abidlabs) in [PR 1030](https://github.com/gradio-app/gradio/pull/1030) +- fixing demo issue with website by [@aliabd](https://github.com/aliabd) in [PR 1047](https://github.com/gradio-app/gradio/pull/1047) +- [hotfix] HighlightedText by [@aliabid94](https://github.com/aliabid94) in [PR 1046](https://github.com/gradio-app/gradio/pull/1046) +- Update text by [@ronvoluted](https://github.com/ronvoluted) in [PR 1050](https://github.com/gradio-app/gradio/pull/1050) +- Update CONTRIBUTING.md by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1052](https://github.com/gradio-app/gradio/pull/1052) +- fix(ui): Increase contrast for footer by [@ronvoluted](https://github.com/ronvoluted) in [PR 1048](https://github.com/gradio-app/gradio/pull/1048) +- UI design update by [@gary149](https://github.com/gary149) in [PR 1041](https://github.com/gradio-app/gradio/pull/1041) +- updated PyPi version to 2.9.0b8 by [@abidlabs](https://github.com/abidlabs) in [PR 1059](https://github.com/gradio-app/gradio/pull/1059) +- Running, testing, and fixing demos by [@abidlabs](https://github.com/abidlabs) in [PR 1060](https://github.com/gradio-app/gradio/pull/1060) +- Form layout by [@pngwn](https://github.com/pngwn) in [PR 1054](https://github.com/gradio-app/gradio/pull/1054) +- inputless-interfaces by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1038](https://github.com/gradio-app/gradio/pull/1038) +- Update PULL_REQUEST_TEMPLATE.md by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1068](https://github.com/gradio-app/gradio/pull/1068) +- Upgrading node memory to 4gb in website Docker by [@aliabd](https://github.com/aliabd) in [PR 1069](https://github.com/gradio-app/gradio/pull/1069) +- Website reload error by [@aliabd](https://github.com/aliabd) in [PR 1079](https://github.com/gradio-app/gradio/pull/1079) +- fixed favicon issue by [@abidlabs](https://github.com/abidlabs) in [PR 1064](https://github.com/gradio-app/gradio/pull/1064) +- remove-queue-from-events by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1056](https://github.com/gradio-app/gradio/pull/1056) +- Enable vertex colors for OBJs files by [@radames](https://github.com/radames) in [PR 1074](https://github.com/gradio-app/gradio/pull/1074) +- Dark text by [@ronvoluted](https://github.com/ronvoluted) in [PR 1049](https://github.com/gradio-app/gradio/pull/1049) +- Scroll to output by [@pngwn](https://github.com/pngwn) in [PR 1077](https://github.com/gradio-app/gradio/pull/1077) +- Explicitly list pnpm version 6 in contributing guide by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1085](https://github.com/gradio-app/gradio/pull/1085) +- hotfix for encrypt issue by [@abidlabs](https://github.com/abidlabs) in [PR 1096](https://github.com/gradio-app/gradio/pull/1096) +- Release 2.9b9 by [@abidlabs](https://github.com/abidlabs) in [PR 1098](https://github.com/gradio-app/gradio/pull/1098) +- tweak node 
circleci settings by [@pngwn](https://github.com/pngwn) in [PR 1091](https://github.com/gradio-app/gradio/pull/1091) +- Website Reload Error by [@aliabd](https://github.com/aliabd) in [PR 1099](https://github.com/gradio-app/gradio/pull/1099) +- Website Reload: README in demos docker by [@aliabd](https://github.com/aliabd) in [PR 1100](https://github.com/gradio-app/gradio/pull/1100) +- Flagging fixes by [@abidlabs](https://github.com/abidlabs) in [PR 1081](https://github.com/gradio-app/gradio/pull/1081) +- Backend for optional labels by [@abidlabs](https://github.com/abidlabs) in [PR 1080](https://github.com/gradio-app/gradio/pull/1080) +- Optional labels fe by [@pngwn](https://github.com/pngwn) in [PR 1105](https://github.com/gradio-app/gradio/pull/1105) +- clean-deprecated-parameters by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1090](https://github.com/gradio-app/gradio/pull/1090) +- Blocks rendering fix by [@abidlabs](https://github.com/abidlabs) in [PR 1102](https://github.com/gradio-app/gradio/pull/1102) +- Redos #1106 by [@abidlabs](https://github.com/abidlabs) in [PR 1112](https://github.com/gradio-app/gradio/pull/1112) +- Interface types: handle input-only, output-only, and unified interfaces by [@abidlabs](https://github.com/abidlabs) in [PR 1108](https://github.com/gradio-app/gradio/pull/1108) +- Hotfix + New pypi release 2.9b11 by [@abidlabs](https://github.com/abidlabs) in [PR 1118](https://github.com/gradio-app/gradio/pull/1118) +- issue-checkbox by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1122](https://github.com/gradio-app/gradio/pull/1122) +- issue-checkbox-hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1127](https://github.com/gradio-app/gradio/pull/1127) +- Fix demos in website by [@aliabd](https://github.com/aliabd) in [PR 1130](https://github.com/gradio-app/gradio/pull/1130) +- Guide for Gradio ONNX model zoo on Huggingface by [@AK391](https://github.com/AK391) in [PR 1073](https://github.com/gradio-app/gradio/pull/1073) +- ONNX guide fixes by [@aliabd](https://github.com/aliabd) in [PR 1131](https://github.com/gradio-app/gradio/pull/1131) +- Stacked form inputs css by [@gary149](https://github.com/gary149) in [PR 1134](https://github.com/gradio-app/gradio/pull/1134) +- made default value in textbox empty string by [@abidlabs](https://github.com/abidlabs) in [PR 1135](https://github.com/gradio-app/gradio/pull/1135) +- Examples UI by [@gary149](https://github.com/gary149) in [PR 1121](https://github.com/gradio-app/gradio/pull/1121) +- Chatbot custom color support by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1092](https://github.com/gradio-app/gradio/pull/1092) +- highlighted text colors by [@pngwn](https://github.com/pngwn) in [PR 1119](https://github.com/gradio-app/gradio/pull/1119) +- pin to pnpm 6 for now by [@pngwn](https://github.com/pngwn) in [PR 1147](https://github.com/gradio-app/gradio/pull/1147) +- Restore queue in Blocks by [@aliabid94](https://github.com/aliabid94) in [PR 1137](https://github.com/gradio-app/gradio/pull/1137) +- add select event for tabitems by [@pngwn](https://github.com/pngwn) in [PR 1154](https://github.com/gradio-app/gradio/pull/1154) +- max_lines + autoheight for textbox by [@pngwn](https://github.com/pngwn) in [PR 1153](https://github.com/gradio-app/gradio/pull/1153) +- use color palette for chatbot by [@pngwn](https://github.com/pngwn) in [PR 1152](https://github.com/gradio-app/gradio/pull/1152) +- Timeseries improvements by [@pngwn](https://github.com/pngwn) in [PR 
1149](https://github.com/gradio-app/gradio/pull/1149) +- move styling for interface panels to frontend by [@pngwn](https://github.com/pngwn) in [PR 1146](https://github.com/gradio-app/gradio/pull/1146) +- html tweaks by [@pngwn](https://github.com/pngwn) in [PR 1145](https://github.com/gradio-app/gradio/pull/1145) +- Issue #768: Support passing none to resize and crop image by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1144](https://github.com/gradio-app/gradio/pull/1144) +- image gallery component + img css by [@aliabid94](https://github.com/aliabid94) in [PR 1140](https://github.com/gradio-app/gradio/pull/1140) +- networking tweak by [@abidlabs](https://github.com/abidlabs) in [PR 1143](https://github.com/gradio-app/gradio/pull/1143) +- Allow enabling queue per event listener by [@aliabid94](https://github.com/aliabid94) in [PR 1155](https://github.com/gradio-app/gradio/pull/1155) +- config hotfix and v. 2.9b23 by [@abidlabs](https://github.com/abidlabs) in [PR 1158](https://github.com/gradio-app/gradio/pull/1158) +- Custom JS calls by [@aliabid94](https://github.com/aliabid94) in [PR 1082](https://github.com/gradio-app/gradio/pull/1082) +- Small fixes: queue default fix, ffmpeg installation message by [@abidlabs](https://github.com/abidlabs) in [PR 1159](https://github.com/gradio-app/gradio/pull/1159) +- formatting by [@abidlabs](https://github.com/abidlabs) in [PR 1161](https://github.com/gradio-app/gradio/pull/1161) +- enable flex grow for gr-box by [@radames](https://github.com/radames) in [PR 1165](https://github.com/gradio-app/gradio/pull/1165) +- 1148 loading by [@pngwn](https://github.com/pngwn) in [PR 1164](https://github.com/gradio-app/gradio/pull/1164) +- Put enable_queue kwarg back in launch() by [@aliabid94](https://github.com/aliabid94) in [PR 1167](https://github.com/gradio-app/gradio/pull/1167) +- A few small fixes by [@abidlabs](https://github.com/abidlabs) in [PR 1171](https://github.com/gradio-app/gradio/pull/1171) +- Hotfix for dropdown component by [@abidlabs](https://github.com/abidlabs) in [PR 1172](https://github.com/gradio-app/gradio/pull/1172) +- use secondary buttons in interface by [@pngwn](https://github.com/pngwn) in [PR 1173](https://github.com/gradio-app/gradio/pull/1173) +- 1183 component height by [@pngwn](https://github.com/pngwn) in [PR 1185](https://github.com/gradio-app/gradio/pull/1185) +- 962 dataframe by [@pngwn](https://github.com/pngwn) in [PR 1186](https://github.com/gradio-app/gradio/pull/1186) +- update-contributing by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1188](https://github.com/gradio-app/gradio/pull/1188) +- Table tweaks by [@pngwn](https://github.com/pngwn) in [PR 1195](https://github.com/gradio-app/gradio/pull/1195) +- wrap tab content in column by [@pngwn](https://github.com/pngwn) in [PR 1200](https://github.com/gradio-app/gradio/pull/1200) +- WIP: Add dark mode support by [@gary149](https://github.com/gary149) in [PR 1187](https://github.com/gradio-app/gradio/pull/1187) +- Restored /api/predict/ endpoint for Interfaces by [@abidlabs](https://github.com/abidlabs) in [PR 1199](https://github.com/gradio-app/gradio/pull/1199) +- hltext-label by [@pngwn](https://github.com/pngwn) in [PR 1204](https://github.com/gradio-app/gradio/pull/1204) +- add copy functionality to json by [@pngwn](https://github.com/pngwn) in [PR 1205](https://github.com/gradio-app/gradio/pull/1205) +- Update component config by [@aliabid94](https://github.com/aliabid94) in [PR 1089](https://github.com/gradio-app/gradio/pull/1089) +- fix 
placeholder prompt by [@pngwn](https://github.com/pngwn) in [PR 1215](https://github.com/gradio-app/gradio/pull/1215) +- ensure webcam video value is propagated correctly by [@pngwn](https://github.com/pngwn) in [PR 1218](https://github.com/gradio-app/gradio/pull/1218) +- Automatic word-break in highlighted text, combine_adjacent support by [@aliabid94](https://github.com/aliabid94) in [PR 1209](https://github.com/gradio-app/gradio/pull/1209) +- async-function-support by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1190](https://github.com/gradio-app/gradio/pull/1190) +- Sharing fix for assets by [@aliabid94](https://github.com/aliabid94) in [PR 1208](https://github.com/gradio-app/gradio/pull/1208) +- Hotfixes for course demos by [@abidlabs](https://github.com/abidlabs) in [PR 1222](https://github.com/gradio-app/gradio/pull/1222) +- Allow Custom CSS by [@aliabid94](https://github.com/aliabid94) in [PR 1170](https://github.com/gradio-app/gradio/pull/1170) +- share-hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1226](https://github.com/gradio-app/gradio/pull/1226) +- tweaks by [@pngwn](https://github.com/pngwn) in [PR 1229](https://github.com/gradio-app/gradio/pull/1229) +- white space for class concatenation by [@radames](https://github.com/radames) in [PR 1228](https://github.com/gradio-app/gradio/pull/1228) +- Tweaks by [@pngwn](https://github.com/pngwn) in [PR 1230](https://github.com/gradio-app/gradio/pull/1230) +- css tweaks by [@pngwn](https://github.com/pngwn) in [PR 1235](https://github.com/gradio-app/gradio/pull/1235) +- ensure defaults height match for media inputs by [@pngwn](https://github.com/pngwn) in [PR 1236](https://github.com/gradio-app/gradio/pull/1236) +- Default Label label value by [@radames](https://github.com/radames) in [PR 1239](https://github.com/gradio-app/gradio/pull/1239) +- update-shortcut-syntax by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1234](https://github.com/gradio-app/gradio/pull/1234) +- Update version.txt by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1244](https://github.com/gradio-app/gradio/pull/1244) +- Layout bugs by [@pngwn](https://github.com/pngwn) in [PR 1246](https://github.com/gradio-app/gradio/pull/1246) +- Update demo by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1253](https://github.com/gradio-app/gradio/pull/1253) +- Button default name by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1243](https://github.com/gradio-app/gradio/pull/1243) +- Labels spacing by [@gary149](https://github.com/gary149) in [PR 1254](https://github.com/gradio-app/gradio/pull/1254) +- add global loader for gradio app by [@pngwn](https://github.com/pngwn) in [PR 1251](https://github.com/gradio-app/gradio/pull/1251) +- ui apis for dalle-mini by [@pngwn](https://github.com/pngwn) in [PR 1258](https://github.com/gradio-app/gradio/pull/1258) +- Add precision to Number, backend only by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1125](https://github.com/gradio-app/gradio/pull/1125) +- Website Design Changes by [@abidlabs](https://github.com/abidlabs) in [PR 1015](https://github.com/gradio-app/gradio/pull/1015) +- Small fixes for multiple demos compatible with 3.0 by [@radames](https://github.com/radames) in [PR 1257](https://github.com/gradio-app/gradio/pull/1257) +- Issue #1160: Model 3D component not destroyed correctly by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1219](https://github.com/gradio-app/gradio/pull/1219) +- Fixes to components by 
[@abidlabs](https://github.com/abidlabs) in [PR 1260](https://github.com/gradio-app/gradio/pull/1260)
+- layout docs by [@abidlabs](https://github.com/abidlabs) in [PR 1263](https://github.com/gradio-app/gradio/pull/1263)
+- Static forms by [@pngwn](https://github.com/pngwn) in [PR 1264](https://github.com/gradio-app/gradio/pull/1264)
+- Cdn assets by [@pngwn](https://github.com/pngwn) in [PR 1265](https://github.com/gradio-app/gradio/pull/1265)
+- update logo by [@gary149](https://github.com/gary149) in [PR 1266](https://github.com/gradio-app/gradio/pull/1266)
+- fix slider by [@aliabid94](https://github.com/aliabid94) in [PR 1268](https://github.com/gradio-app/gradio/pull/1268)
+- maybe fix auth in iframes by [@pngwn](https://github.com/pngwn) in [PR 1261](https://github.com/gradio-app/gradio/pull/1261)
+- Improves "Getting Started" guide by [@abidlabs](https://github.com/abidlabs) in [PR 1269](https://github.com/gradio-app/gradio/pull/1269)
+- Add embedded demos to website by [@aliabid94](https://github.com/aliabid94) in [PR 1270](https://github.com/gradio-app/gradio/pull/1270)
+- Label hotfixes by [@abidlabs](https://github.com/abidlabs) in [PR 1281](https://github.com/gradio-app/gradio/pull/1281)
+- General tweaks by [@pngwn](https://github.com/pngwn) in [PR 1276](https://github.com/gradio-app/gradio/pull/1276)
+- only affect links within the document by [@pngwn](https://github.com/pngwn) in [PR 1282](https://github.com/gradio-app/gradio/pull/1282)
+- release 3.0b9 by [@abidlabs](https://github.com/abidlabs) in [PR 1283](https://github.com/gradio-app/gradio/pull/1283)
+- Dm by [@pngwn](https://github.com/pngwn) in [PR 1284](https://github.com/gradio-app/gradio/pull/1284)
+- Website fixes by [@aliabd](https://github.com/aliabd) in [PR 1286](https://github.com/gradio-app/gradio/pull/1286)
+- Create Streamables by [@aliabid94](https://github.com/aliabid94) in [PR 1279](https://github.com/gradio-app/gradio/pull/1279)
+- ensure table works on mobile by [@pngwn](https://github.com/pngwn) in [PR 1277](https://github.com/gradio-app/gradio/pull/1277)
+- changes by [@aliabid94](https://github.com/aliabid94) in [PR 1287](https://github.com/gradio-app/gradio/pull/1287)
+- demo alignment on landing page by [@aliabd](https://github.com/aliabd) in [PR 1288](https://github.com/gradio-app/gradio/pull/1288)
+- New meta img by [@aliabd](https://github.com/aliabd) in [PR 1289](https://github.com/gradio-app/gradio/pull/1289)
+- updated PyPi version to 3.0 by [@abidlabs](https://github.com/abidlabs) in [PR 1290](https://github.com/gradio-app/gradio/pull/1290)
+- Fix site by [@aliabid94](https://github.com/aliabid94) in [PR 1291](https://github.com/gradio-app/gradio/pull/1291)
+- Mobile responsive guides by [@aliabd](https://github.com/aliabd) in [PR 1293](https://github.com/gradio-app/gradio/pull/1293)
+- Update readme by [@abidlabs](https://github.com/abidlabs) in [PR 1292](https://github.com/gradio-app/gradio/pull/1292)
+- gif by [@abidlabs](https://github.com/abidlabs) in [PR 1296](https://github.com/gradio-app/gradio/pull/1296)
+- Allow decoding headerless b64 string by [@1lint](https://github.com/1lint) in [PR 4031](https://github.com/gradio-app/gradio/pull/4031)
+
+### Contributors Shoutout:
+
+- [@JefferyChiang](https://github.com/JefferyChiang) made their first contribution in [PR 1004](https://github.com/gradio-app/gradio/pull/1004)
+- [@NimaBoscarino](https://github.com/NimaBoscarino) made their first contribution in [PR 1000](https://github.com/gradio-app/gradio/pull/1000)
+- 
[@ronvoluted](https://github.com/ronvoluted) made their first contribution in [PR 1050](https://github.com/gradio-app/gradio/pull/1050) +- [@radames](https://github.com/radames) made their first contribution in [PR 1074](https://github.com/gradio-app/gradio/pull/1074) +- [@freddyaboulton](https://github.com/freddyaboulton) made their first contribution in [PR 1085](https://github.com/gradio-app/gradio/pull/1085) +- [@liteli1987gmail](https://github.com/liteli1987gmail) & [@chenglu](https://github.com/chenglu) made their first contribution in [PR 4767](https://github.com/gradio-app/gradio/pull/4767) \ No newline at end of file diff --git a/testbed/gradio-app__gradio/CITATION.cff b/testbed/gradio-app__gradio/CITATION.cff new file mode 100644 index 0000000000000000000000000000000000000000..0768f8c42506e23405e4c66661b327ed32028208 --- /dev/null +++ b/testbed/gradio-app__gradio/CITATION.cff @@ -0,0 +1,45 @@ +cff-version: 1.2.0 +message: Please cite this project using these metadata. +title: "Gradio: Hassle-free sharing and testing of ML models in the wild" +abstract: >- + Accessibility is a major challenge of machine learning (ML). + Typical ML models are built by specialists and require + specialized hardware/software as well as ML experience to + validate. This makes it challenging for non-technical + collaborators and endpoint users (e.g. physicians) to easily + provide feedback on model development and to gain trust in + ML. The accessibility challenge also makes collaboration + more difficult and limits the ML researcher's exposure to + realistic data and scenarios that occur in the wild. To + improve accessibility and facilitate collaboration, we + developed an open-source Python package, Gradio, which + allows researchers to rapidly generate a visual interface + for their ML models. Gradio makes accessing any ML model as + easy as sharing a URL. Our development of Gradio is informed + by interviews with a number of machine learning researchers + who participate in interdisciplinary collaborations. Their + feedback identified that Gradio should support a variety of + interfaces and frameworks, allow for easy sharing of the + interface, allow for input manipulation and interactive + inference by the domain expert, as well as allow embedding + the interface in iPython notebooks. We developed these + features and carried out a case study to understand Gradio's + usefulness and usability in the setting of a machine + learning collaboration between a researcher and a + cardiologist. 
+authors:
+  - family-names: Abid
+    given-names: Abubakar
+  - family-names: Abdalla
+    given-names: Ali
+  - family-names: Abid
+    given-names: Ali
+  - family-names: Khan
+    given-names: Dawood
+  - family-names: Alfozan
+    given-names: Abdulrahman
+  - family-names: Zou
+    given-names: James
+doi: 10.48550/arXiv.1906.02569
+date-released: 2019-06-06
+url: https://arxiv.org/abs/1906.02569
diff --git a/testbed/gradio-app__gradio/CONTRIBUTING.md b/testbed/gradio-app__gradio/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..585772b9d5337f849310dca6731bc1afa8c35ca7
--- /dev/null
+++ b/testbed/gradio-app__gradio/CONTRIBUTING.md
@@ -0,0 +1,138 @@
+# Contributing to Gradio
+
+Prerequisites:
+
+- [Python 3.8+](https://www.python.org/downloads/)
+- [Node.js v16.14+](https://nodejs.dev/en/download/package-manager/) (only needed if you are making changes to the frontend)
+- [pnpm 8.1+](https://pnpm.io/8.x/installation) (only needed if you are making changes to the frontend)
+
+More than 80 awesome developers have contributed to the `gradio` library, and we'd be thrilled if you would like to be the next `gradio` contributor! Start by cloning this repo and installing Gradio locally:
+
+### Install Gradio locally from the `main` branch
+
+- Clone this repo
+- Navigate to the repo folder and run
+
+```bash
+bash scripts/install_gradio.sh
+```
+
+- Build the front end
+
+```
+bash scripts/build_frontend.sh
+```
+
+### Install development requirements
+
+In order to be able to run the Python linter, formatter, and unit tests, do the following:
+
+- Navigate to the repo folder and install test requirements (note that it is highly recommended to use a virtual environment running **Python 3.9** since the versions are pinned)
+
+```
+bash scripts/install_test_requirements.sh
+```
+
+- If you have a different Python version and conflicting packages during the installation, please first run:
+
+```
+bash scripts/create_test_requirements.sh
+```
+
+### Using dev containers
+
+Instead of the above steps, you can alternatively use dev containers. This is supported on all platforms (macOS/Windows/Linux).
+
+Prerequisites:
+
+- An editor which supports dev containers, like VS Code
+- Docker support on the host computer:
+  - macOS: [Docker Desktop 2.0+](https://www.docker.com/products/docker-desktop/)
+  - Windows: [Docker Desktop 2.0+](https://www.docker.com/products/docker-desktop/)
+  - Linux: [Docker CE/EE 18.06+](https://docs.docker.com/get-docker/) and [Docker Compose 1.21+](https://docs.docker.com/compose/install/)
+- If using VS Code, the [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension
+
+Steps:
+
+- Clone repository
+- Open it in editor
+- For VS Code, execute `Dev Containers: Reopen in container` command
+
+For detailed instructions, please see the [Dev Containers tutorial](https://code.visualstudio.com/docs/devcontainers/tutorial).
+
+### Extra tidbits
+
+- You can run gradio scripts in reload mode, which will watch for changes in the `gradio` folder and reload the app if changes are made.
+
+```
+gradio app.py
+```
+
+- To develop the frontend app, you should also follow [js/README.md](js/README.md).
+
+- To run all of the tests, do:
+
+```
+bash scripts/run_all_tests.sh
+```
+
+### Structure of the Repository
+
+It's helpful to know the overall structure of the repository so that you can focus on the part of the source code you'd like to contribute to:
+
+- `/gradio`: contains the Python source code for the library
+  - `/gradio/interface.py`: contains the Python source code for the core `Interface` class
+  - `/gradio/blocks.py`: contains the Python source code for the core `Blocks` class
+  - `/gradio/components.py`: contains the Python source code for the `components`; you can add your custom components here.
+- `/js`: contains the HTML/JS/CSS source code for the library ([start here for frontend changes](/js/README.md))
+- `/test`: contains Python unit tests for the library
+- `/demo`: contains demos that are used in the documentation; you can find `Gradio` examples here.
+- `/website`: contains the code for the Gradio website (www.gradio.app). See the README in the `/website` folder for more details
+
+### Continuous Integration and Testing
+
+All PRs must pass the continuous integration tests before merging. To test locally, you can run `python -m unittest` from the repo directory.
+
+## Submitting PRs
+
+All PRs should be against `main`. Direct commits to main are blocked, and PRs require an approving review to merge into main. By convention, the Gradio maintainers will review PRs when:
+
+- An initial review has been requested, and
+- A description of the change (with a link to the GitHub PR) has been added to CHANGELOG.md, and
+- A maintainer (@abidlabs, @aliabid94, @aliabd, @AK391, @dawoodkhan82, @pngwn, @freddyaboulton) is tagged in the PR comments and asked to complete a review
+
+We ask that you make sure initial CI checks are passing before requesting a review. One of the Gradio maintainers will merge the PR when all the checks are passing.
+
+Do not forget to format the backend and the frontend before pushing:
+
+```
+bash scripts/format_backend.sh
+```
+
+```
+bash scripts/format_frontend.sh
+```
+
+## CI checks
+
+Currently the following checks are run in CI:
+
+### Gradio library (`gradio` package)
+
+```
+bash scripts/lint_backend.sh
+bash scripts/type_check_backend.sh
+python -m pytest -m "not flaky" --ignore=client
+python -m pytest -m "flaky" --ignore=client
+```
+
+### Gradio client (`gradio_client` package)
+
+```
+cd client/python
+bash scripts/lint.sh
+python -m pytest -m "not flaky"
+python -m pytest -m "flaky"
+```
+
+_Could these guidelines be clearer? Feel free to open a PR to help us facilitate open-source contributions!_
diff --git a/testbed/gradio-app__gradio/LICENSE b/testbed/gradio-app__gradio/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/testbed/gradio-app__gradio/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/testbed/gradio-app__gradio/SECURITY.md b/testbed/gradio-app__gradio/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..d5619fb774c461d8a7f6ec806b92a6efa28004ca --- /dev/null +++ b/testbed/gradio-app__gradio/SECURITY.md @@ -0,0 +1,5 @@ +# Security Policy + +## Reporting a Vulnerability + +If you discover a security vulnerability, we would be very grateful if you could email us at team@gradio.app. We prefer this to opening a public issue. We take all vulnerability reports seriously and will work to patch each vulnerability immediately. Whenever possible, we will credit the person or people who reported the vulnerability after it has been patched. diff --git a/testbed/gradio-app__gradio/build_pypi.sh b/testbed/gradio-app__gradio/build_pypi.sh new file mode 100644 index 0000000000000000000000000000000000000000..f54d6798bc243d9480bdd55858d57803083f0580 --- /dev/null +++ b/testbed/gradio-app__gradio/build_pypi.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +cd "$(dirname ${0})" + +# You should update the version in package.json before running this script +FILE="gradio/package.json" +new_version=$(python -c "import json; f = open('$FILE', 'r'); data = json.load(f); print(data['version']); f.close();") +GRADIO_VERSION=$new_version + +rm -rf gradio/templates/frontend +rm -rf gradio/templates/cdn +pnpm i --frozen-lockfile --ignore-scripts +GRADIO_VERSION=$new_version pnpm build +GRADIO_VERSION=$new_version pnpm build:cdn +aws s3 cp gradio/templates/cdn "s3://gradio/${new_version}/" --recursive --region us-west-2 +cp gradio/templates/cdn/index.html gradio/templates/frontend/share.html + +rm -rf dist/* +rm -rf build/* +python3 -m build diff --git a/testbed/gradio-app__gradio/globals.d.ts b/testbed/gradio-app__gradio/globals.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..da2de542a724d45f07dfeaed6162654740f08255 --- /dev/null +++ b/testbed/gradio-app__gradio/globals.d.ts @@ -0,0 +1,35 @@ +declare global { + interface Window { + __gradio_mode__: "app" | "website"; + __gradio_space__: string | null; + launchGradio: Function; + launchGradioFromSpaces: Function; + gradio_config: Config; + scoped_css_attach: (link: HTMLLinkElement) => void; + __is_colab__: boolean; + parentIFrame?: { + scrollTo: (x: number, y: number) => void; + }; + } +} + +export interface Config { + auth_required: boolean | undefined; + auth_message: string; + components: any[]; + css: string | null; + dependencies: any[]; + dev_mode: boolean; + enable_queue: boolean; + layout: any; + mode: "blocks" | "interface"; + root: string; + theme: string; + title: string; + version: string; + space_id: string | null; + is_colab: boolean; + show_api: boolean; + stylesheets: string[]; + path: string; +} diff --git a/testbed/gradio-app__gradio/js/.npmrc b/testbed/gradio-app__gradio/js/.npmrc new file mode 100644 index 0000000000000000000000000000000000000000..fa4e095233fbfba3ff88d57fb03190fd96b72d99 --- /dev/null +++ b/testbed/gradio-app__gradio/js/.npmrc @@ -0,0 +1 @@ +strict-peer-dependencies=false \ No newline at end of file diff --git a/testbed/gradio-app__gradio/js/README.md b/testbed/gradio-app__gradio/js/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1fdadfe708db3f2cab5c7b693b1b3d5c66219697 --- /dev/null +++ b/testbed/gradio-app__gradio/js/README.md @@ -0,0 +1,108 @@ +# gradio-ui + +This folder contains all of the Gradio UI and component source code.
+ +- [set up](#setup) +- [running the application](#running-the-application) +- [local development](#local-development) +- [building for production](#building-for-production) +- [quality checks](#quality-checks) +- [ci checks](#ci-checks) + +## setup + +This folder is managed as a 'monorepo', a multi-package repository, which makes dependency management very simple. In order to do this we use `pnpm` as our package manager. + +Make sure [`pnpm`](https://pnpm.io/) is installed by [following the installation instructions for your system](https://pnpm.io/installation). + +You will also need `node`, which you probably already have. + +## running the application + +Install all dependencies: + +```bash +pnpm i +``` + +This will install the dependencies for all packages and link any local packages. + +## local development + +To develop locally, open two terminal tabs from the root of the repository. + +Run the Python test server from the root directory: + +```bash +cd demo/kitchen_sink +python run.py +``` + +This will start a development server on port `7860` that the web app is expecting. + +Run the web app: + +```bash +pnpm dev +``` + +## building for production + +Run the build: + +```bash +pnpm build +``` + +This will create the necessary files in `js/app/public` and also in `gradio/templates/frontend`. + +## quality checks + +The repo currently has two quality checks that can be run locally and are also run in CI. + +### formatting + +Formatting is handled by [`prettier`](https://prettier.io/) to ensure consistent formatting and prevent style-focused conversations. Formatting failures will fail CI and should be resolved before merging. + +To check formatting: + +```bash +pnpm format:check +``` + +If you have formatting failures, you can run the following command to fix them: + +```bash +pnpm format:write +``` + +### type checking + +We use [TypeScript](https://www.typescriptlang.org/) to provide static types to JavaScript code. These checks are also run in CI. + +To typecheck the code: + +```bash +pnpm ts:check +``` + +## ci checks + +Currently the following checks are run in CI: + +### static checks + +- Format check (`pnpm format:check`) +- Build css (`pnpm css`) +- Build client (`pnpm build`) +- Type check (`pnpm ts:check`) +- Unit tests (`pnpm test:run`) + +### functional test + +``` +pip install -r demo/outbreak_forecast/requirements.txt +pnpm exec playwright install chromium +pnpm exec playwright install-deps chromium +pnpm test:browser:full +``` diff --git a/testbed/gradio-app__gradio/js/jsx.d.ts b/testbed/gradio-app__gradio/js/jsx.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..803c0429b845fc6317a996d430c78801767158eb --- /dev/null +++ b/testbed/gradio-app__gradio/js/jsx.d.ts @@ -0,0 +1,6 @@ +declare namespace svelteHTML { + interface HTMLAttributes { + webkitdirectory?: boolean | string; + mozdirectory?: boolean | string; + } +} diff --git a/testbed/gradio-app__gradio/js/lite/CHANGELOG.md b/testbed/gradio-app__gradio/js/lite/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..483cb576e8d2de0e9537e522b3550c4ca48ec6d3 --- /dev/null +++ b/testbed/gradio-app__gradio/js/lite/CHANGELOG.md @@ -0,0 +1,39 @@ +# @gradio/lite + +## 0.3.1 + +### Features + +- [#5226](https://github.com/gradio-app/gradio/pull/5226) [`64039707`](https://github.com/gradio-app/gradio/commit/640397075d17307dd4f0713d063ef3d009a87aa0) - add gradio as a devdep of @gradio/lite. Thanks [@pngwn](https://github.com/pngwn)!
+ +## 0.3.0 + +### Minor Changes + +- [#4785](https://github.com/gradio-app/gradio/pull/4785) [`da0e9447`](https://github.com/gradio-app/gradio/commit/da0e94479a235de35844a636efb5833cb1fe9aeb) Thanks [@whitphx](https://github.com/whitphx)! - Add methods to execute mounted Python files + +### Patch Changes + +- [#4788](https://github.com/gradio-app/gradio/pull/4788) [`8d0d4e0a`](https://github.com/gradio-app/gradio/commit/8d0d4e0a8ebe2425aef24a6f21b88598684b0965) Thanks [@whitphx](https://github.com/whitphx)! - Generate a prebuilt themed CSS file at build time + +- [#4826](https://github.com/gradio-app/gradio/pull/4826) [`f0150c62`](https://github.com/gradio-app/gradio/commit/f0150c6260d657b150b73f0eecabd10b19d297c8) Thanks [@whitphx](https://github.com/whitphx)! - Unload the local modules before re-executing a Python script so that edits to the modules are reflected + +- [#4779](https://github.com/gradio-app/gradio/pull/4779) [`80b49965`](https://github.com/gradio-app/gradio/commit/80b4996595d70167313d9abf29fb4f35abe66a0f) Thanks [@whitphx](https://github.com/whitphx)! - Add file system APIs and an imperative package install method + +- [#4784](https://github.com/gradio-app/gradio/pull/4784) [`f757febe`](https://github.com/gradio-app/gradio/commit/f757febe181f0555aa01d4d349f92081819e2691) Thanks [@whitphx](https://github.com/whitphx)! - Remove the development code embedded in a dev HTML file so it will not be in a final bundle + +- [#4785](https://github.com/gradio-app/gradio/pull/4785) [`da0e9447`](https://github.com/gradio-app/gradio/commit/da0e94479a235de35844a636efb5833cb1fe9aeb) Thanks [@whitphx](https://github.com/whitphx)! - Add controller.unmount() + +- [#4846](https://github.com/gradio-app/gradio/pull/4846) [`76acf3cb`](https://github.com/gradio-app/gradio/commit/76acf3cb0b258c0e6bb38d611d766e5e54b68437) Thanks [@whitphx](https://github.com/whitphx)! - Fix the package name spec of markdown-it on the Wasm worker + +## 0.2.0 + +### Minor Changes + +- [#4732](https://github.com/gradio-app/gradio/pull/4732) [`1dc3c1a9`](https://github.com/gradio-app/gradio/commit/1dc3c1a9a2063daffc00d9231c1498d983ebc3bf) Thanks [@whitphx](https://github.com/whitphx)! - Add an imperative API to rerun the Python code and refresh the frontend + +## 0.1.1 + +### Patch Changes + +- [#4731](https://github.com/gradio-app/gradio/pull/4731) [`f9171288`](https://github.com/gradio-app/gradio/commit/f9171288d4cf0174952628276385fb553556c38a) Thanks [@whitphx](https://github.com/whitphx)! - Load the worker file from a different origin, e.g. CDN \ No newline at end of file diff --git a/testbed/gradio-app__gradio/js/lite/index.html b/testbed/gradio-app__gradio/js/lite/index.html new file mode 100644 index 0000000000000000000000000000000000000000..33d0eceee5d5b26df36f233b05967092b40b1d84 --- /dev/null +++ b/testbed/gradio-app__gradio/js/lite/index.html @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + +
+ + + + diff --git a/testbed/gradio-app__gradio/js/lite/package.json b/testbed/gradio-app__gradio/js/lite/package.json new file mode 100644 index 0000000000000000000000000000000000000000..e0ef1f6c328990d03c1dd00ce0b78ed62f403a5a --- /dev/null +++ b/testbed/gradio-app__gradio/js/lite/package.json @@ -0,0 +1,18 @@ +{ + "name": "@gradio/lite", + "version": "0.3.1", + "description": "Serverless Gradio", + "type": "module", + "main": "dist/index.js", + "author": "Gradio Team", + "license": "Apache-2.0", + "files": [ + "dist" + ], + "scripts": { + "build": "pnpm --filter @gradio/app build:lite" + }, + "devDependencies": { + "gradio": "workspace:^" + } +} diff --git a/testbed/gradio-app__gradio/js/timeseries/CHANGELOG.md b/testbed/gradio-app__gradio/js/timeseries/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..323928fadd300ad710b01580b0bc95b39133d2fc --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/CHANGELOG.md @@ -0,0 +1,62 @@ +# @gradio/timeseries + +## 0.0.6 + +### Patch Changes + +- Updated dependencies [[`75ddeb390`](https://github.com/gradio-app/gradio/commit/75ddeb390d665d4484667390a97442081b49a423)]: + - @gradio/upload@0.3.0 + +## 0.0.5 + +### Patch Changes + +- Updated dependencies [[`afac0006`](https://github.com/gradio-app/gradio/commit/afac0006337ce2840cf497cd65691f2f60ee5912)]: + - @gradio/statustracker@0.2.0 + - @gradio/theme@0.1.0 + - @gradio/utils@0.1.1 + - @gradio/atoms@0.1.2 + - @gradio/upload@0.2.1 + +## 0.0.4 + +### Patch Changes + +- Updated dependencies [[`abf1c57d`](https://github.com/gradio-app/gradio/commit/abf1c57d7d85de0df233ee3b38aeb38b638477db), [`79d8f9d8`](https://github.com/gradio-app/gradio/commit/79d8f9d891901683c5a1b7486efb44eab2478c96)]: + - @gradio/icons@0.1.0 + - @gradio/utils@0.1.0 + - @gradio/upload@0.2.0 + - @gradio/atoms@0.1.1 + - @gradio/statustracker@0.1.1 + +## 0.0.3 + +### Highlights + +#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec)) + +##### Improved markdown support + +We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`, including syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling. + +##### Various performance improvements + +These improvements will be particularly beneficial to large applications. + +- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast. +- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%). +- Corrected an issue that was causing markdown to re-render infinitely. +- Ensured that the `gr.3DModel` does not re-render prematurely. + +Thanks [@pngwn](https://github.com/pngwn)! + +### Features + +- [#5215](https://github.com/gradio-app/gradio/pull/5215) [`fbdad78a`](https://github.com/gradio-app/gradio/commit/fbdad78af4c47454cbb570f88cc14bf4479bbceb) - Lazy load interactive or static variants of a component individually, rather than loading both variants regardless. This change will improve performance for many applications. Thanks [@pngwn](https://github.com/pngwn)!
+ +## 0.0.2 + +### Patch Changes + +- Updated dependencies [[`667875b2`](https://github.com/gradio-app/gradio/commit/667875b2441753e74d25bd9d3c8adedd8ede11cd)]: + - @gradio/upload@0.0.3 diff --git a/testbed/gradio-app__gradio/js/timeseries/example/Timeseries.svelte b/testbed/gradio-app__gradio/js/timeseries/example/Timeseries.svelte new file mode 100644 index 0000000000000000000000000000000000000000..6b6a4a08c938f1b38a8a090480c8e3566e8747af --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/example/Timeseries.svelte @@ -0,0 +1,19 @@ + + +
+ {value} +
+ + diff --git a/testbed/gradio-app__gradio/js/timeseries/example/index.ts b/testbed/gradio-app__gradio/js/timeseries/example/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..2b6cf6974344dfa72a0dbcdfad0c4621b04f71c7 --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/example/index.ts @@ -0,0 +1 @@ +export { default } from "./Timeseries.svelte"; diff --git a/testbed/gradio-app__gradio/js/timeseries/interactive/InteractiveTimeseries.svelte b/testbed/gradio-app__gradio/js/timeseries/interactive/InteractiveTimeseries.svelte new file mode 100644 index 0000000000000000000000000000000000000000..06590ca971d40911ddc20a0dc6c04a5456323c66 --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/interactive/InteractiveTimeseries.svelte @@ -0,0 +1,184 @@ + + + + + + + {#if _value} +
+ + (value = make_dict(x, y))} + {colors} + /> +
+ {:else if value === undefined || value === null} + handle_load(detail)} + include_file_metadata={false} + > + + + {/if} +
+ + diff --git a/testbed/gradio-app__gradio/js/timeseries/interactive/index.ts b/testbed/gradio-app__gradio/js/timeseries/interactive/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..c752390a853044393ee242d0942d7b5a6f8655fb --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/interactive/index.ts @@ -0,0 +1 @@ +export { default } from "./InteractiveTimeseries.svelte"; diff --git a/testbed/gradio-app__gradio/js/timeseries/package.json b/testbed/gradio-app__gradio/js/timeseries/package.json new file mode 100644 index 0000000000000000000000000000000000000000..4e201a7f581fa8b2d4ff9cf346b7f2bf00f2a20d --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/package.json @@ -0,0 +1,34 @@ +{ + "name": "@gradio/timeseries", + "version": "0.0.6", + "description": "Gradio UI packages", + "type": "module", + "main": "index.svelte", + "author": "", + "license": "ISC", + "private": true, + "main_changeset": true, + "exports": { + "./package.json": "./package.json", + "./interactive": "./interactive/index.ts", + "./static": "./static/index.ts", + "./example": "./example/index.ts" + }, + "dependencies": { + "@gradio/atoms": "workspace:^", + "@gradio/icons": "workspace:^", + "@gradio/statustracker": "workspace:^", + "@gradio/theme": "workspace:^", + "@gradio/tooltip": "workspace:^", + "@gradio/upload": "workspace:^", + "@gradio/utils": "workspace:^", + "d3-dsv": "^3.0.1", + "d3-scale": "^4.0.2", + "d3-shape": "^3.2.0" + }, + "devDependencies": { + "@types/d3-dsv": "^3.0.0", + "@types/d3-scale": "^4.0.3", + "@types/d3-shape": "^3.1.1" + } +} diff --git a/testbed/gradio-app__gradio/js/timeseries/shared/Chart.svelte b/testbed/gradio-app__gradio/js/timeseries/shared/Chart.svelte new file mode 100644 index 0000000000000000000000000000000000000000..31cbdcdb42bff89d078a48b5f434c1bb3ecb0449 --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/shared/Chart.svelte @@ -0,0 +1,222 @@ + + +
+
+ {#each _y as { name }} +
+ + {name} +
+ {/each} +
+ + + {#each x_ticks as tick} + y_ticks[y_ticks.length - 1] + ? y_domain[1] + : y_ticks[y_ticks.length - 1] + )} + stroke="#aaa" + /> + + {tick} + + {/each} + + {#each y_ticks as tick} + x_ticks[x_ticks.length - 1] + ? x_domain[1] + : x_ticks[x_ticks.length - 1] + )} + stroke="#aaa" + /> + + + {tick} + + {/each} + + {#if y_domain[1] > y_ticks[y_ticks.length - 1]} + + + {y_domain[1]} + + {/if} + + + {#each _y as { name, values }} + {@const color = color_map[name]} + {#each values as { x, y }} + + {/each} + [scale_x(x), scale_y(y)]) + )} + fill="none" + stroke={color} + stroke-width="3" + /> + {/each} + + {#each _y as { name, values }} + {@const color = color_map[name]} + {#each values as { x, y }} + + {/each} + {/each} + + +
+ {_x.name} +
+
+ + diff --git a/testbed/gradio-app__gradio/js/timeseries/shared/index.ts b/testbed/gradio-app__gradio/js/timeseries/shared/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..f7253133a52f4d2d980ecb6a66553cd0b9376b32 --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/shared/index.ts @@ -0,0 +1 @@ +export { default } from "./Chart.svelte"; diff --git a/testbed/gradio-app__gradio/js/timeseries/shared/utils.ts b/testbed/gradio-app__gradio/js/timeseries/shared/utils.ts new file mode 100644 index 0000000000000000000000000000000000000000..a57aa319d4468b6329f9dd7a1c6ea9c137fd4dc5 --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/shared/utils.ts @@ -0,0 +1,74 @@ +interface XYValue { + x: number; + y: number; +} + +interface ObjectValue { + values: XYValue[]; +} + +export function get_domains( + values: ObjectValue[] | { values: number[] } +): [number, number] { + let _vs: number[]; + if (Array.isArray(values)) { + _vs = values.reduce((acc, { values }) => { + return [...acc, ...values.map(({ y }) => y)]; + }, []); + } else { + _vs = values.values; + } + return [Math.min(..._vs), Math.max(..._vs)]; +} + +interface Row { + name: string; + values: number[]; +} + +interface RowPoint { + name: string; + values: { x: number; y: number }[]; +} + +interface TransformedValues { + x: Row; + y: RowPoint[]; +} + +export function transform_values( + values: Record[], + x?: string, + y?: string[] +): TransformedValues { + const transformed_values = Object.entries( + values[0] + ).reduce( + (acc, next, i) => { + if ((!x && i === 0) || (x && next[0] === x)) { + acc.x.name = next[0]; + } else if (!y || (y && y.includes(next[0]))) { + acc.y.push({ name: next[0], values: [] }); + } + return acc; + }, + { x: { name: "", values: [] }, y: [] } + ); + + for (let i = 0; i < values.length; i++) { + const _a = Object.entries(values[i]); + for (let j = 0; j < _a.length; j++) { + let [name, x] = _a[j]; + if (name === transformed_values.x.name) { + transformed_values.x.values.push(parseFloat(x)); + } else { + transformed_values.y[j - 1].values.push({ + y: parseFloat(_a[j][1]), + x: parseFloat(_a[0][1]) + }); + } + } + } + + return transformed_values; +} diff --git a/testbed/gradio-app__gradio/js/timeseries/static/StaticTimeseries.svelte b/testbed/gradio-app__gradio/js/timeseries/static/StaticTimeseries.svelte new file mode 100644 index 0000000000000000000000000000000000000000..5510b04768070c8383bf3665954f52a0b87158ef --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/static/StaticTimeseries.svelte @@ -0,0 +1,69 @@ + + + + + + + {#if static_data} + + {:else} + + + + {/if} + diff --git a/testbed/gradio-app__gradio/js/timeseries/static/index.ts b/testbed/gradio-app__gradio/js/timeseries/static/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..7be8b360985fdca10dba32bf7d4c856f14322acc --- /dev/null +++ b/testbed/gradio-app__gradio/js/timeseries/static/index.ts @@ -0,0 +1 @@ +export { default } from "./StaticTimeseries.svelte"; diff --git a/testbed/gradio-app__gradio/js/tooltip/README.md b/testbed/gradio-app__gradio/js/tooltip/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5b427fdb3b640a5482b4336ea6d4bbd861d81b5 --- /dev/null +++ b/testbed/gradio-app__gradio/js/tooltip/README.md @@ -0,0 +1,11 @@ +# `@gradio/button` + +```html + + + +``` diff --git a/testbed/gradio-app__gradio/js/tooltip/src/tooltip.ts b/testbed/gradio-app__gradio/js/tooltip/src/tooltip.ts new file mode 100644 index 
0000000000000000000000000000000000000000..82d3a5830df179b5a778b88c615da4e8b540ace7 --- /dev/null +++ b/testbed/gradio-app__gradio/js/tooltip/src/tooltip.ts @@ -0,0 +1,50 @@ +import type { ActionReturn } from "svelte/action"; +import Tooltip from "./Tooltip.svelte"; + +interface ActionArgs { + color: string; + text: string; +} + +export function tooltip( + element: HTMLElement | SVGElement, + { color, text }: ActionArgs +): ActionReturn { + let tooltipComponent: Tooltip; + function mouse_over(event: MouseEvent): MouseEvent { + tooltipComponent = new Tooltip({ + props: { + text, + x: event.pageX, + y: event.pageY, + color + }, + target: document.body + }); + + return event; + } + function mouseMove(event: MouseEvent): void { + tooltipComponent.$set({ + x: event.pageX, + y: event.pageY + }); + } + function mouseLeave(): void { + tooltipComponent.$destroy(); + } + + const el = element as HTMLElement; + + el.addEventListener("mouseover", mouse_over); + el.addEventListener("mouseleave", mouseLeave); + el.addEventListener("mousemove", mouseMove); + + return { + destroy() { + el.removeEventListener("mouseover", mouse_over); + el.removeEventListener("mouseleave", mouseLeave); + el.removeEventListener("mousemove", mouseMove); + } + }; +} diff --git a/testbed/gradio-app__gradio/js/tootils/CHANGELOG.md b/testbed/gradio-app__gradio/js/tootils/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..46fad2b091a1be1f023500f7e3f4e3aa849fb1b1 --- /dev/null +++ b/testbed/gradio-app__gradio/js/tootils/CHANGELOG.md @@ -0,0 +1,22 @@ +# @gradio/tootils + +## 0.0.2 + +### Highlights + +#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec)) + +##### Improved markdown support + +We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`, including syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling. + +##### Various performance improvements + +These improvements will be particularly beneficial to large applications. + +- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast. +- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%). +- Corrected an issue that was causing markdown to re-render infinitely. +- Ensured that the `gr.3DModel` does not re-render prematurely. + + Thanks [@pngwn](https://github.com/pngwn)!
\ No newline at end of file diff --git a/testbed/gradio-app__gradio/js/tootils/README.md b/testbed/gradio-app__gradio/js/tootils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5b427fdb3b640a5482b4336ea6d4bbd861d81b5 --- /dev/null +++ b/testbed/gradio-app__gradio/js/tootils/README.md @@ -0,0 +1,11 @@ +# `@gradio/button` + +```html + + + +``` diff --git a/testbed/gradio-app__gradio/js/tootils/package.json b/testbed/gradio-app__gradio/js/tootils/package.json new file mode 100644 index 0000000000000000000000000000000000000000..57bd0cd053d1daf96f6cb9885f1075e06651c321 --- /dev/null +++ b/testbed/gradio-app__gradio/js/tootils/package.json @@ -0,0 +1,10 @@ +{ + "name": "@gradio/tootils", + "version": "0.0.2", + "description": "Internal test utilities", + "type": "module", + "main": "src/index.ts", + "author": "", + "license": "ISC", + "private": true +} diff --git a/testbed/gradio-app__gradio/js/upload/CHANGELOG.md b/testbed/gradio-app__gradio/js/upload/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..aafe2b2b8f5fb74d2581a940db9798097f5983d3 --- /dev/null +++ b/testbed/gradio-app__gradio/js/upload/CHANGELOG.md @@ -0,0 +1,96 @@ +# @gradio/upload + +## 0.3.0 + +### Features + +- [#5554](https://github.com/gradio-app/gradio/pull/5554) [`75ddeb390`](https://github.com/gradio-app/gradio/commit/75ddeb390d665d4484667390a97442081b49a423) - Accessibility Improvements. Thanks [@hannahblair](https://github.com/hannahblair)! + +## 0.2.1 + +### Patch Changes + +- Updated dependencies []: + - @gradio/atoms@0.1.2 + +## 0.2.0 + +### Features + +- [#5373](https://github.com/gradio-app/gradio/pull/5373) [`79d8f9d8`](https://github.com/gradio-app/gradio/commit/79d8f9d891901683c5a1b7486efb44eab2478c96) - Adds `height` and `zoom_speed` parameters to `Model3D` component, as well as a button to reset the camera position. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 0.1.0 + +### Highlights + +#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec)) + +##### Improved markdown support + +We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`, including syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling. + +##### Various performance improvements + +These improvements will be particularly beneficial to large applications. + +- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast. +- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%). +- Corrected an issue that was causing markdown to re-render infinitely. +- Ensured that the `gr.3DModel` does not re-render prematurely. + +Thanks [@pngwn](https://github.com/pngwn)! + +### Features + +- [#5216](https://github.com/gradio-app/gradio/pull/5216) [`4b58ea6d`](https://github.com/gradio-app/gradio/commit/4b58ea6d98e7a43b3f30d8a4cb6f379bc2eca6a8) - Update i18n tokens and locale files. Thanks [@hannahblair](https://github.com/hannahblair)!
+ +### Fixes + +- [#5285](https://github.com/gradio-app/gradio/pull/5285) [`cdfd4217`](https://github.com/gradio-app/gradio/commit/cdfd42174a9c777eaee9c1209bf8e90d8c7791f2) - Tweaks to `icon` parameter in `gr.Button()`. Thanks [@abidlabs](https://github.com/abidlabs)! + +## 0.0.3 + +### Fixes + +- [#5077](https://github.com/gradio-app/gradio/pull/5077) [`667875b2`](https://github.com/gradio-app/gradio/commit/667875b2441753e74d25bd9d3c8adedd8ede11cd) - Live audio streaming output + +highlight: + +#### Now supports loading streamed outputs + +Allows users to use generators to stream audio out, yielding consecutive chunks of audio. Requires `streaming=True` to be set on the output audio. + +```python +import gradio as gr +from pydub import AudioSegment + +def stream_audio(audio_file): + audio = AudioSegment.from_mp3(audio_file) + i = 0 + chunk_size = 3000 + + while chunk_size*i < len(audio): + chunk = audio[chunk_size*i:chunk_size*(i+1)] + i += 1 + if chunk: + file = f"/tmp/{i}.mp3" + chunk.export(file, format="mp3") + yield file + +demo = gr.Interface( + fn=stream_audio, + inputs=gr.Audio(type="filepath", label="Audio file to stream"), + outputs=gr.Audio(autoplay=True, streaming=True), +) + +demo.queue().launch() +``` + +From the backend, streamed outputs are served from the `/stream/` endpoint instead of the `/file/` endpoint. Currently just used to serve audio streaming output. The output JSON will have `is_stream`: `true`, instead of `is_file`: `true` in the file data object. Thanks [@aliabid94](https://github.com/aliabid94)! + +## 0.0.2 + +### Patch Changes + +- Updated dependencies []: + - @gradio/atoms@0.0.2 \ No newline at end of file diff --git a/testbed/gradio-app__gradio/js/upload/package.json b/testbed/gradio-app__gradio/js/upload/package.json new file mode 100644 index 0000000000000000000000000000000000000000..c78c452798890ba4e3ff1822827a5699b05e245e --- /dev/null +++ b/testbed/gradio-app__gradio/js/upload/package.json @@ -0,0 +1,15 @@ +{ + "name": "@gradio/upload", + "version": "0.3.0", + "description": "Gradio UI packages", + "type": "module", + "main": "src/index.ts", + "author": "", + "license": "ISC", + "private": true, + "dependencies": { + "@gradio/atoms": "workspace:^", + "@gradio/icons": "workspace:^" + }, + "main_changeset": true +} diff --git a/testbed/gradio-app__gradio/js/upload/src/ModifyUpload.svelte b/testbed/gradio-app__gradio/js/upload/src/ModifyUpload.svelte new file mode 100644 index 0000000000000000000000000000000000000000..a0eb424ba5e53d4b0402c4510a5199622600c474 --- /dev/null +++ b/testbed/gradio-app__gradio/js/upload/src/ModifyUpload.svelte @@ -0,0 +1,62 @@ + + +
+ {#if editable} + dispatch("edit")} + /> + {/if} + + {#if undoable} + dispatch("undo")} + /> + {/if} + + { + dispatch("clear"); + event.stopPropagation(); + }} + /> +
+ + diff --git a/testbed/gradio-app__gradio/js/upload/src/Upload.svelte b/testbed/gradio-app__gradio/js/upload/src/Upload.svelte new file mode 100644 index 0000000000000000000000000000000000000000..5302a51e3f823f72806e6ded1725be3325bb9126 --- /dev/null +++ b/testbed/gradio-app__gradio/js/upload/src/Upload.svelte @@ -0,0 +1,135 @@ + + + + + diff --git a/testbed/gradio-app__gradio/js/upload/src/index.ts b/testbed/gradio-app__gradio/js/upload/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..e6539f0187b27f27b64ea3c8870bd002be1b3b3c --- /dev/null +++ b/testbed/gradio-app__gradio/js/upload/src/index.ts @@ -0,0 +1,8 @@ +export { default as Upload } from "./Upload.svelte"; +export { default as ModifyUpload } from "./ModifyUpload.svelte"; +export type { FileData } from "./types"; +export { + normalise_file, + get_fetchable_url_or_file, + blobToBase64 +} from "./utils"; diff --git a/testbed/gradio-app__gradio/js/upload/src/utils.ts b/testbed/gradio-app__gradio/js/upload/src/utils.ts new file mode 100644 index 0000000000000000000000000000000000000000..687451b532e27a1f866555c58ff0714c47f799eb --- /dev/null +++ b/testbed/gradio-app__gradio/js/upload/src/utils.ts @@ -0,0 +1,87 @@ +import type { FileData } from "./types"; + +export function normalise_file( + file: string | FileData | null, + root: string, + root_url: string | null +): FileData | null; + +export function normalise_file( + file: FileData[] | null, + root: string, + root_url: string | null +): FileData[] | null; + +export function normalise_file( + file: FileData[] | FileData | null, + root: string, + root_url: string | null +): FileData[] | FileData | null; + +export function normalise_file( + file: FileData[] | FileData | string | null, + root: string, + root_url: string | null +): FileData[] | FileData | null { + if (file == null) return null; + if (typeof file === "string") { + return { + name: "file_data", + data: file + }; + } else if (Array.isArray(file)) { + const normalized_file: (FileData | null)[] = []; + + for (const x of file) { + if (x === null) { + normalized_file.push(null); + } else { + normalized_file.push(normalise_file(x, root, root_url)); + } + } + + return normalized_file as FileData[]; + } else if (file.is_file) { + file.data = get_fetchable_url_or_file(file.name, root, root_url); + } else if (file.is_stream) { + if (root_url == null) { + file.data = root + "/stream/" + file.name; + } else { + file.data = "/proxy=" + root_url + "stream/" + file.name; + } + } + return file; +} + +function is_url(str: string): boolean { + try { + const url = new URL(str); + return url.protocol === "http:" || url.protocol === "https:"; + } catch { + return false; + } +} + +export function get_fetchable_url_or_file( + path: string | null, + root: string, + root_url: string | null +): string { + if (path == null) { + return root_url ? `/proxy=${root_url}file=` : `${root}/file=`; + } + if (is_url(path)) { + return path; + } + return root_url ? 
`/proxy=${root_url}file=${path}` : `${root}/file=${path}`; +} + +export const blobToBase64 = (blob: File): Promise<string> => { + const reader = new FileReader(); + reader.readAsDataURL(blob); + return new Promise((resolve) => { + reader.onloadend = (): void => { + resolve(reader.result as string); + }; + }); +}; diff --git a/testbed/gradio-app__gradio/js/uploadbutton/CHANGELOG.md b/testbed/gradio-app__gradio/js/uploadbutton/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..236d3fd5d9a68224eab25cc6080056c07e1fb9f8 --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/CHANGELOG.md @@ -0,0 +1,60 @@ +# @gradio/uploadbutton + +## 0.0.6 + +### Patch Changes + +- Updated dependencies [[`c57f1b75e`](https://github.com/gradio-app/gradio/commit/c57f1b75e272c76b0af4d6bd0c7f44743ff34f26), [`40de3d217`](https://github.com/gradio-app/gradio/commit/40de3d2178b61ebe424b6f6228f94c0c6f679bea), [`ea0e00b20`](https://github.com/gradio-app/gradio/commit/ea0e00b207b4b90a10e9d054c4202d4e705a29ba), [`75ddeb390`](https://github.com/gradio-app/gradio/commit/75ddeb390d665d4484667390a97442081b49a423)]: + - @gradio/client@0.4.0 + - @gradio/button@0.2.0 + - @gradio/upload@0.3.0 + +## 0.0.5 + +### Patch Changes + +- Updated dependencies [[`26fef8c7`](https://github.com/gradio-app/gradio/commit/26fef8c7f85a006c7e25cdbed1792df19c512d02)]: + - @gradio/client@0.3.1 + - @gradio/utils@0.1.1 + - @gradio/button@0.1.3 + - @gradio/upload@0.2.1 + +## 0.0.4 + +### Patch Changes + +- Updated dependencies [[`119c8343`](https://github.com/gradio-app/gradio/commit/119c834331bfae60d4742c8f20e9cdecdd67e8c2), [`abf1c57d`](https://github.com/gradio-app/gradio/commit/abf1c57d7d85de0df233ee3b38aeb38b638477db), [`79d8f9d8`](https://github.com/gradio-app/gradio/commit/79d8f9d891901683c5a1b7486efb44eab2478c96)]: + - @gradio/client@0.3.0 + - @gradio/utils@0.1.0 + - @gradio/upload@0.2.0 + - @gradio/button@0.1.2 + +## 0.0.3 + +### Highlights + +#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec)) + +##### Improved markdown support + +We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`, including syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling. + +##### Various performance improvements + +These improvements will be particularly beneficial to large applications. + +- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast. +- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%). +- Corrected an issue that was causing markdown to re-render infinitely. +- Ensured that the `gr.3DModel` does not re-render prematurely. + +Thanks [@pngwn](https://github.com/pngwn)!
+ +## 0.0.2 + +### Patch Changes + +- Updated dependencies [[`61129052`](https://github.com/gradio-app/gradio/commit/61129052ed1391a75c825c891d57fa0ad6c09fc8), [`667875b2`](https://github.com/gradio-app/gradio/commit/667875b2441753e74d25bd9d3c8adedd8ede11cd), [`67265a58`](https://github.com/gradio-app/gradio/commit/67265a58027ef1f9e4c0eb849a532f72eaebde48), [`8b4eb8ca`](https://github.com/gradio-app/gradio/commit/8b4eb8cac9ea07bde31b44e2006ca2b7b5f4de36), [`37caa2e0`](https://github.com/gradio-app/gradio/commit/37caa2e0fe95d6cab8beb174580fb557904f137f)]: + - @gradio/client@0.2.0 + - @gradio/upload@0.0.3 + - @gradio/button@0.1.0 diff --git a/testbed/gradio-app__gradio/js/uploadbutton/UploadButton.test.ts b/testbed/gradio-app__gradio/js/uploadbutton/UploadButton.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..79b9336f4fa2a887126f539f9b07c4f5b2f237d1 --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/UploadButton.test.ts @@ -0,0 +1,68 @@ +import { test, describe, expect, vi, afterEach, assert } from "vitest"; +import { spy, spyOn } from "tinyspy"; +import { cleanup, render, wait_for_event } from "@gradio/tootils"; +import event from "@testing-library/user-event"; +import { setupi18n } from "../app/src/i18n"; +import UploadButton from "./interactive"; + +describe("UploadButton", () => { + afterEach(() => { + cleanup(); + vi.restoreAllMocks(); + }); + + test("Uploads with blob", async () => { + vi.mock("@gradio/client", async () => { + return { + upload_files: vi.fn((f) => new Promise((res) => res({}))) + }; + }); + + const api = await import("@gradio/client"); + + setupi18n(); + + const { getByTestId } = await render(UploadButton, { + label: "file", + value: null, + root: "http://localhost:7860", + file_count: "1" + }); + + const item = getByTestId("file-upload-button"); // container.querySelectorAll("input")[0]; + + const file = new File(["hello"], "my-audio.wav", { type: "audio/wav" }); + await event.upload(item, file); + + expect(api.upload_files).toHaveBeenCalled(); + }); + + test("upload sets change event", async () => { + vi.mock("@gradio/client", async () => { + return { + upload_files: vi.fn((f) => new Promise((res) => res({}))) + }; + }); + + await import("@gradio/client"); + setupi18n(); + const { component, getByTestId, wait_for_event } = await render( + UploadButton, + { + label: "file", + value: null, + root: "http://localhost:7860", + file_count: "1" + } + ); + + const item = getByTestId("file-upload-button"); //container.querySelectorAll("input")[0]; + const file = new File(["hello"], "my-audio.wav", { type: "audio/wav" }); + event.upload(item, file); + const mock = await wait_for_event("change"); + expect(mock.callCount).toBe(1); + const [data] = component.$capture_state().value; + expect(data).toBeTruthy(); + assert.equal(data.name, "my-audio.wav"); + }); +}); diff --git a/testbed/gradio-app__gradio/js/uploadbutton/interactive/InteractiveUploadButton.svelte b/testbed/gradio-app__gradio/js/uploadbutton/interactive/InteractiveUploadButton.svelte new file mode 100644 index 0000000000000000000000000000000000000000..032ec0c44d9ef251671355601f4f6c14df99c15a --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/interactive/InteractiveUploadButton.svelte @@ -0,0 +1,81 @@ + + + gradio.dispatch("click")} + on:load={handle_upload} +> + {$_(label)} + diff --git a/testbed/gradio-app__gradio/js/uploadbutton/package.json b/testbed/gradio-app__gradio/js/uploadbutton/package.json new file mode 100644 index 
0000000000000000000000000000000000000000..55631e82405bcc68a80a64d044ea18ed3d98d844 --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/package.json @@ -0,0 +1,23 @@ +{ + "name": "@gradio/uploadbutton", + "version": "0.0.6", + "description": "Gradio UI packages", + "type": "module", + "main": "index.svelte", + "author": "", + "license": "ISC", + "private": true, + "main_changeset": true, + "exports": { + "./package.json": "./package.json", + "./interactive": "./interactive/index.ts", + "./static": "./static/index.ts", + "./example": "./example/index.ts" + }, + "dependencies": { + "@gradio/button": "workspace:^", + "@gradio/client": "workspace:^", + "@gradio/upload": "workspace:^", + "@gradio/utils": "workspace:^" + } +} diff --git a/testbed/gradio-app__gradio/js/uploadbutton/shared/UploadButton.svelte b/testbed/gradio-app__gradio/js/uploadbutton/shared/UploadButton.svelte new file mode 100644 index 0000000000000000000000000000000000000000..b8f2074c8687706b38a9d5d068645027df40a883 --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/shared/UploadButton.svelte @@ -0,0 +1,112 @@ + + + + + + + + + diff --git a/testbed/gradio-app__gradio/js/uploadbutton/shared/index.ts b/testbed/gradio-app__gradio/js/uploadbutton/shared/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..e614821f9b10f2d70e821fb0098eda3e490b11df --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/shared/index.ts @@ -0,0 +1 @@ +export { default } from "./UploadButton.svelte"; diff --git a/testbed/gradio-app__gradio/js/uploadbutton/static/StaticUploadButton.svelte b/testbed/gradio-app__gradio/js/uploadbutton/static/StaticUploadButton.svelte new file mode 100644 index 0000000000000000000000000000000000000000..e57208583eb3c34f135076f02cf921496c5905f4 --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/static/StaticUploadButton.svelte @@ -0,0 +1,82 @@ + + + gradio.dispatch("click")} + on:load={handle_upload} +> + {$_(label)} + diff --git a/testbed/gradio-app__gradio/js/uploadbutton/static/index.ts b/testbed/gradio-app__gradio/js/uploadbutton/static/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..d203f33ca3cae7dc6b260df3e7a9b3ac5aec5757 --- /dev/null +++ b/testbed/gradio-app__gradio/js/uploadbutton/static/index.ts @@ -0,0 +1 @@ +export { default } from "./StaticUploadButton.svelte"; diff --git a/testbed/gradio-app__gradio/js/utils/CHANGELOG.md b/testbed/gradio-app__gradio/js/utils/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..8f49785a99302c42d81a9c1ef0d42aedffa77177 --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/CHANGELOG.md @@ -0,0 +1,44 @@ +# @gradio/utils + +## 0.1.1 + +### Patch Changes + +- Updated dependencies [[`afac0006`](https://github.com/gradio-app/gradio/commit/afac0006337ce2840cf497cd65691f2f60ee5912)]: + - @gradio/theme@0.1.0 + +## 0.1.0 + +### Highlights + +#### Like/Dislike Button for Chatbot ([#5391](https://github.com/gradio-app/gradio/pull/5391) [`abf1c57d`](https://github.com/gradio-app/gradio/commit/abf1c57d7d85de0df233ee3b38aeb38b638477db)) + +Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! + +## 0.0.3 + +### Highlights + +#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec)) + +##### Improved markdown support + +We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`. 
This includes syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling. + +##### Various performance improvements + +These improvements will be particularly beneficial to large applications. + +- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast. +- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%). +- Corrected an issue that was causing markdown to re-render infinitely. +- Ensured that the `gr.3DModel` does not re-render prematurely. + +Thanks [@pngwn](https://github.com/pngwn)! + +## 0.0.2 + +### Patch Changes + +- Updated dependencies [[`41c83070`](https://github.com/gradio-app/gradio/commit/41c83070b01632084e7d29123048a96c1e261407)]: + - @gradio/theme@0.0.2 diff --git a/testbed/gradio-app__gradio/js/utils/README.md b/testbed/gradio-app__gradio/js/utils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b1508da2493faee96f91f25a4d635b9439bfeba2 --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/README.md @@ -0,0 +1 @@ +# `@gradio/table` diff --git a/testbed/gradio-app__gradio/js/utils/package.json b/testbed/gradio-app__gradio/js/utils/package.json new file mode 100644 index 0000000000000000000000000000000000000000..1b7a8ca70ce5bf8e422d5dc9daef132980258ecd --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/package.json @@ -0,0 +1,14 @@ +{ + "name": "@gradio/utils", + "version": "0.1.1", + "description": "Gradio UI packages", + "type": "module", + "main": "src/index.ts", + "author": "", + "license": "ISC", + "private": true, + "dependencies": { + "@gradio/theme": "workspace:^" + }, + "main_changeset": true +} diff --git a/testbed/gradio-app__gradio/js/utils/src/color.ts b/testbed/gradio-app__gradio/js/utils/src/color.ts new file mode 100644 index 0000000000000000000000000000000000000000..86d3fe6a5c35c450cc6517b5f3d4767ffda1bc2c --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/src/color.ts @@ -0,0 +1,5 @@ +import { colors, ordered_colors } from "@gradio/theme"; + +export const get_next_color = (index: number): keyof typeof colors => { + return ordered_colors[index % ordered_colors.length]; +}; diff --git a/testbed/gradio-app__gradio/js/utils/src/index.ts b/testbed/gradio-app__gradio/js/utils/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..481e1f1ee7552e8f54d631e1bb816ebffbfd34dc --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/src/index.ts @@ -0,0 +1,3 @@ +export * from "./color"; +export * from "./utils"; +export * from "./types"; diff --git a/testbed/gradio-app__gradio/js/utils/src/types.ts b/testbed/gradio-app__gradio/js/utils/src/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..904c904c2c2189ce13afeba8b9c1fa79b933e14b --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/src/types.ts @@ -0,0 +1 @@ +export { type Gradio } from "../../app/src/gradio_helper"; diff --git a/testbed/gradio-app__gradio/js/utils/src/utils.ts b/testbed/gradio-app__gradio/js/utils/src/utils.ts new file mode 100644 index 0000000000000000000000000000000000000000..da23bd7f8ccced7ef63ceb69e0917144c23a88f3 --- /dev/null +++ b/testbed/gradio-app__gradio/js/utils/src/utils.ts @@ -0,0 +1,152 @@ +import { type ActionReturn } from "svelte/action"; +export interface SelectData { + index: number |
[number, number]; + value: any; + selected?: boolean; +} + +export interface LikeData { + index: number | [number, number]; + value: any; + liked?: boolean; +} + +export interface ShareData { + description: string; + title?: string; +} + +export class ShareError extends Error { + constructor(message: string) { + super(message); + this.name = "ShareError"; + } +} + +export async function uploadToHuggingFace( + data: string, + type: "base64" | "url" +): Promise<string> { + if (window.__gradio_space__ == null) { + throw new ShareError("Must be on Spaces to share."); + } + let blob: Blob; + let contentType: string; + let filename: string; + if (type === "url") { + const response = await fetch(data); + blob = await response.blob(); + contentType = response.headers.get("content-type") || ""; + filename = response.headers.get("content-disposition") || ""; + } else { + blob = dataURLtoBlob(data); + contentType = data.split(";")[0].split(":")[1]; + filename = "file." + contentType.split("/")[1]; + } + + const file = new File([blob], filename, { type: contentType }); + + // Send file to endpoint + const uploadResponse = await fetch("https://huggingface.co/uploads", { + method: "POST", + body: file, + headers: { + "Content-Type": file.type, + "X-Requested-With": "XMLHttpRequest" + } + }); + + // Check status of response + if (!uploadResponse.ok) { + if ( + uploadResponse.headers.get("content-type")?.includes("application/json") + ) { + const error = await uploadResponse.json(); + throw new ShareError(`Upload failed: ${error.error}`); + } + throw new ShareError(`Upload failed.`); + } + + // Return response if needed + const result = await uploadResponse.text(); + return result; +} + +function dataURLtoBlob(dataurl: string): Blob { + var arr = dataurl.split(","), + mime = (arr[0].match(/:(.*?);/) as RegExpMatchArray)[1], + bstr = atob(arr[1]), + n = bstr.length, + u8arr = new Uint8Array(n); + while (n--) { + u8arr[n] = bstr.charCodeAt(n); + } + return new Blob([u8arr], { type: mime }); +} + +export function copy(node: HTMLDivElement): ActionReturn { + node.addEventListener("click", handle_copy); + + async function handle_copy(event: MouseEvent): Promise<void> { + const path = event.composedPath() as HTMLButtonElement[]; + + const [copy_button] = path.filter( + (e) => e?.tagName === "BUTTON" && e.classList.contains("copy_code_button") + ); + + if (copy_button) { + event.stopImmediatePropagation(); + + const copy_text = copy_button.parentElement!.innerText.trim(); + const copy_success_button = Array.from( + copy_button.children + )[1] as HTMLDivElement; + + const copied = await copy_to_clipboard(copy_text); + + if (copied) copy_feedback(copy_success_button); + + function copy_feedback(_copy_success_button: HTMLDivElement): void { + _copy_success_button.style.opacity = "1"; + setTimeout(() => { + _copy_success_button.style.opacity = "0"; + }, 2000); + } + } + } + + return { + destroy(): void { + node.removeEventListener("click", handle_copy); + } + }; +} + +async function copy_to_clipboard(value: string): Promise<boolean> { + let copied = false; + if ("clipboard" in navigator) { + await navigator.clipboard.writeText(value); + copied = true; + } else { + const textArea = document.createElement("textarea"); + textArea.value = value; + + textArea.style.position = "absolute"; + textArea.style.left = "-999999px"; + + document.body.prepend(textArea); + textArea.select(); + + try { + document.execCommand("copy"); + copied = true; + } catch (error) { + console.error(error); + copied = false; + } finally { + textArea.remove(); + } + } + + 
return copied; +} diff --git a/testbed/gradio-app__gradio/js/video/CHANGELOG.md b/testbed/gradio-app__gradio/js/video/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..6501192f12cae4d3e69033fe686ca7afd39cb5f0 --- /dev/null +++ b/testbed/gradio-app__gradio/js/video/CHANGELOG.md @@ -0,0 +1,87 @@ +# @gradio/video + +## 0.0.9 + +### Patch Changes + +- Updated dependencies [[`75ddeb390`](https://github.com/gradio-app/gradio/commit/75ddeb390d665d4484667390a97442081b49a423)]: + - @gradio/image@0.3.0 + - @gradio/upload@0.3.0 + +## 0.0.8 + +### Patch Changes + +- Updated dependencies [[`e0d61b8ba`](https://github.com/gradio-app/gradio/commit/e0d61b8baa0f6293f53b9bdb1647d42f9ae2583a)]: + - @gradio/image@0.2.4 + +## 0.0.7 + +### Patch Changes + +- Updated dependencies [[`dc86e4a7`](https://github.com/gradio-app/gradio/commit/dc86e4a7e1c40b910c74558e6f88fddf9b3292bc)]: + - @gradio/image@0.2.3 + +## 0.0.6 + +### Patch Changes + +- Updated dependencies [[`afac0006`](https://github.com/gradio-app/gradio/commit/afac0006337ce2840cf497cd65691f2f60ee5912)]: + - @gradio/statustracker@0.2.0 + - @gradio/image@0.2.2 + - @gradio/utils@0.1.1 + - @gradio/atoms@0.1.2 + - @gradio/upload@0.2.1 + +## 0.0.5 + +### Patch Changes + +- Updated dependencies [[`abf1c57d`](https://github.com/gradio-app/gradio/commit/abf1c57d7d85de0df233ee3b38aeb38b638477db), [`79d8f9d8`](https://github.com/gradio-app/gradio/commit/79d8f9d891901683c5a1b7486efb44eab2478c96)]: + - @gradio/icons@0.1.0 + - @gradio/utils@0.1.0 + - @gradio/upload@0.2.0 + - @gradio/atoms@0.1.1 + - @gradio/image@0.2.1 + - @gradio/statustracker@0.1.1 + +## 0.0.4 + +### Highlights + +#### Improve startup performance and markdown support ([#5279](https://github.com/gradio-app/gradio/pull/5279) [`fe057300`](https://github.com/gradio-app/gradio/commit/fe057300f0672c62dab9d9b4501054ac5d45a4ec)) + +##### Improved markdown support + +We now have better support for markdown in `gr.Markdown` and `gr.Dataframe`, including syntax highlighting and GitHub Flavoured Markdown. We also have more consistent markdown behaviour and styling. + +##### Various performance improvements + +These improvements will be particularly beneficial to large applications. + +- Rather than attaching events manually, they are now delegated, leading to a significant performance improvement and addressing a performance regression introduced in a recent version of Gradio. App startup for large applications is now around twice as fast. +- Optimised the mounting of individual components, leading to a modest performance improvement during startup (~30%). +- Corrected an issue that was causing markdown to re-render infinitely. +- Ensured that the `gr.3DModel` does not re-render prematurely. + +Thanks [@pngwn](https://github.com/pngwn)! + +### Features + +- [#5215](https://github.com/gradio-app/gradio/pull/5215) [`fbdad78a`](https://github.com/gradio-app/gradio/commit/fbdad78af4c47454cbb570f88cc14bf4479bbceb) - Lazy load interactive or static variants of a component individually, rather than loading both variants regardless. This change will improve performance for many applications. Thanks [@pngwn](https://github.com/pngwn)! + +## 0.0.3 + +### Fixes + +- [#5140](https://github.com/gradio-app/gradio/pull/5140) [`cd1353fa`](https://github.com/gradio-app/gradio/commit/cd1353fa3eb1b015f5860ca5d5a8e8d1aa4a831c) - Fixes the display of minutes in the video player. Thanks [@abidlabs](https://github.com/abidlabs)!
+ +## 0.0.2 + +### Patch Changes + +- Updated dependencies [[`44ac8ad0`](https://github.com/gradio-app/gradio/commit/44ac8ad08d82ea12c503dde5c78f999eb0452de2)]: + - @gradio/image@0.1.0 + - @gradio/utils@0.0.2 + - @gradio/atoms@0.0.2 + - @gradio/upload@0.0.2 diff --git a/testbed/gradio-app__gradio/js/video/README.md b/testbed/gradio-app__gradio/js/video/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5b427fdb3b640a5482b4336ea6d4bbd861d81b5 --- /dev/null +++ b/testbed/gradio-app__gradio/js/video/README.md @@ -0,0 +1,11 @@ +# `@gradio/button` + +```html + + + +``` diff --git a/testbed/gradio-app__gradio/js/video/Video.stories.svelte b/testbed/gradio-app__gradio/js/video/Video.stories.svelte new file mode 100644 index 0000000000000000000000000000000000000000..425456e21c642f908818bec594b5318b9db52d3d --- /dev/null +++ b/testbed/gradio-app__gradio/js/video/Video.stories.svelte @@ -0,0 +1,41 @@ + + + + + + + diff --git a/testbed/gradio-app__gradio/js/video/Video.test.ts b/testbed/gradio-app__gradio/js/video/Video.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..e54dca8fa6b6083acbdf96d119fd0d8f38f7e23e --- /dev/null +++ b/testbed/gradio-app__gradio/js/video/Video.test.ts @@ -0,0 +1,285 @@ +import { + test, + describe, + assert, + afterEach, + vi, + beforeAll, + beforeEach, + expect +} from "vitest"; +import { spy, spyOn } from "tinyspy"; +import { cleanup, render } from "@gradio/tootils"; +import { setupi18n } from "../app/src/i18n"; + +import InteractiveVideo from "./interactive"; +import StaticVideo from "./static"; + +import type { LoadingStatus } from "@gradio/statustracker"; + +const loading_status = { + eta: 0, + queue_position: 1, + queue_size: 1, + status: "complete" as LoadingStatus["status"], + scroll_to_output: false, + visible: true, + fn_index: 0, + show_progress: "full" as LoadingStatus["show_progress"] +}; + +describe("Video", () => { + beforeAll(() => { + window.HTMLMediaElement.prototype.play = vi.fn(); + window.HTMLMediaElement.prototype.pause = vi.fn(); + }); + beforeEach(setupi18n); + afterEach(() => cleanup()); + + test("renders provided value and label", async () => { + const { getByTestId, queryAllByText } = await render(InteractiveVideo, { + show_label: true, + loading_status, + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ], + label: "Test Label", + root: "foo", + root_url: null, + streaming: false, + pending: false, + name: "bar", + source: "upload" + }); + let vid = getByTestId("Test Label-player") as HTMLVideoElement; + assert.equal( + vid.src, + "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav" + ); + assert.equal(queryAllByText("Test Label").length, 1); + }); + + test("hides label", async () => { + const { queryAllByText } = await render(InteractiveVideo, { + show_label: false, + loading_status, + value: { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + }, + label: "Video Component", + root: "foo", + root_url: null, + streaming: false, + pending: false, + name: "bar", + source: "upload" + }); + assert.equal(queryAllByText("Video Component").length, 1); + }); + + test("static Video sets value", async () => { + const { getByTestId } = await render(StaticVideo, { + show_label: true, + loading_status, + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ], + root: "foo", + 
root_url: null, + streaming: false, + pending: false, + name: "bar", + source: "upload" + }); + let vid = getByTestId("test-player") as HTMLVideoElement; + assert.equal( + vid.src, + "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav" + ); + }); + + test("when autoplay is true `media.play` should be called in static mode", async () => { + const { getByTestId } = await render(StaticVideo, { + show_label: true, + loading_status, + mode: "static", + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ], + root: "foo", + root_url: null, + streaming: false, + pending: false, + source: "upload", + autoplay: true + }); + const startButton = getByTestId("test-player") as HTMLVideoElement; + const fn = spyOn(startButton, "play"); + startButton.dispatchEvent(new Event("loadeddata")); + assert.equal(fn.callCount, 1); + }); + + test("when autoplay is true `media.play` should be called in dynamic mode", async () => { + const { getByTestId } = await render(InteractiveVideo, { + show_label: true, + loading_status, + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ], + root: "foo", + root_url: null, + streaming: false, + pending: false, + source: "upload", + autoplay: true + }); + const startButton = getByTestId("test-player") as HTMLVideoElement; + const fn = spyOn(startButton, "play"); + startButton.dispatchEvent(new Event("loadeddata")); + assert.equal(fn.callCount, 1); + }); + + test("when autoplay is true `media.play` should be called in static mode when the Video data is updated", async () => { + const { component, getByTestId } = await render(StaticVideo, { + show_label: true, + loading_status, + mode: "static", + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ], + root: "foo", + root_url: null, + streaming: false, + pending: false, + source: "upload", + autoplay: true + }); + const startButton = getByTestId("test-player") as HTMLVideoElement; + const fn = spyOn(startButton, "play"); + startButton.dispatchEvent(new Event("loadeddata")); + component.$set({ + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ] + }); + startButton.dispatchEvent(new Event("loadeddata")); + assert.equal(fn.callCount, 2); + }); + + test("when autoplay is true `media.play` should be called in dynamic mode when the Video data is updated", async () => { + const { component, getByTestId } = await render(InteractiveVideo, { + show_label: true, + loading_status, + mode: "dynamic", + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ], + root: "foo", + root_url: null, + streaming: false, + pending: false, + source: "upload", + autoplay: true + }); + const startButton = getByTestId("test-player") as HTMLVideoElement; + const fn = spyOn(startButton, "play"); + startButton.dispatchEvent(new Event("loadeddata")); + component.$set({ + value: [ + { + name: "https://gradio-builds.s3.amazonaws.com/demo-files/audio_sample.wav", + data: null, + is_file: true + } + ] + }); + startButton.dispatchEvent(new Event("loadeddata")); + assert.equal(fn.callCount, 2); + }); + test("renders video and download button", async () => { + const data = [ + { + data: null, + name: 
"https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/demo/video_component/files/a.mp4", + is_file: true + } + ]; + const results = await render(StaticVideo, { + mode: "static", + label: "video", + show_label: true, + value: data, + root: "foo" + }); + + const downloadButton = results.getAllByTestId("download-div")[0]; + expect( + downloadButton.getElementsByTagName("a")[0].getAttribute("href") + ).toBe(data[0].name); + expect( + downloadButton.getElementsByTagName("button").length + ).toBeGreaterThan(0); + }); + + test("video change event trigger fires when value is changed and only fires once", async () => { + const { component, listen } = await render(InteractiveVideo, { + show_label: true, + loading_status, + mode: "dynamic", + value: [ + { + name: "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/demo/video_component/files/a.mp4", + data: null, + is_file: true + } + ], + root: "foo", + root_url: null, + streaming: false, + pending: false, + source: "upload", + autoplay: true + }); + + const mock = listen("change"); + + (component.value = [ + { + name: "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/demo/video_component/files/b.mp4", + data: null, + is_file: true + } + ]), + assert.equal(mock.callCount, 1); + }); +}); diff --git a/testbed/gradio-app__gradio/js/video/example/Video.svelte b/testbed/gradio-app__gradio/js/video/example/Video.svelte new file mode 100644 index 0000000000000000000000000000000000000000..c4664316771ae53acdad6426496ab1d75fa2328b --- /dev/null +++ b/testbed/gradio-app__gradio/js/video/example/Video.svelte @@ -0,0 +1,65 @@ + + +{#if playable()} +
+ + + + {#if show_share_button} + { + if (!value) return ""; + let url = await uploadToHuggingFace(value.data, "url"); + return url; + }} + /> + {/if} +
+{/if} + + diff --git a/testbed/gradio-app__gradio/js/video/static/index.ts b/testbed/gradio-app__gradio/js/video/static/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..bbd5cbd962d5042b18fc71daf2f44fe23000a488 --- /dev/null +++ b/testbed/gradio-app__gradio/js/video/static/index.ts @@ -0,0 +1 @@ +export { default } from "./StaticVideo.svelte"; diff --git a/testbed/gradio-app__gradio/js/wasm/CHANGELOG.md b/testbed/gradio-app__gradio/js/wasm/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..7944db722d9d016427d24723d15159e6437a1dcb --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/CHANGELOG.md @@ -0,0 +1,13 @@ +# @gradio/wasm + +## 0.0.3 + +### Features + +- [#5598](https://github.com/gradio-app/gradio/pull/5598) [`6b1714386`](https://github.com/gradio-app/gradio/commit/6b17143868bdd2c1400af1199a01c1c0d5c27477) - Upgrade Pyodide to 0.24.0 and install the native orjson package. Thanks [@whitphx](https://github.com/whitphx)! + +## 0.0.2 + +### Fixes + +- [#5538](https://github.com/gradio-app/gradio/pull/5538) [`b5c6f7b08`](https://github.com/gradio-app/gradio/commit/b5c6f7b086a6419f27c757ad9b2ac9ea679b749b) - chore(deps): update dependency pyodide to ^0.24.0. Thanks [@renovate](https://github.com/apps/renovate)! \ No newline at end of file diff --git a/testbed/gradio-app__gradio/js/wasm/package.json b/testbed/gradio-app__gradio/js/wasm/package.json new file mode 100644 index 0000000000000000000000000000000000000000..9e2c022f0634b1cca7a7d07ff3861fe2c1a12c5a --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/package.json @@ -0,0 +1,30 @@ +{ + "name": "@gradio/wasm", + "version": "0.0.3", + "description": "Gradio Wasm package", + "type": "module", + "main": "./dist/index.js", + "exports": { + ".": "./dist/index.js", + "./package.json": "./package.json" + }, + "private": true, + "keywords": [], + "author": "", + "license": "ISC", + "scripts": { + "dev:client": "tsc -w --incremental", + "dev:worker": "vite build --config vite.worker.config.js --watch --emptyOutDir=false", + "dev": "run-p dev:*", + "build:client": "tsc", + "build:worker": "vite build --config vite.worker.config.js", + "build": "run-s build:worker build:client" + }, + "devDependencies": { + "pyodide": "^0.24.0" + }, + "dependencies": { + "@types/path-browserify": "^1.0.0", + "path-browserify": "^1.0.1" + } +} diff --git a/testbed/gradio-app__gradio/js/wasm/src/cross-origin-worker.ts b/testbed/gradio-app__gradio/js/wasm/src/cross-origin-worker.ts new file mode 100644 index 0000000000000000000000000000000000000000..2128c2b4d08e1a55f996a7acd8e1a53425aa3160 --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/src/cross-origin-worker.ts @@ -0,0 +1,30 @@ +// A hack to load a worker script from a different origin. +// Vite's built-in Web Workers feature does not support inlining the worker code +// into the main bundle and always emits it to a separate file, +// which is not compatible with the cross-origin worker. +// So we use this hack to load the separate worker code from a domain different from the parent page. +// Vite deals with the special syntax `new Worker(new URL("worker.ts", import.meta.url))` for the worker build, +// so this `CrossOriginWorkerMaker` class must be defined in a separate file and +// be imported as the `Worker` alias into the file where the syntax is used to load the worker. 
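+//
+// A rough usage sketch (illustrative only, mirroring how worker-proxy.ts in this
+// package consumes it; not an additional API):
+//
+//   import { CrossOriginWorkerMaker as Worker } from "./cross-origin-worker";
+//   const { worker } = new Worker(new URL("./webworker.js", import.meta.url));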
+// This implementation was based on https://github.com/whitphx/stlite/blob/v0.34.0/packages/kernel/src/kernel.ts,
+// and this technique was introduced originally for Webpack at https://github.com/webpack/webpack/discussions/14648#discussioncomment-1589272
+export class CrossOriginWorkerMaker {
+	public readonly worker: Worker;
+
+	constructor(url: URL) {
+		try {
+			// This is the normal way to load a worker script, and the most straightforward when possible.
+			this.worker = new Worker(url);
+		} catch (e) {
+			console.debug(
+				`Failed to load a worker script from ${url.toString()}. Trying to load a cross-origin worker...`
+			);
+			const workerBlob = new Blob([`importScripts("${url.toString()}");`], {
+				type: "text/javascript"
+			});
+			const workerBlobUrl = URL.createObjectURL(workerBlob);
+			this.worker = new Worker(workerBlobUrl);
+			URL.revokeObjectURL(workerBlobUrl);
+		}
+	}
+}
diff --git a/testbed/gradio-app__gradio/js/wasm/src/index.ts b/testbed/gradio-app__gradio/js/wasm/src/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..201102d49e08cced6f4644eca3b5c0b6666650ff
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/index.ts
@@ -0,0 +1 @@
+export { WorkerProxy, type WorkerProxyOptions } from "./worker-proxy";
diff --git a/testbed/gradio-app__gradio/js/wasm/src/message-types.ts b/testbed/gradio-app__gradio/js/wasm/src/message-types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..75bf7dc13b4925db334678f9963046f8e6ec3abc
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/message-types.ts
@@ -0,0 +1,108 @@
+export interface HttpRequest {
+	method: "GET" | "POST" | "PUT" | "DELETE";
+	path: string;
+	query_string: string;
+	headers: Record<string, string>;
+	body?: Uint8Array;
+}
+export interface HttpResponse {
+	status: number;
+	headers: Record<string, string>;
+	body: Uint8Array;
+}
+export interface EmscriptenFile {
+	data: string | ArrayBufferView;
+	opts?: Record<string, any>;
+}
+export interface EmscriptenFileUrl {
+	url: string;
+	opts?: Record<string, any>;
+}
+
+export interface InMessageBase {
+	type: string;
+	data: unknown;
+}
+
+export interface InMessageInit extends InMessageBase {
+	type: "init";
+	data: {
+		gradioWheelUrl: string;
+		gradioClientWheelUrl: string;
+		files: Record<string, EmscriptenFile | EmscriptenFileUrl>;
+		requirements: string[];
+	};
+}
+export interface InMessageRunPythonCode extends InMessageBase {
+	type: "run-python-code";
+	data: {
+		code: string;
+	};
+}
+export interface InMessageRunPythonFile extends InMessageBase {
+	type: "run-python-file";
+	data: {
+		path: string;
+	};
+}
+export interface InMessageHttpRequest extends InMessageBase {
+	type: "http-request";
+	data: {
+		request: HttpRequest;
+	};
+}
+export interface InMessageFileWrite extends InMessageBase {
+	type: "file:write";
+	data: {
+		path: string;
+		data: string | ArrayBufferView;
+		opts?: Record<string, any>;
+	};
+}
+export interface InMessageFileRename extends InMessageBase {
+	type: "file:rename";
+	data: {
+		oldPath: string;
+		newPath: string;
+	};
+}
+export interface InMessageFileUnlink extends InMessageBase {
+	type: "file:unlink";
+	data: {
+		path: string;
+	};
+}
+export interface InMessageInstall extends InMessageBase {
+	type: "install";
+	data: {
+		requirements: string[];
+	};
+}
+
+export interface InMessageEcho extends InMessageBase {
+	// For debug
+	type: "echo";
+	data: unknown;
+}
+
+export type InMessage =
+	| InMessageInit
+	| InMessageRunPythonCode
+	| InMessageRunPythonFile
+	| InMessageHttpRequest
+	| InMessageFileWrite
+	| InMessageFileRename
+	| InMessageFileUnlink
+	| InMessageInstall
+	| InMessageEcho;
+
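+// For illustration (an assumed example, not part of the protocol definition):
+// a "run-python-code" message as the main thread would construct it.
+//
+//   const msg: InMessage = {
+//     type: "run-python-code",
+//     data: { code: "print('hello')" }
+//   };
+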
+export interface ReplyMessageSuccess<T = unknown> {
+	type: "reply:success";
+	data: T;
+}
+export interface ReplyMessageError {
+	type: "reply:error";
+	error: Error;
+}
+
+export type ReplyMessage = ReplyMessageSuccess | ReplyMessageError;
diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/declarations.d.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/declarations.d.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6c8c2da78af4a3f9bd5087464ef9dfd6e52d7a98
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/declarations.d.ts
@@ -0,0 +1,2 @@
+// Declarations for the WebWorker files where some variables are dynamically loaded through importScripts.
+declare let loadPyodide: any;
diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/file.test.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/file.test.ts
new file mode 100644
index 0000000000000000000000000000000000000000..27d3b2141a03735e4437c4a935d37a2670c155d2
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/file.test.ts
@@ -0,0 +1,90 @@
+// @vitest-environment node
+
+import path from "path";
+import { loadPyodide, PyodideInterface } from "pyodide";
+import { describe, it, expect, beforeEach, beforeAll } from "vitest";
+import { writeFileWithParents, renameWithParents } from "./file";
+
+describe("writeFileWithParents()", () => {
+	let pyodide: PyodideInterface;
+
+	beforeAll(async () => {
+		pyodide = await loadPyodide({
+			indexURL: path.resolve(__dirname, "../../node_modules/pyodide")
+		});
+	});
+
+	const testCases: { paths: string[] }[] = [
+		{ paths: ["foo.py"] },
+		{ paths: ["foo/bar.py"] },
+		{ paths: ["foo/bar/baz.py", "foo/hoge.py"] },
+		{ paths: ["foo/bar/baz/pii.py"] },
+		{ paths: ["foo/bar/baz/boo.py", "foo/bar/hoge.py"] },
+		{ paths: ["/boo/foo.py"] }
+	];
+	testCases.forEach(({ paths }) => {
+		it(`writes files (${paths})`, () => {
+			for (const path of paths) {
+				expect(pyodide.FS.analyzePath(path).exists).toBe(false);
+
+				writeFileWithParents(pyodide, path, "# Test");
+
+				expect(pyodide.FS.analyzePath(path).exists).toBe(true);
+				expect(pyodide.FS.readFile(path, { encoding: "utf8" })).toEqual(
+					"# Test"
+				);
+			}
+		});
+	});
+
+	it("can write binary files", () => {
+		const path = "foo/bar.dat";
+		const uint8View = new Uint8Array([0, 1, 2, 3]); // Random data
+		writeFileWithParents(pyodide, path, uint8View);
+		expect(pyodide.FS.readFile(path)).toEqual(uint8View);
+	});
+});
+
+describe("renameWithParents", () => {
+	let pyodide: PyodideInterface;
+
+	beforeAll(async () => {
+		pyodide = await loadPyodide({
+			indexURL: path.resolve(__dirname, "../../node_modules/pyodide")
+		});
+	});
+
+	const testCases: { oldPath: string; newPath: string }[] = [
+		{ oldPath: "foo.py", newPath: "bar.py" }, // Same dir, without a parent path
+		{ oldPath: "foo.py", newPath: "bar/baz.py" }, // To a nested dir
+		{ oldPath: "baz/foo.py", newPath: "bar.py" }, // From a nested dir
+		{ oldPath: "foo/bar.py", newPath: "foo/baz.py" }, // Same dir with a parent path
+		{ oldPath: "foo/bar.py", newPath: "baz/qux.py" } // With parent paths, different dirs
+	];
+	testCases.forEach(({ oldPath, newPath }) => {
+		it(`renames "${oldPath}" to "${newPath}"`, () => {
+			writeFileWithParents(pyodide, oldPath, "# Test");
+			expect(pyodide.FS.analyzePath(oldPath).exists).toBe(true);
+
+			renameWithParents(pyodide, oldPath, newPath);
+
+			expect(pyodide.FS.analyzePath(oldPath).exists).toBe(false);
+			expect(pyodide.FS.analyzePath(newPath).exists).toBe(true);
+			expect(pyodide.FS.readFile(newPath, { encoding: "utf8" })).toEqual(
+				"# Test"
+			);
encoding: "utf8" })).toEqual( + "# Test" + ); + }); + }); + + ["foo.py", "foo/bar.py"].forEach((path) => { + it(`does nothing when the source and the destination are the same`, () => { + writeFileWithParents(pyodide, path, "# Test"); + expect(pyodide.FS.analyzePath(path).exists).toBe(true); + + renameWithParents(pyodide, path, path); + + expect(pyodide.FS.analyzePath(path).exists).toBe(true); + expect(pyodide.FS.readFile(path, { encoding: "utf8" })).toEqual("# Test"); + }); + }); +}); diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/file.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/file.ts new file mode 100644 index 0000000000000000000000000000000000000000..b277a9b1417eb70877687a754535e171314c1992 --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/file.ts @@ -0,0 +1,49 @@ +import path from "path-browserify"; +import type { PyodideInterface } from "pyodide"; + +function ensureParent(pyodide: PyodideInterface, filePath: string): void { + const normalized = path.normalize(filePath); + + const dirPath = path.dirname(normalized); + + const dirNames = dirPath.split("/"); + + const chDirNames: string[] = []; + for (const dirName of dirNames) { + chDirNames.push(dirName); + const dirPath = chDirNames.join("/"); + + if (pyodide.FS.analyzePath(dirPath).exists) { + if (pyodide.FS.isDir(dirPath)) { + throw new Error(`"${dirPath}" already exists and is not a directory.`); + } + continue; + } + + try { + pyodide.FS.mkdir(dirPath); + } catch (err) { + console.error(`Failed to create a directory "${dirPath}"`); + throw err; + } + } +} + +export function writeFileWithParents( + pyodide: PyodideInterface, + filePath: string, + data: string | ArrayBufferView, + opts?: Parameters[2] +): void { + ensureParent(pyodide, filePath); + pyodide.FS.writeFile(filePath, data, opts); +} + +export function renameWithParents( + pyodide: PyodideInterface, + oldPath: string, + newPath: string +): void { + ensureParent(pyodide, newPath); + pyodide.FS.rename(oldPath, newPath); +} diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/http.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/http.ts new file mode 100644 index 0000000000000000000000000000000000000000..c9e7221a314df2a37654541f0c728818fcce6b4a --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/http.ts @@ -0,0 +1,133 @@ +import type { PyProxy } from "pyodide/ffi"; +import type { HttpRequest, HttpResponse } from "../message-types"; + +// Inspired by https://github.com/rstudio/shinylive/blob/v0.1.2/src/messageporthttp.ts + +// A reference to an ASGI application instance in Python +// Ref: https://asgi.readthedocs.io/en/latest/specs/main.html#applications +type ASGIApplication = ( + scope: Record, + receive: () => Promise, + send: (event: PyProxy) => Promise +) => Promise; + +type ReceiveEvent = RequestReceiveEvent | DisconnectReceiveEvent; +// https://asgi.readthedocs.io/en/latest/specs/www.html#request-receive-event +interface RequestReceiveEvent { + type: "http.request"; + body?: Uint8Array; // `bytes` in Python + more_body: boolean; +} +// https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event +interface DisconnectReceiveEvent { + type: "http.disconnect"; +} + +type SendEvent = ResponseStartSendEvent | ResponseBodySendEvent; +// https://asgi.readthedocs.io/en/latest/specs/www.html#response-start-send-event +interface ResponseStartSendEvent { + type: "http.response.start"; + status: number; + headers: Iterable<[Uint8Array, Uint8Array]>; + trailers: boolean; +} +// 
+interface ResponseBodySendEvent {
+	type: "http.response.body";
+	body: Uint8Array; // `bytes` in Python
+	more_body: boolean;
+}
+
+function headersToASGI(headers: HttpRequest["headers"]): [string, string][] {
+	const result: [string, string][] = [];
+	for (const [key, value] of Object.entries(headers)) {
+		result.push([key, value]);
+	}
+	return result;
+}
+
+export function uint8ArrayToString(buf: Uint8Array): string {
+	let result = "";
+	for (let i = 0; i < buf.length; i++) {
+		result += String.fromCharCode(buf[i]);
+	}
+	return result;
+}
+
+function asgiHeadersToRecord(headers: any): Record<string, string> {
+	headers = headers.map(([key, val]: [Uint8Array, Uint8Array]) => {
+		return [uint8ArrayToString(key), uint8ArrayToString(val)];
+	});
+	return Object.fromEntries(headers);
+}
+
+export const makeHttpRequest = (
+	asgiApp: ASGIApplication,
+	request: HttpRequest
+): Promise<HttpResponse> =>
+	new Promise((resolve, reject) => {
+		let sent = false;
+		async function receiveFromJs(): Promise<ReceiveEvent> {
+			if (sent) {
+				// NOTE: I implemented this block just by referring to the spec. However, it is not reached in practice, so it has not been battle-tested.
+				return {
+					type: "http.disconnect"
+				};
+			}
+
+			const event: RequestReceiveEvent = {
+				type: "http.request",
+				more_body: false
+			};
+			if (request.body) {
+				event.body = request.body;
+			}
+
+			console.debug("receive", event);
+			sent = true;
+			return event;
+		}
+
+		let status: number;
+		let headers: { [key: string]: string };
+		let body: Uint8Array = new Uint8Array();
+		async function sendToJs(proxiedEvent: PyProxy): Promise<void> {
+			const event = Object.fromEntries(proxiedEvent.toJs()) as SendEvent;
+			console.debug("send", event);
+			if (event.type === "http.response.start") {
+				status = event.status;
+				headers = asgiHeadersToRecord(event.headers);
+			} else if (event.type === "http.response.body") {
+				body = new Uint8Array([...body, ...event.body]);
+				if (!event.more_body) {
+					const response: HttpResponse = {
+						status,
+						headers,
+						body
+					};
+					console.debug("HTTP response", response);
+					resolve(response);
+				}
+			} else {
+				throw new Error(`Unhandled ASGI event: ${JSON.stringify(event)}`);
+			}
+		}
+
+		// https://asgi.readthedocs.io/en/latest/specs/www.html#http-connection-scope
+		const scope = {
+			type: "http",
+			asgi: {
+				version: "3.0",
+				spec_version: "2.1"
+			},
+			http_version: "1.1",
+			scheme: "http",
+			method: request.method,
+			path: request.path,
+			query_string: request.query_string,
+			root_path: "",
+			headers: headersToASGI(request.headers)
+		};
+
+		asgiApp(scope, receiveFromJs, sendToJs);
+	});
diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/index.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..75e41da4cd597254f747106650bd5c4b73ce3244
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/index.ts
@@ -0,0 +1,315 @@
+/// <reference lib="webworker" />
+/* eslint-env worker */
+
+import type { PyodideInterface } from "pyodide";
+import type {
+	InMessage,
+	InMessageInit,
+	ReplyMessageError,
+	ReplyMessageSuccess
+} from "../message-types";
+import { writeFileWithParents, renameWithParents } from "./file";
+import { verifyRequirements } from "./requirements";
+import { makeHttpRequest } from "./http";
+import scriptRunnerPySource from "./py/script_runner.py?raw";
+import unloadModulesPySource from "./py/unload_modules.py?raw";
+
+importScripts("https://cdn.jsdelivr.net/pyodide/v0.24.0/full/pyodide.js");
+
+let pyodide: PyodideInterface;
+
+let pyodideReadyPromise: undefined | Promise<void> = undefined;
+
+let call_asgi_app_from_js: (
+	scope: unknown,
+	receive: () => Promise<unknown>,
+	send: (event: any) => Promise<void>
+) => Promise<void>;
+let run_script: (path: string) => void;
+let unload_local_modules: (target_dir_path?: string) => void;
+
+async function loadPyodideAndPackages(
+	options: InMessageInit["data"]
+): Promise<void> {
+	console.debug("Loading Pyodide.");
+	pyodide = await loadPyodide({
+		stdout: console.debug,
+		stderr: console.error
+	});
+	console.debug("Pyodide is loaded.");
+
+	console.debug("Mounting files.", options.files);
+	await Promise.all(
+		Object.keys(options.files).map(async (path) => {
+			const file = options.files[path];
+
+			let data: string | ArrayBufferView;
+			if ("url" in file) {
+				console.debug(`Fetch a file from ${file.url}`);
+				data = await fetch(file.url)
+					.then((res) => res.arrayBuffer())
+					.then((buffer) => new Uint8Array(buffer));
+			} else {
+				data = file.data;
+			}
+			const { opts } = options.files[path];
+
+			console.debug(`Write a file "${path}"`);
+			writeFileWithParents(pyodide, path, data, opts);
+		})
+	);
+	console.debug("Files are mounted.");
+
+	console.debug("Loading micropip");
+	await pyodide.loadPackage("micropip");
+	const micropip = pyodide.pyimport("micropip");
+	console.debug("micropip is loaded.");
+
+	const gradioWheelUrls = [
+		options.gradioWheelUrl,
+		options.gradioClientWheelUrl
+	];
+	console.debug("Loading Gradio wheels.", gradioWheelUrls);
+	await micropip.add_mock_package("ffmpy", "0.3.0");
+	await micropip.add_mock_package("aiohttp", "3.8.4");
+	await pyodide.loadPackage(["ssl", "distutils", "setuptools"]);
+	await micropip.install(["markdown-it-py[linkify]~=2.2.0"]); // On 3rd June 2023, markdown-it-py 3.0.0 was released. The `gradio` package depends on its `>=2.0.0` version, so 3.x would be resolved. However, that conflicts with `mdit-py-plugins`'s dependency `markdown-it-py >=1.0.0,<3.0.0`, and micropip currently can't resolve the conflict, so we explicitly install a compatible version of the library here.
+	await micropip.install.callKwargs(gradioWheelUrls, {
+		keep_going: true
+	});
+	console.debug("Gradio wheels are loaded.");
+
+	console.debug("Install packages.", options.requirements);
+	await micropip.install.callKwargs(options.requirements, { keep_going: true });
+	console.debug("Packages are installed.");
+
+	console.debug("Mock os module methods.");
+	// `os.link` is used in `aiofiles` (https://github.com/Tinche/aiofiles/blob/v23.1.0/src/aiofiles/os.py#L31),
+	// which is imported from `gradio.ranged_response` (https://github.com/gradio-app/gradio/blob/v3.32.0/gradio/ranged_response.py#L12).
+	// However, it's not available on Wasm.
+	await pyodide.runPythonAsync(`
+import os
+
+os.link = lambda src, dst: None
+`);
+	console.debug("os module methods are mocked.");
+
+	console.debug("Import gradio package.");
+	// Importing the gradio package takes a long time, so we do it separately.
+	// This is necessary for accurate performance profiling.
+	await pyodide.runPythonAsync(`import gradio`);
+	console.debug("gradio package is imported.");
+
+	console.debug("Define an ASGI wrapper function.");
+	// TODO: Unlike Streamlit, user's code is executed in the global scope,
+	//       so we should not define this function in the global scope.
+	await pyodide.runPythonAsync(`
+# Based on Shiny's App.call_pyodide().
+# https://github.com/rstudio/py-shiny/blob/v0.3.3/shiny/_app.py#L224-L258 +async def _call_asgi_app_from_js(scope, receive, send): + # TODO: Pretty sure there are objects that need to be destroy()'d here? + scope = scope.to_py() + + # ASGI requires some values to be byte strings, not character strings. Those are + # not that easy to create in JavaScript, so we let the JS side pass us strings + # and we convert them to bytes here. + if "headers" in scope: + # JS doesn't have \`bytes\` so we pass as strings and convert here + scope["headers"] = [ + [value.encode("latin-1") for value in header] + for header in scope["headers"] + ] + if "query_string" in scope and scope["query_string"]: + scope["query_string"] = scope["query_string"].encode("latin-1") + if "raw_path" in scope and scope["raw_path"]: + scope["raw_path"] = scope["raw_path"].encode("latin-1") + + async def rcv(): + event = await receive() + return event.to_py() + + async def snd(event): + await send(event) + + app = gradio.wasm_utils.get_registered_app() + if app is None: + raise RuntimeError("Gradio app has not been launched.") + + await app(scope, rcv, snd) +`); + call_asgi_app_from_js = pyodide.globals.get("_call_asgi_app_from_js"); + console.debug("The ASGI wrapper function is defined."); + + console.debug("Mock async libraries."); + // FastAPI uses `anyio.to_thread.run_sync` internally which, however, doesn't work in Wasm environments where the `threading` module is not supported. + // So we mock `anyio.to_thread.run_sync` here not to use threads. + await pyodide.runPythonAsync(` +async def mocked_anyio_to_thread_run_sync(func, *args, cancellable=False, limiter=None): + return func(*args) + +import anyio.to_thread +anyio.to_thread.run_sync = mocked_anyio_to_thread_run_sync + `); + console.debug("Async libraries are mocked."); + + console.debug("Set matplotlib backend."); + // Ref: https://github.com/streamlit/streamlit/blob/1.22.0/lib/streamlit/web/bootstrap.py#L111 + // This backend setting is required to use matplotlib in Wasm environment. 
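+	// ("agg" is a non-interactive backend that renders plots to in-memory
+	// buffers rather than a display, which is what we need inside a worker.)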
+	await pyodide.runPythonAsync(`
+import matplotlib
+matplotlib.use("agg")
+`);
+	console.debug("matplotlib backend is set.");
+
+	console.debug("Set up Python utility functions.");
+	await pyodide.runPythonAsync(scriptRunnerPySource);
+	run_script = pyodide.globals.get("_run_script");
+	await pyodide.runPythonAsync(unloadModulesPySource);
+	unload_local_modules = pyodide.globals.get("unload_local_modules");
+	console.debug("Python utility functions are set up.");
+}
+
+self.onmessage = async (event: MessageEvent<InMessage>): Promise<void> => {
+	const msg = event.data;
+	console.debug("worker.onmessage", msg);
+
+	const messagePort = event.ports[0];
+
+	try {
+		if (msg.type === "init") {
+			pyodideReadyPromise = loadPyodideAndPackages(msg.data);
+
+			const replyMessage: ReplyMessageSuccess = {
+				type: "reply:success",
+				data: null
+			};
+			messagePort.postMessage(replyMessage);
+			return;
+		}
+
+		if (pyodideReadyPromise == null) {
+			throw new Error("Pyodide initialization has not started.");
+		}
+
+		await pyodideReadyPromise;
+
+		switch (msg.type) {
+			case "echo": {
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: msg.data
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "run-python-code": {
+				unload_local_modules();
+
+				await pyodide.runPythonAsync(msg.data.code);
+
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: null // We don't send back the execution result because it's not needed for our purpose, and sometimes the result is of type `pyodide.ffi.PyProxy` which cannot be cloned across threads and causes an error.
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "run-python-file": {
+				unload_local_modules();
+
+				run_script(msg.data.path);
+
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: null
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "http-request": {
+				const request = msg.data.request;
+				const response = await makeHttpRequest(call_asgi_app_from_js, request);
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: {
+						response
+					}
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "file:write": {
+				const { path, data: fileData, opts } = msg.data;
+
+				console.debug(`Write a file "${path}"`);
+				writeFileWithParents(pyodide, path, fileData, opts);
+
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: null
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "file:rename": {
+				const { oldPath, newPath } = msg.data;
+
+				console.debug(`Rename "${oldPath}" to "${newPath}"`);
+				renameWithParents(pyodide, oldPath, newPath);
+
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: null
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "file:unlink": {
+				const { path } = msg.data;
+
+				console.debug(`Remove "${path}"`);
+				pyodide.FS.unlink(path);
+
+				const replyMessage: ReplyMessageSuccess = {
+					type: "reply:success",
+					data: null
+				};
+				messagePort.postMessage(replyMessage);
+				break;
+			}
+			case "install": {
+				const { requirements } = msg.data;
+
+				const micropip = pyodide.pyimport("micropip");
+
+				console.debug("Install the requirements:", requirements);
+				verifyRequirements(requirements); // Blocks disallowed wheel URL schemes.
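+				// (Per requirements.ts below, "emfs:" and "file:" URLs are rejected
+				// here, while plain package names and http(s) wheel URLs pass through.)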
+				await micropip.install
+					.callKwargs(requirements, { keep_going: true })
+					.then(() => {
+						console.debug("Successfully installed");
+
+						const replyMessage: ReplyMessageSuccess = {
+							type: "reply:success",
+							data: null
+						};
+						messagePort.postMessage(replyMessage);
+					});
+				break;
+			}
+		}
+	} catch (error) {
+		const replyMessage: ReplyMessageError = {
+			type: "reply:error",
+			error: error as Error
+		};
+		messagePort.postMessage(replyMessage);
+	}
+};
diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/py/.editorconfig b/testbed/gradio-app__gradio/js/wasm/src/webworker/py/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..74d649822aa4ae7f35c8493c5f3caf5c76e36b8f
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/py/.editorconfig
@@ -0,0 +1,3 @@
+[*.py]
+indent_style = space
+indent_size = 4
diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/py/script_runner.py b/testbed/gradio-app__gradio/js/wasm/src/webworker/py/script_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bf014c62747006b9fc9cfd0a453987a09c2bf18
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/py/script_runner.py
@@ -0,0 +1,120 @@
+import tokenize
+import types
+import sys
+
+# BSD 3-Clause License
+#
+# - Copyright (c) 2008-Present, IPython Development Team
+# - Copyright (c) 2001-2007, Fernando Perez
+# - Copyright (c) 2001, Janko Hauser
+# - Copyright (c) 2001, Nathaniel Gray
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
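+
+# (Context: the worker in webworker/index.ts pulls `_run_script` below out of
+# the Python globals via `pyodide.globals.get("_run_script")` and calls it with
+# the app's file path for each "run-python-file" message.)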
+ +# Code modified from IPython (BSD license) +# Source: https://github.com/ipython/ipython/blob/master/IPython/utils/syspathcontext.py#L42 +class modified_sys_path: + """A context for prepending a directory to sys.path for a second.""" + + def __init__(self, script_path: str): + self._script_path = script_path + self._added_path = False + + def __enter__(self): + if self._script_path not in sys.path: + sys.path.insert(0, self._script_path) + self._added_path = True + + def __exit__(self, type, value, traceback): + if self._added_path: + try: + sys.path.remove(self._script_path) + except ValueError: + # It's already removed. + pass + + # Returning False causes any exceptions to be re-raised. + return False + + +# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +def _new_module(name: str) -> types.ModuleType: + """Create a new module with the given name.""" + return types.ModuleType(name) + + +def _run_script(script_path: str) -> None: + # This function is based on the following code from Streamlit: + # https://github.com/streamlit/streamlit/blob/1.24.0/lib/streamlit/runtime/scriptrunner/script_runner.py#L519-L554 + + with tokenize.open(script_path) as f: + filebody = f.read() + + # NOTE: In Streamlit, the bytecode caching mechanism has been introduced. + # However, we skipped it here for simplicity and because Gradio doesn't need to rerun the script so frequently, + # while we may do it in the future. + bytecode = compile( # type: ignore + filebody, + # Pass in the file path so it can show up in exceptions. + script_path, + # We're compiling entire blocks of Python, so we need "exec" + # mode (as opposed to "eval" or "single"). + mode="exec", + # Don't inherit any flags or "future" statements. + flags=0, + dont_inherit=1, + # Use the default optimization options. + optimize=-1, + ) + + module = _new_module("__main__") + + # Install the fake module as the __main__ module. This allows + # the pickle module to work inside the user's code, since it now + # can know the module where the pickled objects stem from. + # IMPORTANT: This means we can't use "if __name__ == '__main__'" in + # our code, as it will point to the wrong module!!! + sys.modules["__main__"] = module + + # Add special variables to the module's globals dict. + module.__dict__["__file__"] = script_path + + with modified_sys_path(script_path): + exec(bytecode, module.__dict__) diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/py/unload_modules.py b/testbed/gradio-app__gradio/js/wasm/src/webworker/py/unload_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..569a685e486c1ee880f231f611aa9b166d6f4834 --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/py/unload_modules.py @@ -0,0 +1,165 @@ +# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022) +# Copyright (c) Yuichiro Tachibana (2023) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import fnmatch +import os +import sys +import types +from typing import Optional, Set + +LOGGER = logging.getLogger(__name__) + +# +# Copied from https://github.com/streamlit/streamlit/blob/1.24.0/lib/streamlit/file_util.py +# + +def file_is_in_folder_glob(filepath, folderpath_glob) -> bool: + """Test whether a file is in some folder with globbing support. + + Parameters + ---------- + filepath : str + A file path. + folderpath_glob: str + A path to a folder that may include globbing. + + """ + # Make the glob always end with "/*" so we match files inside subfolders of + # folderpath_glob. + if not folderpath_glob.endswith("*"): + if folderpath_glob.endswith("/"): + folderpath_glob += "*" + else: + folderpath_glob += "/*" + + file_dir = os.path.dirname(filepath) + "/" + return fnmatch.fnmatch(file_dir, folderpath_glob) + + +def get_directory_size(directory: str) -> int: + """Return the size of a directory in bytes.""" + total_size = 0 + for dirpath, _, filenames in os.walk(directory): + for f in filenames: + fp = os.path.join(dirpath, f) + total_size += os.path.getsize(fp) + return total_size + + +def file_in_pythonpath(filepath) -> bool: + """Test whether a filepath is in the same folder of a path specified in the PYTHONPATH env variable. + + + Parameters + ---------- + filepath : str + An absolute file path. + + Returns + ------- + boolean + True if contained in PYTHONPATH, False otherwise. False if PYTHONPATH is not defined or empty. + + """ + pythonpath = os.environ.get("PYTHONPATH", "") + if len(pythonpath) == 0: + return False + + absolute_paths = [os.path.abspath(path) for path in pythonpath.split(os.pathsep)] + return any( + file_is_in_folder_glob(os.path.normpath(filepath), path) + for path in absolute_paths + ) + +# +# Copied from https://github.com/streamlit/streamlit/blob/1.24.0/lib/streamlit/watcher/local_sources_watcher.py +# + +def get_module_paths(module: types.ModuleType) -> Set[str]: + paths_extractors = [ + # https://docs.python.org/3/reference/datamodel.html + # __file__ is the pathname of the file from which the module was loaded + # if it was loaded from a file. + # The __file__ attribute may be missing for certain types of modules + lambda m: [m.__file__], + # https://docs.python.org/3/reference/import.html#__spec__ + # The __spec__ attribute is set to the module spec that was used + # when importing the module. one exception is __main__, + # where __spec__ is set to None in some cases. + # https://www.python.org/dev/peps/pep-0451/#id16 + # "origin" in an import context means the system + # (or resource within a system) from which a module originates + # ... It is up to the loader to decide on how to interpret + # and use a module's origin, if at all. 
+ lambda m: [m.__spec__.origin], + # https://www.python.org/dev/peps/pep-0420/ + # Handling of "namespace packages" in which the __path__ attribute + # is a _NamespacePath object with a _path attribute containing + # the various paths of the package. + lambda m: [p for p in m.__path__._path], + ] + + all_paths = set() + for extract_paths in paths_extractors: + potential_paths = [] + try: + potential_paths = extract_paths(module) + except AttributeError: + # Some modules might not have __file__ or __spec__ attributes. + pass + except Exception as e: + LOGGER.warning(f"Examining the path of {module.__name__} raised: {e}") + + all_paths.update( + [os.path.abspath(str(p)) for p in potential_paths if _is_valid_path(p)] + ) + return all_paths + + +def _is_valid_path(path: Optional[str]) -> bool: + return isinstance(path, str) and (os.path.isfile(path) or os.path.isdir(path)) + + +# +# Original code +# + +def unload_local_modules(target_dir_path: str = "."): + """ Unload all modules that are in the target directory or in a subdirectory of it. + It is necessary to unload modules before re-executing a script that imports the modules, + so that the new version of the modules is loaded. + The module unloading feature is extracted from Streamlit's LocalSourcesWatcher (https://github.com/streamlit/streamlit/blob/1.24.0/lib/streamlit/watcher/local_sources_watcher.py) + and packaged as a standalone function. + """ + target_dir_path = os.path.abspath(target_dir_path) + loaded_modules = {} # filepath -> module_name + + # Copied from `LocalSourcesWatcher.update_watched_modules()` + module_paths = { + name: get_module_paths(module) + for name, module in dict(sys.modules).items() + } + + # Copied from `LocalSourcesWatcher._register_necessary_watchers()` + for name, paths in module_paths.items(): + for path in paths: + if file_is_in_folder_glob(path, target_dir_path) or file_in_pythonpath(path): + loaded_modules[path] = name + + # Copied from `LocalSourcesWatcher.on_file_changed()` + for module_name in loaded_modules.values(): + if module_name is not None and module_name in sys.modules: + del sys.modules[module_name] diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/requirements.test.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/requirements.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..813140285ab97f0ed09dd782dbd4d2528f8c43fe --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/requirements.test.ts @@ -0,0 +1,29 @@ +import { describe, it, expect } from "vitest"; +import { verifyRequirements } from "./requirements"; + +describe("verifyRequirements", () => { + const allowedRequirements = [ + [ + "http://files.pythonhosted.org/packages/62/9c/0467dea0a064a998f94c33d03988f33efc744de1a2a550b56b38910cafa2/streamlit-1.13.0-py2.py3-none-any.whl" + ], + [ + "https://files.pythonhosted.org/packages/62/9c/0467dea0a064a998f94c33d03988f33efc744de1a2a550b56b38910cafa2/streamlit-1.13.0-py2.py3-none-any.whl" + ] + ]; + allowedRequirements.forEach((requirements) => { + it(`allows http: and https: schemes (requirements=${ + requirements[0].split(":")[0] + })`, () => { + expect(() => verifyRequirements(requirements)).not.toThrow(); + }); + }); + + const notAllowedRequirements = [["emfs:/tmp/foo.whl"], ["file:/tmp/foo.whl"]]; + notAllowedRequirements.forEach((requirements) => { + it(`throws an error if the requirements include a not allowed scheme (requirements=${JSON.stringify( + requirements + )})`, () => { + expect(() => 
verifyRequirements(requirements)).toThrow();
+		});
+	});
+});
diff --git a/testbed/gradio-app__gradio/js/wasm/src/webworker/requirements.ts b/testbed/gradio-app__gradio/js/wasm/src/webworker/requirements.ts
new file mode 100644
index 0000000000000000000000000000000000000000..de45cdf388a6289fc26aeaa3e12d59ac61bd54f2
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/webworker/requirements.ts
@@ -0,0 +1,18 @@
+export function verifyRequirements(requirements: string[]): void {
+	requirements.forEach((req) => {
+		let url: URL;
+		try {
+			url = new URL(req);
+		} catch {
+			// `req` is not a URL -> OK
+			return;
+		}
+
+		// Ref: The scheme checker in the micropip implementation is https://github.com/pyodide/micropip/blob/v0.1.0/micropip/_compat_in_pyodide.py#L23-L26
+		if (url.protocol === "emfs:" || url.protocol === "file:") {
+			throw new Error(
+				`"emfs:" and "file:" protocols are not allowed for the requirement (${req})`
+			);
+		}
+	});
+}
diff --git a/testbed/gradio-app__gradio/js/wasm/src/worker-proxy.ts b/testbed/gradio-app__gradio/js/wasm/src/worker-proxy.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f199060d64bda7d3d43955e61ccad5ed23047861
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/src/worker-proxy.ts
@@ -0,0 +1,164 @@
+import { CrossOriginWorkerMaker as Worker } from "./cross-origin-worker";
+import type {
+	EmscriptenFile,
+	EmscriptenFileUrl,
+	HttpRequest,
+	HttpResponse,
+	InMessage,
+	ReplyMessage
+} from "./message-types";
+
+export interface WorkerProxyOptions {
+	gradioWheelUrl: string;
+	gradioClientWheelUrl: string;
+	files: Record<string, EmscriptenFile | EmscriptenFileUrl>;
+	requirements: string[];
+}
+
+export class WorkerProxy {
+	private worker: globalThis.Worker;
+
+	constructor(options: WorkerProxyOptions) {
+		console.debug("WorkerProxy.constructor(): Create a new worker.");
+		// Loading a worker here relies on Vite's support for WebWorkers (https://vitejs.dev/guide/features.html#web-workers),
+		// assuming that this module is imported from the Gradio frontend (`@gradio/app`), which is bundled with Vite.
+		// HACK: Use `CrossOriginWorkerMaker` imported as `Worker` here.
+		// Read the comment in `cross-origin-worker.ts` for the detail.
+		const workerMaker = new Worker(new URL("./webworker.js", import.meta.url));
+		this.worker = workerMaker.worker;
+
+		this.postMessageAsync({
+			type: "init",
+			data: {
+				gradioWheelUrl: options.gradioWheelUrl,
+				gradioClientWheelUrl: options.gradioClientWheelUrl,
+				files: options.files,
+				requirements: options.requirements
+			}
+		}).then(() => {
+			console.debug("WorkerProxy.constructor(): Initialization is done.");
+		});
+	}
+
+	public async runPythonCode(code: string): Promise<void> {
+		await this.postMessageAsync({
+			type: "run-python-code",
+			data: {
+				code
+			}
+		});
+	}
+
+	public async runPythonFile(path: string): Promise<void> {
+		await this.postMessageAsync({
+			type: "run-python-file",
+			data: {
+				path
+			}
+		});
+	}
+
+	// A wrapper for this.worker.postMessage(). Unlike that function, which
+	// returns void immediately, this function returns a promise, which resolves
+	// when a ReplyMessage is received from the worker.
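+	// (A fresh MessageChannel is created per call, so each reply stays paired
+	// with its own request and concurrent calls cannot receive each other's
+	// replies.)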
+	// The original implementation is in https://github.com/rstudio/shinylive/blob/v0.1.2/src/pyodide-proxy.ts#L404-L418
+	private postMessageAsync(msg: InMessage): Promise<unknown> {
+		return new Promise((resolve, reject) => {
+			const channel = new MessageChannel();
+
+			channel.port1.onmessage = (e) => {
+				channel.port1.close();
+				const msg = e.data as ReplyMessage;
+				if (msg.type === "reply:error") {
+					reject(msg.error);
+					return;
+				}
+
+				resolve(msg.data);
+			};
+
+			this.worker.postMessage(msg, [channel.port2]);
+		});
+	}
+
+	public async httpRequest(request: HttpRequest): Promise<HttpResponse> {
+		console.debug("WorkerProxy.httpRequest()", request);
+		const result = await this.postMessageAsync({
+			type: "http-request",
+			data: {
+				request
+			}
+		});
+		const response = (result as { response: HttpResponse }).response;
+
+		if (Math.floor(response.status / 100) !== 2) {
+			let bodyText: string;
+			let bodyJson: unknown;
+			try {
+				bodyText = new TextDecoder().decode(response.body);
+			} catch (e) {
+				bodyText = "(failed to decode body)";
+			}
+			try {
+				bodyJson = JSON.parse(bodyText);
+			} catch (e) {
+				bodyJson = "(failed to parse body as JSON)";
+			}
+			console.error("Wasm HTTP error", {
+				request,
+				response,
+				bodyText,
+				bodyJson
+			});
+		}
+
+		return response;
+	}
+
+	public writeFile(
+		path: string,
+		data: string | ArrayBufferView,
+		opts?: Record<string, any>
+	): Promise<void> {
+		return this.postMessageAsync({
+			type: "file:write",
+			data: {
+				path,
+				data,
+				opts
+			}
+		}) as Promise<void>;
+	}
+
+	public renameFile(oldPath: string, newPath: string): Promise<void> {
+		return this.postMessageAsync({
+			type: "file:rename",
+			data: {
+				oldPath,
+				newPath
+			}
+		}) as Promise<void>;
+	}
+
+	public unlink(path: string): Promise<void> {
+		return this.postMessageAsync({
+			type: "file:unlink",
+			data: {
+				path
+			}
+		}) as Promise<void>;
+	}
+
+	public install(requirements: string[]): Promise<void> {
+		return this.postMessageAsync({
+			type: "install",
+			data: {
+				requirements
+			}
+		}) as Promise<void>;
+	}
+
+	public terminate(): void {
+		this.worker.terminate();
+	}
+}
diff --git a/testbed/gradio-app__gradio/js/wasm/tsconfig.json b/testbed/gradio-app__gradio/js/wasm/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..06207186c8010d73adfffc49b0ef54a8605db137
--- /dev/null
+++ b/testbed/gradio-app__gradio/js/wasm/tsconfig.json
@@ -0,0 +1,105 @@
+{
+	"compilerOptions": {
+		/* Visit https://aka.ms/tsconfig to read more about this file */
+
+		/* Projects */
+		// "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
+		// "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
+		// "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
+		// "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
+		// "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
+		// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
+
+		/* Language and Environment */
+		"target": "es2016" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */,
+		// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
+		// "jsx": "preserve", /* Specify what JSX code is generated. */
+		// "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */
+		// "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
+		// "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
+		// "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
+		// "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
+		// "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
+		// "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
+		// "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
+		// "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */
+
+		/* Modules */
+		"module": "ESNext" /* Specify what module code is generated. */,
+		// "rootDir": "./", /* Specify the root folder within your source files. */
+		// "moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */
+		// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
+		// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
+		// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
+		// "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
+		// "types": [], /* Specify type package names to be included without being referenced in a source file. */
+		// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
+		// "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
+		// "resolveJsonModule": true, /* Enable importing .json files. */
+		// "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
+
+		/* JavaScript Support */
+		// "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
+		// "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
+		// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
+
+		/* Emit */
+		"declaration": true /* Generate .d.ts files from TypeScript and JavaScript files in your project. */,
+		"declarationMap": true /* Create sourcemaps for d.ts files. */,
+		// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
+		// "sourceMap": true, /* Create source map files for emitted JavaScript files. */
+		// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
+		"outDir": "./dist" /* Specify an output folder for all emitted files. */,
+		// "removeComments": true, /* Disable emitting comments. */
+		// "noEmit": true, /* Disable emitting files from a compilation. */
+		// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. 
*/ + // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */ + // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ + // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ + // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ + // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ + // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ + // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ + // "newLine": "crlf", /* Set the newline character for emitting files. */ + // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */ + // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */ + // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ + // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */ + // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ + // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ + + /* Interop Constraints */ + // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ + // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ + "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */, + // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ + "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */, + + /* Type Checking */ + "strict": true /* Enable all strict type-checking options. */, + // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ + // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */ + // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ + // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */ + // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ + // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */ + // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */ + // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ + // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ + // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ + // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ + // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. 
*/ + // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ + // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */ + // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ + // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ + // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ + // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ + + /* Completeness */ + // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ + "skipLibCheck": true /* Skip type checking all .d.ts files. */ + }, + "include": ["src/**/*"], + "exclude": ["src/webworker/**/*"] // The worker code is bundled by Vite separately. See its config file. +} diff --git a/testbed/gradio-app__gradio/js/wasm/vite.worker.config.js b/testbed/gradio-app__gradio/js/wasm/vite.worker.config.js new file mode 100644 index 0000000000000000000000000000000000000000..3d26320fc013460225f7988162f2b6478f175e3e --- /dev/null +++ b/testbed/gradio-app__gradio/js/wasm/vite.worker.config.js @@ -0,0 +1,34 @@ +import path from "path"; +import { defineConfig } from "vite"; + +/** + * We bundle the worker file before packaging, while other files are only TS-transpiled. + * The consumer of this package, `@gradio/app`, will be bundled with Vite, + * and Vite only supports module-type WebWorkers (`new Worker("...", { type: "module" })`) to handle `import` in the worker file, + * because in the dev mode it doesn't bundle the worker file and just relies on the browser's native support for module-type workers to resolve the imports. + * However, we need to use `importScripts()` in the worker to load Pyodide from the CDN, which is only supported by classic WebWorkers (`new Worker("...")`), + * while we still want to use `import` in the worker to modularize the code. + * So, we bundle the worker file to resolve `import`s here before exporting, preserving `importScripts()` in the bundled file, + * and load the bundled worker file on `@gradio/app` as a classic WebWorker. + * + * Note: We tried the following approaches, but they failed: + * 1. Just TS-transpile the worker file like other files into `worker.js`, and use it like `new Worker("worker.js")`. + * It failed because `tsc` reserves `importScripts()` and also appends `export {};` to the end of the file to specify it as a module (`https://github.com/microsoft/TypeScript/issues/41513`), + * however, `importScripts()` is only supported by classic WebWorkers, and `export {};` is not supported by classic WebWorkers. + * 2. Use ESM import instead of `importScripts()`, which is (experimentally?) supported by Pyodide since v0.20.0 (https://pyodide.org/en/stable/project/changelog.html#javascript-package), + * using `import { loadPyodide } from "https://cdn.jsdelivr.net/pyodide/v0.23.2/full/pyodide.js";` in the worker file, instead of `importScripts(...)`. + * It was successful in the dev mode, but failed in the prod mode, which has this problem: https://github.com/pyodide/pyodide/issues/2217#issuecomment-1328344562. 
+ */ + +export default defineConfig({ + build: { + outDir: "dist", + rollupOptions: { + input: path.join(__dirname, "src/webworker/index.ts"), + // Ref: https://github.com/rollup/rollup/issues/2616#issuecomment-1431551704 + output: { + entryFileNames: "webworker.js" + } + } + } +}); diff --git a/testbed/gradio-app__gradio/package.json b/testbed/gradio-app__gradio/package.json new file mode 100644 index 0000000000000000000000000000000000000000..b6d08cfadfadcd28e4b392b2ae56947d006493c6 --- /dev/null +++ b/testbed/gradio-app__gradio/package.json @@ -0,0 +1,108 @@ +{ + "name": "gradio-ui", + "version": "0.0.1", + "description": "Gradio UI packages", + "scripts": { + "dev": "pnpm css && pnpm --filter @gradio/client build && pnpm --filter @gradio/app dev", + "css": "pnpm --filter @gradio/theme generate", + "build": "pnpm css && pnpm --filter @gradio/client build && pnpm --filter @gradio/app build:local --emptyOutDir", + "build:cdn": "pnpm --filter @gradio/client build && pnpm --filter @gradio/app build:cdn --emptyOutDir", + "build:website": "pnpm --filter @gradio/app build:website --emptyOutDir", + "build:cdn-local": "TEST_CDN=TRUE pnpm build:cdn", + "preview:cdn-server": "sirv ./gradio/templates/cdn --single --port=4321 --cors", + "preview:cdn-app": "pnpm --filter @gradio/cdn-test dev", + "preview:cdn-local": "run-p preview:cdn-server preview:cdn-app", + "format:check": "prettier --ignore-path .config/.prettierignore --check --config .config/.prettierrc.json --plugin prettier-plugin-svelte .", + "format:write": "prettier --ignore-path .config/.prettierignore --write --config .config/.prettierrc.json --plugin prettier-plugin-svelte .", + "lint": "ESLINT_USE_FLAT_CONFIG=true eslint -c .config/eslint.config.js js client/js", + "ts:check": "svelte-check --tsconfig tsconfig.json --threshold error", + "test": "pnpm --filter @gradio/client build && vitest dev --config .config/vitest.config.ts", + "test:run": "pnpm --filter @gradio/client build && vitest run --config .config/vitest.config.ts --reporter=verbose", + "test:node": "TEST_MODE=node pnpm vitest run --config .config/vitest.config.ts", + "test:browser": "pnpm --filter @gradio/app test:browser", + "test:browser:full": "run-s build test:browser", + "test:browser:verbose": "pnpm test:browser", + "test:browser:dev": "pnpm --filter @gradio/app test:browser:dev", + "storybook": "storybook dev -p 6006 --config-dir js/storybook", + "build-storybook": "storybook build --config-dir js/storybook", + "ci:version": "changeset version && pnpm i --lockfile-only && node ./.changeset/fix_changelogs.cjs", + "ci:publish": "pnpm publish --no-git-checks --access public -r --filter=@gradio/*", + "chromatic": "chromatic", + "test:ct": "playwright test -c ./.config/playwright-ct.config.ts", + "website": "pnpm --filter @gradio/website build" + }, + "type": "module", + "author": "", + "license": "ISC", + "private": true, + "dependencies": { + "@changesets/changelog-github": "^0.4.8", + "@changesets/cli": "^2.26.1", + "@changesets/get-github-info": "^0.5.2", + "@csstools/postcss-global-data": "^2.0.0", + "@gradio/tootils": "workspace:^", + "@manypkg/get-packages": "^2.2.0", + "@playwright/experimental-ct-svelte": "^1.37.1", + "@playwright/test": "^1.37.1", + "@sveltejs/vite-plugin-svelte": "^2.4.2", + "@tailwindcss/forms": "^0.5.0", + "@testing-library/dom": "^9.0.0", + "@testing-library/jest-dom": "^6.0.0", + "@testing-library/user-event": "^14.0.0", + "@types/node": "^20.3.1", + "@types/testing-library__jest-dom": "^5.14.6", + "@typescript-eslint/eslint-plugin": 
"^6.2.1", + "@typescript-eslint/parser": "^6.2.1", + "autoprefixer": "^10.4.4", + "babylonjs": "^5.17.1", + "babylonjs-loaders": "^5.17.1", + "eslint": "^8.46.0", + "eslint-plugin-svelte": "^2.32.4", + "globals": "^13.20.0", + "jsdom": "^22.1.0", + "kleur": "^4.1.5", + "msw": "^1.2.2", + "node-html-parser": "^6.0.0", + "npm-run-all": "^4.1.5", + "plotly.js-dist-min": "^2.10.1", + "polka": "^1.0.0-next.22", + "pollen-css": "^4.6.1", + "postcss": "^8.4.27", + "postcss-custom-media": "10", + "postcss-nested": "^5.0.6", + "postcss-prefix-selector": "^1.16.0", + "prettier": "^3.0.0", + "prettier-plugin-css-order": "^2.0.0", + "prettier-plugin-svelte": "^3.0.0", + "sirv": "^2.0.2", + "sirv-cli": "^2.0.2", + "svelte": "^4.0.0", + "svelte-check": "^3.4.4", + "svelte-i18n": "^3.6.0", + "svelte-preprocess": "^5.0.4", + "tailwindcss": "^3.1.6", + "tinyspy": "^2.0.0", + "typescript": "^5.0.0", + "vite": "^4.3.9", + "vitest": "^0.34.0" + }, + "devDependencies": { + "@storybook/addon-a11y": "^7.0.24", + "@storybook/addon-essentials": "^7.0.23", + "@storybook/addon-interactions": "^7.0.23", + "@storybook/addon-links": "^7.0.23", + "@storybook/addon-styling": "^1.3.1", + "@storybook/addon-svelte-csf": "^4.0.0", + "@storybook/blocks": "^7.0.23", + "@storybook/manager-api": "^7.0.23", + "@storybook/svelte": "^7.0.23", + "@storybook/svelte-vite": "^7.0.23", + "@storybook/testing-library": "^0.2.0", + "@storybook/theming": "^7.0.23", + "chromatic": "^7.0.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "storybook": "^7.0.23", + "wikidata-lang": "^4.1.2" + } +} diff --git a/testbed/gradio-app__gradio/pnpm-lock.yaml b/testbed/gradio-app__gradio/pnpm-lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21734c9980da44ed56f8964a3a3918a6f3d22c9b --- /dev/null +++ b/testbed/gradio-app__gradio/pnpm-lock.yaml @@ -0,0 +1,15668 @@ +lockfileVersion: '6.1' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@changesets/changelog-github': + specifier: ^0.4.8 + version: 0.4.8 + '@changesets/cli': + specifier: ^2.26.1 + version: 2.26.1 + '@changesets/get-github-info': + specifier: ^0.5.2 + version: 0.5.2 + '@csstools/postcss-global-data': + specifier: ^2.0.0 + version: 2.0.0(postcss@8.4.27) + '@gradio/tootils': + specifier: workspace:^ + version: link:js/tootils + '@manypkg/get-packages': + specifier: ^2.2.0 + version: 2.2.0 + '@playwright/experimental-ct-svelte': + specifier: ^1.37.1 + version: 1.37.1(@types/node@20.3.1)(less@4.1.3)(svelte@4.0.0)(vite@4.3.9) + '@playwright/test': + specifier: ^1.37.1 + version: 1.37.1 + '@sveltejs/vite-plugin-svelte': + specifier: ^2.4.2 + version: 2.4.2(svelte@4.0.0)(vite@4.3.9) + '@tailwindcss/forms': + specifier: ^0.5.0 + version: 0.5.0(tailwindcss@3.1.6) + '@testing-library/dom': + specifier: ^9.0.0 + version: 9.0.0 + '@testing-library/jest-dom': + specifier: ^6.0.0 + version: 6.0.0(vitest@0.34.0) + '@testing-library/user-event': + specifier: ^14.0.0 + version: 14.0.0(@testing-library/dom@9.0.0) + '@types/node': + specifier: ^20.3.1 + version: 20.3.1 + '@types/testing-library__jest-dom': + specifier: ^5.14.6 + version: 5.14.6 + '@typescript-eslint/eslint-plugin': + specifier: ^6.2.1 + version: 6.2.1(@typescript-eslint/parser@6.2.1)(eslint@8.46.0)(typescript@5.1.3) + '@typescript-eslint/parser': + specifier: ^6.2.1 + version: 6.2.1(eslint@8.46.0)(typescript@5.1.3) + autoprefixer: + specifier: ^10.4.4 + version: 10.4.4(postcss@8.4.27) + babylonjs: + specifier: ^5.17.1 + version: 5.18.0 + 
babylonjs-loaders: + specifier: ^5.17.1 + version: 5.18.0 + eslint: + specifier: ^8.46.0 + version: 8.46.0 + eslint-plugin-svelte: + specifier: ^2.32.4 + version: 2.32.4(eslint@8.46.0)(svelte@4.0.0) + globals: + specifier: ^13.20.0 + version: 13.20.0 + jsdom: + specifier: ^22.1.0 + version: 22.1.0 + kleur: + specifier: ^4.1.5 + version: 4.1.5 + msw: + specifier: ^1.2.2 + version: 1.2.2(typescript@5.1.3) + node-html-parser: + specifier: ^6.0.0 + version: 6.0.0 + npm-run-all: + specifier: ^4.1.5 + version: 4.1.5 + plotly.js-dist-min: + specifier: ^2.10.1 + version: 2.11.1 + polka: + specifier: ^1.0.0-next.22 + version: 1.0.0-next.22 + pollen-css: + specifier: ^4.6.1 + version: 4.6.1 + postcss: + specifier: ^8.4.27 + version: 8.4.27 + postcss-custom-media: + specifier: '10' + version: 10.0.0(postcss@8.4.27) + postcss-nested: + specifier: ^5.0.6 + version: 5.0.6(postcss@8.4.27) + postcss-prefix-selector: + specifier: ^1.16.0 + version: 1.16.0(postcss@8.4.27) + prettier: + specifier: ^3.0.0 + version: 3.0.0 + prettier-plugin-css-order: + specifier: ^2.0.0 + version: 2.0.0(postcss@8.4.27)(prettier@3.0.0) + prettier-plugin-svelte: + specifier: ^3.0.0 + version: 3.0.0(prettier@3.0.0)(svelte@4.0.0) + sirv: + specifier: ^2.0.2 + version: 2.0.2 + sirv-cli: + specifier: ^2.0.2 + version: 2.0.2 + svelte: + specifier: ^4.0.0 + version: 4.0.0 + svelte-check: + specifier: ^3.4.4 + version: 3.4.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@4.0.0) + svelte-i18n: + specifier: ^3.6.0 + version: 3.7.0(svelte@4.0.0) + svelte-preprocess: + specifier: ^5.0.4 + version: 5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@4.0.0)(typescript@5.1.3) + tailwindcss: + specifier: ^3.1.6 + version: 3.1.6(postcss@8.4.27) + tinyspy: + specifier: ^2.0.0 + version: 2.1.1 + typescript: + specifier: ^5.0.0 + version: 5.1.3 + vite: + specifier: ^4.3.9 + version: 4.3.9(@types/node@20.3.1)(less@4.1.3) + vitest: + specifier: ^0.34.0 + version: 0.34.0(jsdom@22.1.0)(less@4.1.3) + devDependencies: + '@storybook/addon-a11y': + specifier: ^7.0.24 + version: 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-essentials': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-interactions': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-links': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-styling': + specifier: ^1.3.1 + version: 1.3.1(less@4.1.3)(postcss@8.4.27)(react-dom@18.2.0)(react@18.2.0)(webpack@5.88.1) + '@storybook/addon-svelte-csf': + specifier: ^4.0.0 + version: 4.0.0(@storybook/svelte@7.0.23)(@storybook/theming@7.0.23)(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@4.0.0)(vite@4.3.9) + '@storybook/blocks': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/manager-api': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/svelte': + specifier: ^7.0.23 + version: 7.0.23(svelte@4.0.0) + '@storybook/svelte-vite': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0)(typescript@5.1.3)(vite@4.3.9) + '@storybook/testing-library': + specifier: ^0.2.0 + version: 0.2.0 + '@storybook/theming': + specifier: ^7.0.23 + version: 7.0.23(react-dom@18.2.0)(react@18.2.0) + chromatic: + specifier: ^7.0.0 + version: 7.0.0 + react: + specifier: ^18.2.0 + version: 18.2.0 + react-dom: + specifier: ^18.2.0 + version: 18.2.0(react@18.2.0) + storybook: + specifier: ^7.0.23 + version: 7.0.23 + wikidata-lang: + specifier: 
^4.1.2 + version: 4.1.2 + + client/js: + dependencies: + bufferutil: + specifier: ^4.0.7 + version: 4.0.7 + semiver: + specifier: ^1.1.0 + version: 1.1.0 + ws: + specifier: ^8.13.0 + version: 8.13.0(bufferutil@4.0.7) + devDependencies: + '@types/ws': + specifier: ^8.5.4 + version: 8.5.4 + esbuild: + specifier: ^0.19.0 + version: 0.19.0 + + client/python/gradio_client: {} + + gradio: {} + + js/_cdn-test: + devDependencies: + vite: + specifier: ^4.0.0 + version: 4.3.9(@types/node@20.3.1)(less@4.1.3) + + js/_spaces-test: + dependencies: + '@gradio/client': + specifier: workspace:^ + version: link:../../client/js + '@gradio/form': + specifier: workspace:^ + version: link:../form + '@gradio/theme': + specifier: workspace:^ + version: link:../theme + devDependencies: + '@sveltejs/adapter-auto': + specifier: ^2.0.0 + version: 2.0.1(@sveltejs/kit@1.16.3) + '@sveltejs/kit': + specifier: ^1.5.0 + version: 1.16.3(svelte@3.57.0)(vite@4.3.5) + prettier: + specifier: ^3.0.0 + version: 3.0.0 + prettier-plugin-svelte: + specifier: ^3.0.0 + version: 3.0.0(prettier@3.0.0)(svelte@3.57.0) + svelte: + specifier: ^3.54.0 + version: 3.57.0 + svelte-check: + specifier: ^3.0.1 + version: 3.1.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@3.57.0) + typescript: + specifier: ^5.0.0 + version: 5.0.4 + vite: + specifier: ^4.3.0 + version: 4.3.5(@types/node@20.3.1)(less@4.1.3) + + js/_website: + dependencies: + '@sindresorhus/slugify': + specifier: ^2.2.0 + version: 2.2.0 + '@sveltejs/adapter-vercel': + specifier: ^3.0.3 + version: 3.0.3(@sveltejs/kit@1.16.3) + hast-util-to-string: + specifier: ^3.0.0 + version: 3.0.0 + mdsvex: + specifier: ^0.11.0 + version: 0.11.0(svelte@3.59.2) + postcss: + specifier: '>=8.3.3 <9.0.0' + version: 8.4.21 + devDependencies: + '@sveltejs/adapter-auto': + specifier: ^2.0.0 + version: 2.0.1(@sveltejs/kit@1.16.3) + '@sveltejs/adapter-static': + specifier: ^2.0.2 + version: 2.0.2(@sveltejs/kit@1.16.3) + '@sveltejs/kit': + specifier: ^1.5.0 + version: 1.16.3(svelte@3.59.2)(vite@4.3.9) + '@tailwindcss/forms': + specifier: ^0.5.0 + version: 0.5.0(tailwindcss@3.1.6) + '@tailwindcss/typography': + specifier: ^0.5.4 + version: 0.5.4(tailwindcss@3.1.6) + '@types/node': + specifier: ^20.3.2 + version: 20.3.2 + '@types/prismjs': + specifier: ^1.26.0 + version: 1.26.0 + prismjs: + specifier: 1.29.0 + version: 1.29.0 + svelte: + specifier: ^3.59.2 + version: 3.59.2 + svelte-check: + specifier: ^3.0.1 + version: 3.4.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.21)(svelte@3.59.2) + tailwindcss: + specifier: ^3.1.6 + version: 3.1.6(postcss@8.4.21) + tslib: + specifier: ^2.4.1 + version: 2.5.3 + typescript: + specifier: ^5.0.0 + version: 5.0.4 + vite: + specifier: ^4.3.9 + version: 4.3.9(@types/node@20.3.2)(less@4.1.3) + + js/accordion: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/column': + specifier: workspace:^ + version: link:../column + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + + js/annotatedimage: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/app: + dependencies: + '@gradio/accordion': + specifier: workspace:^ + version: link:../accordion + 
'@gradio/annotatedimage': + specifier: workspace:^ + version: link:../annotatedimage + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/audio': + specifier: workspace:^ + version: link:../audio + '@gradio/box': + specifier: workspace:^ + version: link:../box + '@gradio/button': + specifier: workspace:^ + version: link:../button + '@gradio/chatbot': + specifier: workspace:^ + version: link:../chatbot + '@gradio/checkbox': + specifier: workspace:^ + version: link:../checkbox + '@gradio/checkboxgroup': + specifier: workspace:^ + version: link:../checkboxgroup + '@gradio/client': + specifier: workspace:^ + version: link:../../client/js + '@gradio/code': + specifier: workspace:^ + version: link:../code + '@gradio/colorpicker': + specifier: workspace:^ + version: link:../colorpicker + '@gradio/column': + specifier: workspace:^ + version: link:../column + '@gradio/dataframe': + specifier: workspace:^ + version: link:../dataframe + '@gradio/dropdown': + specifier: workspace:^ + version: link:../dropdown + '@gradio/file': + specifier: workspace:^ + version: link:../file + '@gradio/form': + specifier: workspace:^ + version: link:../form + '@gradio/gallery': + specifier: workspace:^ + version: link:../gallery + '@gradio/group': + specifier: workspace:^ + version: link:../group + '@gradio/highlightedtext': + specifier: workspace:^ + version: link:../highlightedtext + '@gradio/html': + specifier: workspace:^ + version: link:../html + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/image': + specifier: workspace:^ + version: link:../image + '@gradio/json': + specifier: workspace:^ + version: link:../json + '@gradio/label': + specifier: workspace:^ + version: link:../label + '@gradio/markdown': + specifier: workspace:^ + version: link:../markdown + '@gradio/model3d': + specifier: workspace:^ + version: link:../model3D + '@gradio/number': + specifier: workspace:^ + version: link:../number + '@gradio/plot': + specifier: workspace:^ + version: link:../plot + '@gradio/radio': + specifier: workspace:^ + version: link:../radio + '@gradio/row': + specifier: workspace:^ + version: link:../row + '@gradio/slider': + specifier: workspace:^ + version: link:../slider + '@gradio/state': + specifier: workspace:^ + version: link:../state + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/tabitem': + specifier: workspace:^ + version: link:../tabitem + '@gradio/tabs': + specifier: workspace:^ + version: link:../tabs + '@gradio/textbox': + specifier: workspace:^ + version: link:../textbox + '@gradio/theme': + specifier: workspace:^ + version: link:../theme + '@gradio/timeseries': + specifier: workspace:^ + version: link:../timeseries + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/uploadbutton': + specifier: workspace:^ + version: link:../uploadbutton + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + '@gradio/video': + specifier: workspace:^ + version: link:../video + '@gradio/wasm': + specifier: workspace:^ + version: link:../wasm + d3-dsv: + specifier: ^3.0.1 + version: 3.0.1 + mime-types: + specifier: ^2.1.34 + version: 2.1.34 + postcss: + specifier: ^8.4.21 + version: 8.4.21 + postcss-prefix-selector: + specifier: ^1.16.0 + version: 1.16.0(postcss@8.4.21) + + js/atoms: + dependencies: + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/audio: + dependencies: + 
'@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/button': + specifier: workspace:^ + version: link:../button + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + extendable-media-recorder: + specifier: ^9.0.0 + version: 9.0.0 + extendable-media-recorder-wav-encoder: + specifier: ^7.0.76 + version: 7.0.76 + svelte-range-slider-pips: + specifier: ^2.0.1 + version: 2.0.2 + + js/box: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + + js/button: + dependencies: + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/chatbot: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/markdown': + specifier: workspace:^ + version: link:../markdown + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/theme': + specifier: workspace:^ + version: link:../theme + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + '@types/dompurify': + specifier: ^3.0.2 + version: 3.0.2 + '@types/katex': + specifier: ^0.16.0 + version: 0.16.0 + '@types/prismjs': + specifier: 1.26.1 + version: 1.26.1 + + js/checkbox: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/checkboxgroup: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/code: + dependencies: + '@codemirror/autocomplete': + specifier: ^6.3.0 + version: 6.3.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/commands': + specifier: ^6.1.2 + version: 6.1.2 + '@codemirror/lang-css': + specifier: ^6.1.0 + version: 6.1.0(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/lang-html': + specifier: ^6.4.2 + version: 6.4.2 + '@codemirror/lang-javascript': + specifier: ^6.1.4 + version: 6.1.4 + '@codemirror/lang-json': + specifier: ^6.0.1 + version: 6.0.1 + '@codemirror/lang-markdown': + specifier: ^6.1.0 + version: 6.1.0 + '@codemirror/lang-python': + specifier: ^6.0.4 + version: 6.0.4 + '@codemirror/language': + specifier: ^6.6.0 + version: 6.6.0 + '@codemirror/legacy-modes': + specifier: ^6.3.1 + version: 6.3.1 + '@codemirror/lint': + specifier: ^6.0.0 + version: 6.0.0 + '@codemirror/search': + specifier: ^6.2.2 + version: 6.2.2 + '@codemirror/state': + specifier: ^6.1.2 + version: 6.1.2 + '@codemirror/view': + specifier: ^6.4.1 + version: 6.4.1 + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + 
version: link:../utils + '@lezer/common': + specifier: ^1.0.2 + version: 1.0.2 + '@lezer/highlight': + specifier: ^1.1.3 + version: 1.1.3 + '@lezer/markdown': + specifier: ^1.0.2 + version: 1.0.2 + cm6-theme-basic-dark: + specifier: ^0.2.0 + version: 0.2.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/highlight@1.1.3) + cm6-theme-basic-light: + specifier: ^0.2.0 + version: 0.2.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/highlight@1.1.3) + codemirror: + specifier: ^6.0.1 + version: 6.0.1(@lezer/common@1.0.2) + + js/colorpicker: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/column: {} + + js/dataframe: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/button': + specifier: workspace:^ + version: link:../button + '@gradio/markdown': + specifier: workspace:^ + version: link:../markdown + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + '@types/d3-dsv': + specifier: ^3.0.0 + version: 3.0.0 + '@types/dompurify': + specifier: ^3.0.2 + version: 3.0.2 + '@types/katex': + specifier: ^0.16.0 + version: 0.16.0 + d3-dsv: + specifier: ^3.0.1 + version: 3.0.1 + dequal: + specifier: ^2.0.2 + version: 2.0.2 + dompurify: + specifier: ^3.0.3 + version: 3.0.3 + katex: + specifier: ^0.16.7 + version: 0.16.7 + marked: + specifier: ^7.0.0 + version: 7.0.0 + + js/dropdown: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/file: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/client': + specifier: workspace:^ + version: link:../../client/js + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/form: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/gallery: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/image': + specifier: workspace:^ + version: link:../image + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/group: {} + + js/highlightedtext: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/theme': + 
specifier: workspace:^ + version: link:../theme + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/html: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/icons: {} + + js/image: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + cropperjs: + specifier: ^1.5.12 + version: 1.5.12 + lazy-brush: + specifier: ^1.0.1 + version: 1.0.1 + resize-observer-polyfill: + specifier: ^1.5.1 + version: 1.5.1 + + js/json: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/label: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/lite: + devDependencies: + gradio: + specifier: workspace:^ + version: link:../../gradio + + js/markdown: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + '@types/dompurify': + specifier: ^3.0.2 + version: 3.0.2 + '@types/katex': + specifier: ^0.16.0 + version: 0.16.0 + '@types/prismjs': + specifier: 1.26.1 + version: 1.26.1 + dompurify: + specifier: ^3.0.3 + version: 3.0.3 + katex: + specifier: ^0.16.7 + version: 0.16.7 + marked: + specifier: ^7.0.0 + version: 7.0.0 + marked-highlight: + specifier: ^2.0.1 + version: 2.0.1(marked@7.0.0) + prismjs: + specifier: 1.29.0 + version: 1.29.0 + + js/model3D: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + '@types/babylon': + specifier: ^6.16.6 + version: 6.16.6 + babylonjs: + specifier: ^4.2.1 + version: 4.2.2 + babylonjs-loaders: + specifier: ^4.2.1 + version: 4.2.2 + + js/number: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/plot: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/theme': + specifier: workspace:^ + version: link:../theme + '@gradio/utils': + specifier: 
workspace:^ + version: link:../utils + '@rollup/plugin-json': + specifier: ^6.0.0 + version: 6.0.0 + plotly.js-dist-min: + specifier: ^2.10.1 + version: 2.11.1 + svelte-vega: + specifier: ^2.0.0 + version: 2.0.0(svelte@3.59.2)(vega-lite@5.12.0)(vega@5.22.1) + vega: + specifier: ^5.22.1 + version: 5.22.1 + vega-lite: + specifier: ^5.12.0 + version: 5.12.0(vega@5.22.1) + + js/radio: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/row: + devDependencies: + '@gradio/image': + specifier: workspace:^ + version: link:../image + + js/slider: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/state: {} + + js/statustracker: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/column': + specifier: workspace:^ + version: link:../column + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + + js/tabitem: + dependencies: + '@gradio/column': + specifier: workspace:^ + version: link:../column + '@gradio/tabs': + specifier: workspace:^ + version: link:../tabs + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/tabs: + dependencies: + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/textbox: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/theme: {} + + js/timeseries: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/theme': + specifier: workspace:^ + version: link:../theme + '@gradio/tooltip': + specifier: workspace:^ + version: link:../tooltip + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + d3-dsv: + specifier: ^3.0.1 + version: 3.0.1 + d3-scale: + specifier: ^4.0.2 + version: 4.0.2 + d3-shape: + specifier: ^3.2.0 + version: 3.2.0 + devDependencies: + '@types/d3-dsv': + specifier: ^3.0.0 + version: 3.0.0 + '@types/d3-scale': + specifier: ^4.0.3 + version: 4.0.3 + '@types/d3-shape': + specifier: ^3.1.1 + version: 3.1.1 + + js/tooltip: {} + + js/tootils: {} + + js/upload: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + specifier: workspace:^ + version: link:../icons + + js/uploadbutton: + dependencies: + '@gradio/button': + specifier: workspace:^ + version: link:../button + '@gradio/client': + specifier: workspace:^ + version: link:../../client/js + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/utils: + dependencies: + '@gradio/theme': + specifier: workspace:^ + version: link:../theme + + js/video: + dependencies: + '@gradio/atoms': + specifier: workspace:^ + version: link:../atoms + '@gradio/icons': + 
specifier: workspace:^ + version: link:../icons + '@gradio/image': + specifier: workspace:^ + version: link:../image + '@gradio/statustracker': + specifier: workspace:^ + version: link:../statustracker + '@gradio/upload': + specifier: workspace:^ + version: link:../upload + '@gradio/utils': + specifier: workspace:^ + version: link:../utils + + js/wasm: + dependencies: + '@types/path-browserify': + specifier: ^1.0.0 + version: 1.0.0 + path-browserify: + specifier: ^1.0.1 + version: 1.0.1 + devDependencies: + pyodide: + specifier: ^0.24.0 + version: 0.24.0 + +packages: + + /@aashutoshrathi/word-wrap@1.2.6: + resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==} + engines: {node: '>=0.10.0'} + dev: false + + /@adobe/css-tools@4.2.0: + resolution: {integrity: sha512-E09FiIft46CmH5Qnjb0wsW54/YQd69LsxeKUOWawmws1XWvyFGURnAChH0mlr7YPFR1ofwvUQfcL0J3lMxXqPA==} + dev: false + + /@ampproject/remapping@2.2.1: + resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/gen-mapping': 0.3.3 + '@jridgewell/trace-mapping': 0.3.18 + + /@aw-web-design/x-default-browser@1.4.88: + resolution: {integrity: sha512-AkEmF0wcwYC2QkhK703Y83fxWARttIWXDmQN8+cof8FmFZ5BRhnNXGymeb1S73bOCLfWjYELxtujL56idCN/XA==} + hasBin: true + dependencies: + default-browser-id: 3.0.0 + dev: true + + /@babel/code-frame@7.16.7: + resolution: {integrity: sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/highlight': 7.16.10 + + /@babel/code-frame@7.22.5: + resolution: {integrity: sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/highlight': 7.22.5 + + /@babel/compat-data@7.22.5: + resolution: {integrity: sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==} + engines: {node: '>=6.9.0'} + + /@babel/core@7.21.8: + resolution: {integrity: sha512-YeM22Sondbo523Sz0+CirSPnbj9bG3P0CdHcBZdqUuaeOaYEFbOLoGU7lebvGP6P5J/WE9wOn7u7C4J9HvS1xQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@ampproject/remapping': 2.2.1 + '@babel/code-frame': 7.22.5 + '@babel/generator': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.21.8) + '@babel/helper-module-transforms': 7.22.5 + '@babel/helpers': 7.22.5 + '@babel/parser': 7.22.5 + '@babel/template': 7.22.5 + '@babel/traverse': 7.22.5 + '@babel/types': 7.21.5 + convert-source-map: 1.9.0 + debug: 4.3.4 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/core@7.22.5: + resolution: {integrity: sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==} + engines: {node: '>=6.9.0'} + dependencies: + '@ampproject/remapping': 2.2.1 + '@babel/code-frame': 7.22.5 + '@babel/generator': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.22.5) + '@babel/helper-module-transforms': 7.22.5 + '@babel/helpers': 7.22.5 + '@babel/parser': 7.22.5 + '@babel/template': 7.22.5 + '@babel/traverse': 7.22.5 + '@babel/types': 7.22.5 + convert-source-map: 1.9.0 + debug: 4.3.4 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + + /@babel/generator@7.21.9: + resolution: {integrity: 
sha512-F3fZga2uv09wFdEjEQIJxXALXfz0+JaOb7SabvVMmjHxeVTuGW8wgE8Vp1Hd7O+zMTYtcfEISGRzPkeiaPPsvg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + '@jridgewell/gen-mapping': 0.3.3 + '@jridgewell/trace-mapping': 0.3.18 + jsesc: 2.5.2 + dev: true + + /@babel/generator@7.22.5: + resolution: {integrity: sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + '@jridgewell/gen-mapping': 0.3.3 + '@jridgewell/trace-mapping': 0.3.18 + jsesc: 2.5.2 + + /@babel/helper-annotate-as-pure@7.22.5: + resolution: {integrity: sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@babel/helper-builder-binary-assignment-operator-visitor@7.22.5: + resolution: {integrity: sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@babel/helper-compilation-targets@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.21.8 + '@babel/helper-validator-option': 7.22.5 + browserslist: 4.21.9 + lru-cache: 5.1.1 + semver: 6.3.0 + dev: true + + /@babel/helper-compilation-targets@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.22.5 + '@babel/helper-validator-option': 7.22.5 + browserslist: 4.21.9 + lru-cache: 5.1.1 + semver: 6.3.0 + + /@babel/helper-create-class-features-plugin@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-function-name': 7.22.5 + '@babel/helper-member-expression-to-functions': 7.22.5 + '@babel/helper-optimise-call-expression': 7.22.5 + '@babel/helper-replace-supers': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-create-class-features-plugin@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-function-name': 7.22.5 + '@babel/helper-member-expression-to-functions': 7.22.5 + '@babel/helper-optimise-call-expression': 7.22.5 + '@babel/helper-replace-supers': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + 
/@babel/helper-create-regexp-features-plugin@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-annotate-as-pure': 7.22.5 + regexpu-core: 5.3.2 + semver: 6.3.0 + dev: true + + /@babel/helper-create-regexp-features-plugin@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + regexpu-core: 5.3.2 + semver: 6.3.0 + dev: true + + /@babel/helper-define-polyfill-provider@0.3.3(@babel/core@7.21.8): + resolution: {integrity: sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww==} + peerDependencies: + '@babel/core': ^7.4.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + debug: 4.3.4 + lodash.debounce: 4.0.8 + resolve: 1.22.1 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-define-polyfill-provider@0.4.0(@babel/core@7.22.5): + resolution: {integrity: sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==} + peerDependencies: + '@babel/core': ^7.4.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + debug: 4.3.4 + lodash.debounce: 4.0.8 + resolve: 1.22.1 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-environment-visitor@7.22.5: + resolution: {integrity: sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==} + engines: {node: '>=6.9.0'} + + /@babel/helper-function-name@7.22.5: + resolution: {integrity: sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/template': 7.22.5 + '@babel/types': 7.22.5 + + /@babel/helper-hoist-variables@7.22.5: + resolution: {integrity: sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + + /@babel/helper-member-expression-to-functions@7.22.5: + resolution: {integrity: sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@babel/helper-module-imports@7.22.5: + resolution: {integrity: sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + + /@babel/helper-module-transforms@7.22.5: + resolution: {integrity: sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-module-imports': 7.22.5 + '@babel/helper-simple-access': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + '@babel/helper-validator-identifier': 7.22.5 + '@babel/template': 7.22.5 + '@babel/traverse': 7.22.5 + '@babel/types': 
7.22.5 + transitivePeerDependencies: + - supports-color + + /@babel/helper-optimise-call-expression@7.22.5: + resolution: {integrity: sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@babel/helper-plugin-utils@7.22.5: + resolution: {integrity: sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helper-remap-async-to-generator@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-wrap-function': 7.22.5 + '@babel/types': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-remap-async-to-generator@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-wrap-function': 7.22.5 + '@babel/types': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-replace-supers@7.22.5: + resolution: {integrity: sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-member-expression-to-functions': 7.22.5 + '@babel/helper-optimise-call-expression': 7.22.5 + '@babel/template': 7.22.5 + '@babel/traverse': 7.22.5 + '@babel/types': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-simple-access@7.22.5: + resolution: {integrity: sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + + /@babel/helper-skip-transparent-expression-wrappers@7.22.5: + resolution: {integrity: sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@babel/helper-split-export-declaration@7.22.5: + resolution: {integrity: sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.22.5 + + /@babel/helper-string-parser@7.22.5: + resolution: {integrity: sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==} + engines: {node: '>=6.9.0'} + + /@babel/helper-validator-identifier@7.16.7: + resolution: {integrity: sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw==} + engines: {node: '>=6.9.0'} + + /@babel/helper-validator-identifier@7.22.5: + resolution: {integrity: sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==} + engines: {node: '>=6.9.0'} + + /@babel/helper-validator-option@7.22.5: + resolution: {integrity: 
sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==} + engines: {node: '>=6.9.0'} + + /@babel/helper-wrap-function@7.22.5: + resolution: {integrity: sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-function-name': 7.22.5 + '@babel/template': 7.22.5 + '@babel/traverse': 7.22.5 + '@babel/types': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helpers@7.22.5: + resolution: {integrity: sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/template': 7.22.5 + '@babel/traverse': 7.22.5 + '@babel/types': 7.22.5 + transitivePeerDependencies: + - supports-color + + /@babel/highlight@7.16.10: + resolution: {integrity: sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-validator-identifier': 7.16.7 + chalk: 2.4.2 + js-tokens: 4.0.0 + + /@babel/highlight@7.22.5: + resolution: {integrity: sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-validator-identifier': 7.22.5 + chalk: 2.4.2 + js-tokens: 4.0.0 + + /@babel/parser@7.21.9: + resolution: {integrity: sha512-q5PNg/Bi1OpGgx5jYlvWZwAorZepEudDMCLtj967aeS7WMont7dUZI46M2XwcIQqvUlMxWfdLFu4S/qSxeUu5g==} + engines: {node: '>=6.0.0'} + hasBin: true + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@babel/parser@7.22.5: + resolution: {integrity: sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==} + engines: {node: '>=6.0.0'} + hasBin: true + dependencies: + '@babel/types': 7.22.5 + + /@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.13.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/plugin-transform-optional-chaining': 7.22.5(@babel/core@7.21.8) + dev: true + + /@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.13.0 + dependencies: + '@babel/core': 7.22.5 + 
'@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/plugin-transform-optional-chaining': 7.22.5(@babel/core@7.22.5) + dev: true + + /@babel/plugin-proposal-async-generator-functions@7.20.7(@babel/core@7.21.8): + resolution: {integrity: sha512-xMbiLsn/8RK7Wq7VeVytytS2L6qE69bXPB10YCmMdDZbKF4okCqY74pI/jJQ/8U0b/F6NrT2+14b8/P9/3AMGA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-remap-async-to-generator': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.21.8) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-proposal-class-properties@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-proposal-class-properties@7.18.6(@babel/core@7.22.5): + resolution: {integrity: sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-proposal-class-static-block@7.21.0(@babel/core@7.21.8): + resolution: {integrity: sha512-XP5G9MWNUskFuP30IfFSEFB0Z6HzLIUcjYM4bYOPHXl7eiJ9HFv8tWj6TXTN5QODiEhDZAeI4hLok2iHFFV4hw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.12.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.21.8) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-proposal-dynamic-import@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-export-namespace-from@7.18.9(@babel/core@7.21.8): + resolution: {integrity: sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-json-strings@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + 
'@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-logical-assignment-operators@7.20.7(@babel/core@7.21.8): + resolution: {integrity: sha512-y7C7cZgpMIjWlKE5T7eJwp+tnRYM89HmRvWM5EQuB5BoHEONjmQ8lSNmBUwOyy/GFRsohJED51YBF79hE1djug==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-nullish-coalescing-operator@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-nullish-coalescing-operator@7.18.6(@babel/core@7.22.5): + resolution: {integrity: sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-proposal-numeric-separator@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-object-rest-spread@7.20.7(@babel/core@7.21.8): + resolution: {integrity: sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.21.8 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-transform-parameters': 7.22.5(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-optional-catch-binding@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-optional-chaining@7.21.0(@babel/core@7.21.8): + resolution: {integrity: sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-proposal-optional-chaining@7.21.0(@babel/core@7.22.5): + resolution: {integrity: 
sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-proposal-private-methods@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.22.5): + resolution: {integrity: sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + dev: true + + /@babel/plugin-proposal-private-property-in-object@7.21.11(@babel/core@7.21.8): + resolution: {integrity: sha512-0QZ8qP/3RLDVBwBFoWAwCtgcDZJVwA5LUJRZU8x2YFfKNuFq161wK3cuGrALu5yiPu+vzwTAg/sMWVNeWeNyaw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.21.8) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-proposal-unicode-property-regex@7.18.6(@babel/core@7.21.8): + resolution: {integrity: sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==} + engines: {node: '>=4'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-proposal-unicode-property-regex@7.18.6(@babel/core@7.22.5): + resolution: {integrity: sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==} + engines: {node: '>=4'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.21.8): + resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.22.5): + resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.21.8): + resolution: {integrity: 
sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.22.5): + resolution: {integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.21.8): + resolution: {integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.22.5): + resolution: {integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-dynamic-import@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-dynamic-import@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-export-namespace-from@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-export-namespace-from@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-flow@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-9RdCl0i+q0QExayk2nOS7853w08yLucnnPML6EN9S8fgMPVtdLDCdx/cOQ/i44Lb9UeQX9A35yaqBBOMMZxPxQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-import-assertions@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-import-assertions@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==} + engines: {node: '>=6.9.0'} + 
peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-import-attributes@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.21.8): + resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.22.5): + resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-jsx@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.21.8): + resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.22.5): + resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + 
/@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.21.8): + resolution: {integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.22.5): + resolution: {integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.21.8): + resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.22.5): + resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.21.8): + resolution: {integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.22.5): + resolution: {integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.21.8): + resolution: {integrity: 
sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.22.5): + resolution: {integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-typescript@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-syntax-unicode-sets-regex@7.18.6(@babel/core@7.22.5): + resolution: {integrity: sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-arrow-functions@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-arrow-functions@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-async-generator-functions@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-remap-async-to-generator': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-async-to-generator@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-module-imports': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-remap-async-to-generator': 7.22.5(@babel/core@7.21.8) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-async-to-generator@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + 
dependencies: + '@babel/core': 7.22.5 + '@babel/helper-module-imports': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-remap-async-to-generator': 7.22.5(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-block-scoped-functions@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-block-scoped-functions@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-block-scoping@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-block-scoping@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-class-properties@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-class-static-block@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.12.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-classes@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.21.8) + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-function-name': 7.22.5 + '@babel/helper-optimise-call-expression': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-replace-supers': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + dev: true + + 
/@babel/plugin-transform-classes@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.22.5) + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-function-name': 7.22.5 + '@babel/helper-optimise-call-expression': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-replace-supers': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-computed-properties@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/template': 7.22.5 + dev: true + + /@babel/plugin-transform-computed-properties@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/template': 7.22.5 + dev: true + + /@babel/plugin-transform-destructuring@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-destructuring@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-dotall-regex@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-dotall-regex@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-duplicate-keys@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + 
/@babel/plugin-transform-duplicate-keys@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-dynamic-import@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-exponentiation-operator@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-builder-binary-assignment-operator-visitor': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-exponentiation-operator@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-builder-binary-assignment-operator-visitor': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-export-namespace-from@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-flow-strip-types@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-tujNbZdxdG0/54g/oua8ISToaXTFBf8EnSb5PgQSciIXWOWKX3S4+JR7ZE9ol8FZwf9kxitzkGQ+QWeov/mCiA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-flow': 7.22.5(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-for-of@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-for-of@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-function-name@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + 
'@babel/helper-compilation-targets': 7.22.5(@babel/core@7.21.8) + '@babel/helper-function-name': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-function-name@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.22.5) + '@babel/helper-function-name': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-json-strings@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-literals@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-literals@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-logical-assignment-operators@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-member-expression-literals@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-member-expression-literals@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-modules-amd@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-amd@7.22.5(@babel/core@7.22.5): + resolution: {integrity: 
sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-commonjs@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-simple-access': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-commonjs@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-simple-access': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-systemjs@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-hoist-variables': 7.22.5 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-validator-identifier': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-systemjs@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-hoist-variables': 7.22.5 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-validator-identifier': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-umd@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-modules-umd@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-module-transforms': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-named-capturing-groups-regex@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==} + 
engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-named-capturing-groups-regex@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-new-target@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-new-target@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-nullish-coalescing-operator@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-numeric-separator@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-object-rest-spread@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-transform-parameters': 7.22.5(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-object-super@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-replace-supers': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-object-super@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==} + engines: {node: 
'>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-replace-supers': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-optional-catch-binding@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-optional-chaining@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.21.8) + dev: true + + /@babel/plugin-transform-optional-chaining@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.22.5) + dev: true + + /@babel/plugin-transform-parameters@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-parameters@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-private-methods@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/plugin-transform-private-property-in-object@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + 
/@babel/plugin-transform-property-literals@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-property-literals@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-react-jsx@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-module-imports': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-jsx': 7.22.5(@babel/core@7.22.5) + '@babel/types': 7.22.5 + dev: true + + /@babel/plugin-transform-regenerator@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + regenerator-transform: 0.15.1 + dev: true + + /@babel/plugin-transform-regenerator@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + regenerator-transform: 0.15.1 + dev: true + + /@babel/plugin-transform-reserved-words@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-reserved-words@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-shorthand-properties@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-shorthand-properties@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-spread@7.22.5(@babel/core@7.21.8): + 
resolution: {integrity: sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + dev: true + + /@babel/plugin-transform-spread@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 + dev: true + + /@babel/plugin-transform-sticky-regex@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-sticky-regex@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-template-literals@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-template-literals@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-typeof-symbol@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-typeof-symbol@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-typescript@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-annotate-as-pure': 7.22.5 + '@babel/helper-create-class-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-syntax-typescript': 7.22.5(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + 
/@babel/plugin-transform-unicode-escapes@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-unicode-escapes@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-unicode-property-regex@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-unicode-regex@7.22.5(@babel/core@7.21.8): + resolution: {integrity: sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-unicode-regex@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/plugin-transform-unicode-sets-regex@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-create-regexp-features-plugin': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + dev: true + + /@babel/preset-env@7.21.5(@babel/core@7.21.8): + resolution: {integrity: sha512-wH00QnTTldTbf/IefEVyChtRdw5RJvODT/Vb4Vcxq1AZvtXj6T0YeX0cAcXhI6/BdGuiP3GcNIL4OQbI2DVNxg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.21.8 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.21.8) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-validator-option': 7.22.5 + '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-proposal-async-generator-functions': 7.20.7(@babel/core@7.21.8) + '@babel/plugin-proposal-class-properties': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-class-static-block': 7.21.0(@babel/core@7.21.8) + '@babel/plugin-proposal-dynamic-import': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-export-namespace-from': 7.18.9(@babel/core@7.21.8) + 
'@babel/plugin-proposal-json-strings': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-logical-assignment-operators': 7.20.7(@babel/core@7.21.8) + '@babel/plugin-proposal-nullish-coalescing-operator': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-numeric-separator': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-object-rest-spread': 7.20.7(@babel/core@7.21.8) + '@babel/plugin-proposal-optional-catch-binding': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-optional-chaining': 7.21.0(@babel/core@7.21.8) + '@babel/plugin-proposal-private-methods': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-proposal-private-property-in-object': 7.21.11(@babel/core@7.21.8) + '@babel/plugin-proposal-unicode-property-regex': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.21.8) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.21.8) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.21.8) + '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-import-assertions': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.21.8) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.21.8) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.21.8) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.21.8) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.21.8) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.21.8) + '@babel/plugin-transform-arrow-functions': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-async-to-generator': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-block-scoped-functions': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-block-scoping': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-classes': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-computed-properties': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-destructuring': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-dotall-regex': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-duplicate-keys': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-exponentiation-operator': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-for-of': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-function-name': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-literals': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-member-expression-literals': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-modules-amd': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-modules-commonjs': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-modules-systemjs': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-modules-umd': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-named-capturing-groups-regex': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-new-target': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-object-super': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-parameters': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-property-literals': 
7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-regenerator': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-reserved-words': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-shorthand-properties': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-spread': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-sticky-regex': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-template-literals': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-typeof-symbol': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-unicode-escapes': 7.22.5(@babel/core@7.21.8) + '@babel/plugin-transform-unicode-regex': 7.22.5(@babel/core@7.21.8) + '@babel/preset-modules': 0.1.5(@babel/core@7.21.8) + '@babel/types': 7.21.5 + babel-plugin-polyfill-corejs2: 0.3.3(@babel/core@7.21.8) + babel-plugin-polyfill-corejs3: 0.6.0(@babel/core@7.21.8) + babel-plugin-polyfill-regenerator: 0.4.1(@babel/core@7.21.8) + core-js-compat: 3.31.0 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/preset-env@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.22.5 + '@babel/helper-compilation-targets': 7.22.5(@babel/core@7.22.5) + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-validator-option': 7.22.5 + '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-proposal-private-property-in-object': 7.21.0-placeholder-for-preset-env.2(@babel/core@7.22.5) + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.22.5) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.22.5) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.22.5) + '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-import-assertions': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-syntax-import-attributes': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.22.5) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.22.5) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.22.5) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.22.5) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.22.5) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.22.5) + '@babel/plugin-syntax-unicode-sets-regex': 7.18.6(@babel/core@7.22.5) + '@babel/plugin-transform-arrow-functions': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-async-generator-functions': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-async-to-generator': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-block-scoped-functions': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-block-scoping': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-class-properties': 7.22.5(@babel/core@7.22.5) + 
'@babel/plugin-transform-class-static-block': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-classes': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-computed-properties': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-destructuring': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-dotall-regex': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-duplicate-keys': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-dynamic-import': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-exponentiation-operator': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-export-namespace-from': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-for-of': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-function-name': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-json-strings': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-literals': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-logical-assignment-operators': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-member-expression-literals': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-modules-amd': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-modules-commonjs': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-modules-systemjs': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-modules-umd': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-named-capturing-groups-regex': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-new-target': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-nullish-coalescing-operator': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-numeric-separator': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-object-rest-spread': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-object-super': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-optional-catch-binding': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-optional-chaining': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-parameters': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-private-methods': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-private-property-in-object': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-property-literals': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-regenerator': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-reserved-words': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-shorthand-properties': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-spread': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-sticky-regex': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-template-literals': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-typeof-symbol': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-unicode-escapes': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-unicode-property-regex': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-unicode-regex': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-unicode-sets-regex': 7.22.5(@babel/core@7.22.5) + '@babel/preset-modules': 0.1.5(@babel/core@7.22.5) + '@babel/types': 7.22.5 + babel-plugin-polyfill-corejs2: 0.4.3(@babel/core@7.22.5) + babel-plugin-polyfill-corejs3: 0.8.1(@babel/core@7.22.5) + babel-plugin-polyfill-regenerator: 0.5.0(@babel/core@7.22.5) + core-js-compat: 3.31.0 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/preset-flow@7.22.5(@babel/core@7.22.5): + resolution: {integrity: 
sha512-ta2qZ+LSiGCrP5pgcGt8xMnnkXQrq8Sa4Ulhy06BOlF5QbLw9q5hIx7bn5MrsvyTGAfh6kTOo07Q+Pfld/8Y5Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-validator-option': 7.22.5 + '@babel/plugin-transform-flow-strip-types': 7.22.5(@babel/core@7.22.5) + dev: true + + /@babel/preset-modules@0.1.5(@babel/core@7.21.8): + resolution: {integrity: sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-proposal-unicode-property-regex': 7.18.6(@babel/core@7.21.8) + '@babel/plugin-transform-dotall-regex': 7.22.5(@babel/core@7.21.8) + '@babel/types': 7.22.5 + esutils: 2.0.3 + dev: true + + /@babel/preset-modules@0.1.5(@babel/core@7.22.5): + resolution: {integrity: sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/plugin-proposal-unicode-property-regex': 7.18.6(@babel/core@7.22.5) + '@babel/plugin-transform-dotall-regex': 7.22.5(@babel/core@7.22.5) + '@babel/types': 7.22.5 + esutils: 2.0.3 + dev: true + + /@babel/preset-typescript@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-plugin-utils': 7.22.5 + '@babel/helper-validator-option': 7.22.5 + '@babel/plugin-syntax-jsx': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-modules-commonjs': 7.22.5(@babel/core@7.22.5) + '@babel/plugin-transform-typescript': 7.22.5(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/register@7.22.5(@babel/core@7.22.5): + resolution: {integrity: sha512-vV6pm/4CijSQ8Y47RH5SopXzursN35RQINfGJkmOlcpAtGuf94miFvIPhCKGQN7WGIcsgG1BHEX2KVdTYwTwUQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + clone-deep: 4.0.1 + find-cache-dir: 2.1.0 + make-dir: 2.1.0 + pirates: 4.0.6 + source-map-support: 0.5.21 + dev: true + + /@babel/regjsgen@0.8.0: + resolution: {integrity: sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==} + dev: true + + /@babel/runtime@7.21.0: + resolution: {integrity: sha512-xwII0//EObnq89Ji5AKYQaRYiW/nZ3llSv29d49IuxPhKbtJoLP+9QUUZ4nVragQVtaVGeZrpB+ZtG/Pdy/POw==} + engines: {node: '>=6.9.0'} + dependencies: + regenerator-runtime: 0.13.11 + + /@babel/runtime@7.22.6: + resolution: {integrity: sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==} + engines: {node: '>=6.9.0'} + dependencies: + regenerator-runtime: 0.13.11 + + /@babel/template@7.22.5: + resolution: {integrity: sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/code-frame': 7.22.5 + '@babel/parser': 7.22.5 + '@babel/types': 7.22.5 + + /@babel/traverse@7.21.5: + resolution: {integrity: sha512-AhQoI3YjWi6u/y/ntv7k48mcrCXmus0t79J9qPNlk/lAsFlCiJ047RmbfMOawySTHtywXhbXgpx/8nXMYd+oFw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/code-frame': 7.22.5 + 
'@babel/generator': 7.21.9 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-function-name': 7.22.5 + '@babel/helper-hoist-variables': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + '@babel/parser': 7.21.9 + '@babel/types': 7.22.5 + debug: 4.3.4 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/traverse@7.22.5: + resolution: {integrity: sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/code-frame': 7.22.5 + '@babel/generator': 7.22.5 + '@babel/helper-environment-visitor': 7.22.5 + '@babel/helper-function-name': 7.22.5 + '@babel/helper-hoist-variables': 7.22.5 + '@babel/helper-split-export-declaration': 7.22.5 + '@babel/parser': 7.22.5 + '@babel/types': 7.22.5 + debug: 4.3.4 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + /@babel/types@7.21.5: + resolution: {integrity: sha512-m4AfNvVF2mVC/F7fDEdH2El3HzUg9It/XsCxZiOTTA3m3qYfcSVSbTfM6Q9xG+hYDniZssYhlXKKUMD5m8tF4Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-string-parser': 7.22.5 + '@babel/helper-validator-identifier': 7.22.5 + to-fast-properties: 2.0.0 + dev: true + + /@babel/types@7.22.5: + resolution: {integrity: sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-string-parser': 7.22.5 + '@babel/helper-validator-identifier': 7.22.5 + to-fast-properties: 2.0.0 + + /@changesets/apply-release-plan@6.1.3: + resolution: {integrity: sha512-ECDNeoc3nfeAe1jqJb5aFQX7CqzQhD2klXRez2JDb/aVpGUbX673HgKrnrgJRuQR/9f2TtLoYIzrGB9qwD77mg==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/config': 2.3.0 + '@changesets/get-version-range-type': 0.3.2 + '@changesets/git': 2.0.0 + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + detect-indent: 6.1.0 + fs-extra: 7.0.1 + lodash.startcase: 4.4.0 + outdent: 0.5.0 + prettier: 2.8.1 + resolve-from: 5.0.0 + semver: 5.7.1 + dev: false + + /@changesets/assemble-release-plan@5.2.3: + resolution: {integrity: sha512-g7EVZCmnWz3zMBAdrcKhid4hkHT+Ft1n0mLussFMcB1dE2zCuwcvGoy9ec3yOgPGF4hoMtgHaMIk3T3TBdvU9g==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/errors': 0.1.4 + '@changesets/get-dependents-graph': 1.3.5 + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + semver: 5.7.1 + dev: false + + /@changesets/changelog-git@0.1.14: + resolution: {integrity: sha512-+vRfnKtXVWsDDxGctOfzJsPhaCdXRYoe+KyWYoq5X/GqoISREiat0l3L8B0a453B2B4dfHGcZaGyowHbp9BSaA==} + dependencies: + '@changesets/types': 5.2.1 + dev: false + + /@changesets/changelog-github@0.4.8: + resolution: {integrity: sha512-jR1DHibkMAb5v/8ym77E4AMNWZKB5NPzw5a5Wtqm1JepAuIF+hrKp2u04NKM14oBZhHglkCfrla9uq8ORnK/dw==} + dependencies: + '@changesets/get-github-info': 0.5.2 + '@changesets/types': 5.2.1 + dotenv: 8.6.0 + transitivePeerDependencies: + - encoding + dev: false + + /@changesets/cli@2.26.1: + resolution: {integrity: sha512-XnTa+b51vt057fyAudvDKGB0Sh72xutQZNAdXkCqPBKO2zvs2yYZx5hFZj1u9cbtpwM6Sxtcr02/FQJfZOzemQ==} + hasBin: true + dependencies: + '@babel/runtime': 7.21.0 + '@changesets/apply-release-plan': 6.1.3 + '@changesets/assemble-release-plan': 5.2.3 + '@changesets/changelog-git': 0.1.14 + '@changesets/config': 2.3.0 + '@changesets/errors': 0.1.4 + '@changesets/get-dependents-graph': 1.3.5 + '@changesets/get-release-plan': 3.0.16 + '@changesets/git': 2.0.0 + '@changesets/logger': 0.0.5 + '@changesets/pre': 
1.0.14 + '@changesets/read': 0.5.9 + '@changesets/types': 5.2.1 + '@changesets/write': 0.2.3 + '@manypkg/get-packages': 1.1.3 + '@types/is-ci': 3.0.0 + '@types/semver': 6.2.3 + ansi-colors: 4.1.3 + chalk: 2.4.2 + enquirer: 2.3.6 + external-editor: 3.1.0 + fs-extra: 7.0.1 + human-id: 1.0.2 + is-ci: 3.0.1 + meow: 6.1.1 + outdent: 0.5.0 + p-limit: 2.3.0 + preferred-pm: 3.0.3 + resolve-from: 5.0.0 + semver: 5.7.1 + spawndamnit: 2.0.0 + term-size: 2.2.1 + tty-table: 4.2.1 + dev: false + + /@changesets/config@2.3.0: + resolution: {integrity: sha512-EgP/px6mhCx8QeaMAvWtRrgyxW08k/Bx2tpGT+M84jEdX37v3VKfh4Cz1BkwrYKuMV2HZKeHOh8sHvja/HcXfQ==} + dependencies: + '@changesets/errors': 0.1.4 + '@changesets/get-dependents-graph': 1.3.5 + '@changesets/logger': 0.0.5 + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + fs-extra: 7.0.1 + micromatch: 4.0.4 + dev: false + + /@changesets/errors@0.1.4: + resolution: {integrity: sha512-HAcqPF7snsUJ/QzkWoKfRfXushHTu+K5KZLJWPb34s4eCZShIf8BFO3fwq6KU8+G7L5KdtN2BzQAXOSXEyiY9Q==} + dependencies: + extendable-error: 0.1.7 + dev: false + + /@changesets/get-dependents-graph@1.3.5: + resolution: {integrity: sha512-w1eEvnWlbVDIY8mWXqWuYE9oKhvIaBhzqzo4ITSJY9hgoqQ3RoBqwlcAzg11qHxv/b8ReDWnMrpjpKrW6m1ZTA==} + dependencies: + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + chalk: 2.4.2 + fs-extra: 7.0.1 + semver: 5.7.1 + dev: false + + /@changesets/get-github-info@0.5.2: + resolution: {integrity: sha512-JppheLu7S114aEs157fOZDjFqUDpm7eHdq5E8SSR0gUBTEK0cNSHsrSR5a66xs0z3RWuo46QvA3vawp8BxDHvg==} + dependencies: + dataloader: 1.4.0 + node-fetch: 2.6.7 + transitivePeerDependencies: + - encoding + dev: false + + /@changesets/get-release-plan@3.0.16: + resolution: {integrity: sha512-OpP9QILpBp1bY2YNIKFzwigKh7Qe9KizRsZomzLe6pK8IUo8onkAAVUD8+JRKSr8R7d4+JRuQrfSSNlEwKyPYg==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/assemble-release-plan': 5.2.3 + '@changesets/config': 2.3.0 + '@changesets/pre': 1.0.14 + '@changesets/read': 0.5.9 + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + dev: false + + /@changesets/get-version-range-type@0.3.2: + resolution: {integrity: sha512-SVqwYs5pULYjYT4op21F2pVbcrca4qA/bAA3FmFXKMN7Y+HcO8sbZUTx3TAy2VXulP2FACd1aC7f2nTuqSPbqg==} + dev: false + + /@changesets/git@2.0.0: + resolution: {integrity: sha512-enUVEWbiqUTxqSnmesyJGWfzd51PY4H7mH9yUw0hPVpZBJ6tQZFMU3F3mT/t9OJ/GjyiM4770i+sehAn6ymx6A==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/errors': 0.1.4 + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + is-subdir: 1.2.0 + micromatch: 4.0.4 + spawndamnit: 2.0.0 + dev: false + + /@changesets/logger@0.0.5: + resolution: {integrity: sha512-gJyZHomu8nASHpaANzc6bkQMO9gU/ib20lqew1rVx753FOxffnCrJlGIeQVxNWCqM+o6OOleCo/ivL8UAO5iFw==} + dependencies: + chalk: 2.4.2 + dev: false + + /@changesets/parse@0.3.16: + resolution: {integrity: sha512-127JKNd167ayAuBjUggZBkmDS5fIKsthnr9jr6bdnuUljroiERW7FBTDNnNVyJ4l69PzR57pk6mXQdtJyBCJKg==} + dependencies: + '@changesets/types': 5.2.1 + js-yaml: 3.14.1 + dev: false + + /@changesets/pre@1.0.14: + resolution: {integrity: sha512-dTsHmxQWEQekHYHbg+M1mDVYFvegDh9j/kySNuDKdylwfMEevTeDouR7IfHNyVodxZXu17sXoJuf2D0vi55FHQ==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/errors': 0.1.4 + '@changesets/types': 5.2.1 + '@manypkg/get-packages': 1.1.3 + fs-extra: 7.0.1 + dev: false + + /@changesets/read@0.5.9: + resolution: {integrity: sha512-T8BJ6JS6j1gfO1HFq50kU3qawYxa4NTbI/ASNVVCBTsKquy2HYwM9r7ZnzkiMe8IEObAJtUVGSrePCOxAK2haQ==} + dependencies: + '@babel/runtime': 
7.22.6 + '@changesets/git': 2.0.0 + '@changesets/logger': 0.0.5 + '@changesets/parse': 0.3.16 + '@changesets/types': 5.2.1 + chalk: 2.4.2 + fs-extra: 7.0.1 + p-filter: 2.1.0 + dev: false + + /@changesets/types@4.1.0: + resolution: {integrity: sha512-LDQvVDv5Kb50ny2s25Fhm3d9QSZimsoUGBsUioj6MC3qbMUCuC8GPIvk/M6IvXx3lYhAs0lwWUQLb+VIEUCECw==} + dev: false + + /@changesets/types@5.2.1: + resolution: {integrity: sha512-myLfHbVOqaq9UtUKqR/nZA/OY7xFjQMdfgfqeZIBK4d0hA6pgxArvdv8M+6NUzzBsjWLOtvApv8YHr4qM+Kpfg==} + dev: false + + /@changesets/write@0.2.3: + resolution: {integrity: sha512-Dbamr7AIMvslKnNYsLFafaVORx4H0pvCA2MHqgtNCySMe1blImEyAEOzDmcgKAkgz4+uwoLz7demIrX+JBr/Xw==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/types': 5.2.1 + fs-extra: 7.0.1 + human-id: 1.0.2 + prettier: 2.8.1 + dev: false + + /@codemirror/autocomplete@6.3.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/common@1.0.2): + resolution: {integrity: sha512-4jEvh3AjJZTDKazd10J6ZsCIqaYxDMCeua5ouQxY8hlFIml+nr7le0SgBhT3SIytFBmdzPK3AUhXGuW3T79nVg==} + peerDependencies: + '@codemirror/language': ^6.0.0 + '@codemirror/state': ^6.0.0 + '@codemirror/view': ^6.0.0 + '@lezer/common': ^1.0.0 + dependencies: + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/common': 1.0.2 + dev: false + + /@codemirror/commands@6.1.2: + resolution: {integrity: sha512-sO3jdX1s0pam6lIdeSJLMN3DQ6mPEbM4yLvyKkdqtmd/UDwhXA5+AwFJ89rRXm6vTeOXBsE5cAmlos/t7MJdgg==} + dependencies: + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/common': 1.0.2 + dev: false + + /@codemirror/lang-css@6.1.0(@codemirror/view@6.4.1)(@lezer/common@1.0.2): + resolution: {integrity: sha512-GYn4TyMvQLrkrhdisFh8HCTDAjPY/9pzwN12hG9UdrTUxRUMicF+8GS24sFEYaleaG1KZClIFLCj0Rol/WO24w==} + dependencies: + '@codemirror/autocomplete': 6.3.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@lezer/css': 1.1.1 + transitivePeerDependencies: + - '@codemirror/view' + - '@lezer/common' + dev: false + + /@codemirror/lang-html@6.4.2: + resolution: {integrity: sha512-bqCBASkteKySwtIbiV/WCtGnn/khLRbbiV5TE+d9S9eQJD7BA4c5dTRm2b3bVmSpilff5EYxvB4PQaZzM/7cNw==} + dependencies: + '@codemirror/autocomplete': 6.3.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/lang-css': 6.1.0(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/lang-javascript': 6.1.4 + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/common': 1.0.2 + '@lezer/css': 1.1.1 + '@lezer/html': 1.3.3 + dev: false + + /@codemirror/lang-javascript@6.1.4: + resolution: {integrity: sha512-OxLf7OfOZBTMRMi6BO/F72MNGmgOd9B0vetOLvHsDACFXayBzW8fm8aWnDM0yuy68wTK03MBf4HbjSBNRG5q7A==} + dependencies: + '@codemirror/autocomplete': 6.3.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/language': 6.6.0 + '@codemirror/lint': 6.0.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/common': 1.0.2 + '@lezer/javascript': 1.4.1 + dev: false + + /@codemirror/lang-json@6.0.1: + resolution: {integrity: sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ==} + dependencies: + '@codemirror/language': 6.6.0 + '@lezer/json': 1.0.0 + dev: false + + /@codemirror/lang-markdown@6.1.0: + resolution: 
{integrity: sha512-HQDJg1Js19fPKKsI3Rp1X0J6mxyrRy2NX6+Evh0+/jGm6IZHL5ygMGKBYNWKXodoDQFvgdofNRG33gWOwV59Ag==} + dependencies: + '@codemirror/lang-html': 6.4.2 + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/common': 1.0.2 + '@lezer/markdown': 1.0.2 + dev: false + + /@codemirror/lang-python@6.0.4: + resolution: {integrity: sha512-CuC7V6MVw4HshQuFaB1SMXHOSbKLnBnBXMzm9Zjb+uvkggyY8fXp79T9eYFzMn7fuadoPJcXyTcT/q/SRT7lvQ==} + dependencies: + '@codemirror/language': 6.6.0 + '@lezer/python': 1.1.1 + dev: false + + /@codemirror/language@6.6.0: + resolution: {integrity: sha512-cwUd6lzt3MfNYOobdjf14ZkLbJcnv4WtndYaoBkbor/vF+rCNguMPK0IRtvZJG4dsWiaWPcK8x1VijhvSxnstg==} + dependencies: + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/common': 1.0.2 + '@lezer/highlight': 1.1.3 + '@lezer/lr': 1.3.3 + style-mod: 4.0.0 + dev: false + + /@codemirror/legacy-modes@6.3.1: + resolution: {integrity: sha512-icXmCs4Mhst2F8mE0TNpmG6l7YTj1uxam3AbZaFaabINH5oWAdg2CfR/PVi+d/rqxJ+TuTnvkKK5GILHrNThtw==} + dependencies: + '@codemirror/language': 6.6.0 + dev: false + + /@codemirror/lint@6.0.0: + resolution: {integrity: sha512-nUUXcJW1Xp54kNs+a1ToPLK8MadO0rMTnJB8Zk4Z8gBdrN0kqV7uvUraU/T2yqg+grDNR38Vmy/MrhQN/RgwiA==} + dependencies: + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + crelt: 1.0.5 + dev: false + + /@codemirror/search@6.2.2: + resolution: {integrity: sha512-2pWY599zXk+lSoJ2iv9EuTO4gB7lhgBPLPwFb/zTbimFH4NmZSaKzJSV51okjABZ7/Rj0DYy5klWbIgaJh2LoQ==} + dependencies: + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + crelt: 1.0.5 + dev: false + + /@codemirror/state@6.1.2: + resolution: {integrity: sha512-Mxff85Hp5va+zuj+H748KbubXjrinX/k28lj43H14T2D0+4kuvEFIEIO7hCEcvBT8ubZyIelt9yGOjj2MWOEQA==} + dev: false + + /@codemirror/view@6.4.1: + resolution: {integrity: sha512-QdBpD6E5HYx6YFXXhqwrRyQ83w7CxWZnchM4QpWBVkkmV7/oJT8N+yz2KAi2iRaLObc/aOf7C2RCQTO2yswF8A==} + dependencies: + '@codemirror/state': 6.1.2 + style-mod: 4.0.0 + w3c-keyname: 2.2.6 + dev: false + + /@colors/colors@1.5.0: + resolution: {integrity: sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==} + engines: {node: '>=0.1.90'} + requiresBuild: true + dev: true + optional: true + + /@csstools/cascade-layer-name-parser@1.0.3(@csstools/css-parser-algorithms@2.3.0)(@csstools/css-tokenizer@2.1.1): + resolution: {integrity: sha512-ks9ysPP8012j90EQCCFtDsQIXOTCOpTQFIyyoRku06y8CXtUQ+8bXI8KVm9Q9ovwDUVthWuWKZWJD3u1rwnEfw==} + engines: {node: ^14 || ^16 || >=18} + peerDependencies: + '@csstools/css-parser-algorithms': ^2.3.0 + '@csstools/css-tokenizer': ^2.1.1 + dependencies: + '@csstools/css-parser-algorithms': 2.3.0(@csstools/css-tokenizer@2.1.1) + '@csstools/css-tokenizer': 2.1.1 + dev: false + + /@csstools/css-parser-algorithms@2.3.0(@csstools/css-tokenizer@2.1.1): + resolution: {integrity: sha512-dTKSIHHWc0zPvcS5cqGP+/TPFUJB0ekJ9dGKvMAFoNuBFhDPBt9OMGNZiIA5vTiNdGHHBeScYPXIGBMnVOahsA==} + engines: {node: ^14 || ^16 || >=18} + peerDependencies: + '@csstools/css-tokenizer': ^2.1.1 + dependencies: + '@csstools/css-tokenizer': 2.1.1 + dev: false + + /@csstools/css-tokenizer@2.1.1: + resolution: {integrity: sha512-GbrTj2Z8MCTUv+52GE0RbFGM527xuXZ0Xa5g0Z+YN573uveS4G0qi6WNOMyz3yrFM/jaILTTwJ0+umx81EzqfA==} + engines: {node: ^14 || ^16 || >=18} + dev: false + + /@csstools/media-query-list-parser@2.1.2(@csstools/css-parser-algorithms@2.3.0)(@csstools/css-tokenizer@2.1.1): + resolution: {integrity: 
sha512-M8cFGGwl866o6++vIY7j1AKuq9v57cf+dGepScwCcbut9ypJNr4Cj+LLTWligYUZ0uyhEoJDKt5lvyBfh2L3ZQ==} + engines: {node: ^14 || ^16 || >=18} + peerDependencies: + '@csstools/css-parser-algorithms': ^2.3.0 + '@csstools/css-tokenizer': ^2.1.1 + dependencies: + '@csstools/css-parser-algorithms': 2.3.0(@csstools/css-tokenizer@2.1.1) + '@csstools/css-tokenizer': 2.1.1 + dev: false + + /@csstools/postcss-global-data@2.0.0(postcss@8.4.27): + resolution: {integrity: sha512-yaI3+hBJw7ljUhM0INqnFzG8nsDD+Si/FFUWw6uRLQHUALnXyxpSeihOIvG35y03GhpmiEU6qsADbVVGDHKSLw==} + engines: {node: ^14 || ^16 || >=18} + peerDependencies: + postcss: ^8.4 + dependencies: + postcss: 8.4.27 + dev: false + + /@discoveryjs/json-ext@0.5.7: + resolution: {integrity: sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==} + engines: {node: '>=10.0.0'} + dev: true + + /@emotion/use-insertion-effect-with-fallbacks@1.0.1(react@18.2.0): + resolution: {integrity: sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==} + peerDependencies: + react: '>=16.8.0' + dependencies: + react: 18.2.0 + dev: true + + /@esbuild/android-arm64@0.17.14: + resolution: {integrity: sha512-eLOpPO1RvtsP71afiFTvS7tVFShJBCT0txiv/xjFBo5a7R7Gjw7X0IgIaFoLKhqXYAXhahoXm7qAmRXhY4guJg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + optional: true + + /@esbuild/android-arm64@0.18.20: + resolution: {integrity: sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + dev: false + optional: true + + /@esbuild/android-arm64@0.19.0: + resolution: {integrity: sha512-AzsozJnB+RNaDncBCs3Ys5g3kqhPFUueItfEaCpp89JH2naFNX2mYDIvUgPYMqqjm8hiFoo+jklb3QHZyR3ubw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-arm@0.17.14: + resolution: {integrity: sha512-0CnlwnjDU8cks0yJLXfkaU/uoLyRf9VZJs4p1PskBr2AlAHeEsFEwJEo0of/Z3g+ilw5mpyDwThlxzNEIxOE4g==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + optional: true + + /@esbuild/android-arm@0.18.20: + resolution: {integrity: sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + dev: false + optional: true + + /@esbuild/android-arm@0.19.0: + resolution: {integrity: sha512-GAkjUyHgWTYuex3evPd5V7uV/XS4LMKr1PWHRPW1xNyy/Jx08x3uTrDFRefBYLKT/KpaWM8/YMQcwbp5a3yIDA==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-x64@0.17.14: + resolution: {integrity: sha512-nrfQYWBfLGfSGLvRVlt6xi63B5IbfHm3tZCdu/82zuFPQ7zez4XjmRtF/wIRYbJQ/DsZrxJdEvYFE67avYXyng==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + optional: true + + /@esbuild/android-x64@0.18.20: + resolution: {integrity: sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + dev: false + optional: true + + /@esbuild/android-x64@0.19.0: + resolution: {integrity: sha512-SUG8/qiVhljBDpdkHQ9DvOWbp7hFFIP0OzxOTptbmVsgBgzY6JWowmMd6yJuOhapfxmj/DrvwKmjRLvVSIAKZg==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-arm64@0.17.14: + resolution: 
{integrity: sha512-eoSjEuDsU1ROwgBH/c+fZzuSyJUVXQTOIN9xuLs9dE/9HbV/A5IqdXHU1p2OfIMwBwOYJ9SFVGGldxeRCUJFyw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + optional: true + + /@esbuild/darwin-arm64@0.18.20: + resolution: {integrity: sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: false + optional: true + + /@esbuild/darwin-arm64@0.19.0: + resolution: {integrity: sha512-HkxZ8k3Jvcw0FORPNTavA8BMgQjLOB6AajT+iXmil7BwY3gU1hWvJJAyWyEogCmA4LdbGvKF8vEykdmJ4xNJJQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-x64@0.17.14: + resolution: {integrity: sha512-zN0U8RWfrDttdFNkHqFYZtOH8hdi22z0pFm0aIJPsNC4QQZv7je8DWCX5iA4Zx6tRhS0CCc0XC2m7wKsbWEo5g==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + optional: true + + /@esbuild/darwin-x64@0.18.20: + resolution: {integrity: sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: false + optional: true + + /@esbuild/darwin-x64@0.19.0: + resolution: {integrity: sha512-9IRWJjqpWFHM9a5Qs3r3bK834NCFuDY5ZaLrmTjqE+10B6w65UMQzeZjh794JcxpHolsAHqwsN/33crUXNCM2Q==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-arm64@0.17.14: + resolution: {integrity: sha512-z0VcD4ibeZWVQCW1O7szaLxGsx54gcCnajEJMdYoYjLiq4g1jrP2lMq6pk71dbS5+7op/L2Aod+erw+EUr28/A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + optional: true + + /@esbuild/freebsd-arm64@0.18.20: + resolution: {integrity: sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + dev: false + optional: true + + /@esbuild/freebsd-arm64@0.19.0: + resolution: {integrity: sha512-s7i2WcXcK0V1PJHVBe7NsGddsL62a9Vhpz2U7zapPrwKoFuxPP9jybwX8SXnropR/AOj3ppt2ern4ItblU6UQQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-x64@0.17.14: + resolution: {integrity: sha512-hd9mPcxfTgJlolrPlcXkQk9BMwNBvNBsVaUe5eNUqXut6weDQH8whcNaKNF2RO8NbpT6GY8rHOK2A9y++s+ehw==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + optional: true + + /@esbuild/freebsd-x64@0.18.20: + resolution: {integrity: sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: false + optional: true + + /@esbuild/freebsd-x64@0.19.0: + resolution: {integrity: sha512-NMdBSSdgwHCqCsucU5k1xflIIRU0qi1QZnM6+vdGy5fvxm1c8rKh50VzsWsIVTFUG3l91AtRxVwoz3Lcvy3I5w==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm64@0.17.14: + resolution: {integrity: sha512-FhAMNYOq3Iblcj9i+K0l1Fp/MHt+zBeRu/Qkf0LtrcFu3T45jcwB6A1iMsemQ42vR3GBhjNZJZTaCe3VFPbn9g==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-arm64@0.18.20: + resolution: {integrity: sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + 
requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-arm64@0.19.0: + resolution: {integrity: sha512-I4zvE2srSZxRPapFnNqj+NL3sDJ1wkvEZqt903OZUlBBgigrQMvzUowvP/TTTu2OGYe1oweg5MFilfyrElIFag==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm@0.17.14: + resolution: {integrity: sha512-BNTl+wSJ1omsH8s3TkQmIIIQHwvwJrU9u1ggb9XU2KTVM4TmthRIVyxSp2qxROJHhZuW/r8fht46/QE8hU8Qvg==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-arm@0.18.20: + resolution: {integrity: sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-arm@0.19.0: + resolution: {integrity: sha512-2F1+lH7ZBcCcgxiSs8EXQV0PPJJdTNiNcXxDb61vzxTRJJkXX1I/ye9mAhfHyScXzHaEibEXg1Jq9SW586zz7w==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ia32@0.17.14: + resolution: {integrity: sha512-91OK/lQ5y2v7AsmnFT+0EyxdPTNhov3y2CWMdizyMfxSxRqHazXdzgBKtlmkU2KYIc+9ZK3Vwp2KyXogEATYxQ==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-ia32@0.18.20: + resolution: {integrity: sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-ia32@0.19.0: + resolution: {integrity: sha512-dz2Q7+P92r1Evc8kEN+cQnB3qqPjmCrOZ+EdBTn8lEc1yN8WDgaDORQQiX+mxaijbH8npXBT9GxUqE52Gt6Y+g==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-loong64@0.17.14: + resolution: {integrity: sha512-vp15H+5NR6hubNgMluqqKza85HcGJgq7t6rMH7O3Y6ApiOWPkvW2AJfNojUQimfTp6OUrACUXfR4hmpcENXoMQ==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-loong64@0.18.20: + resolution: {integrity: sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-loong64@0.19.0: + resolution: {integrity: sha512-IcVJovJVflih4oFahhUw+N7YgNbuMSVFNr38awb0LNzfaiIfdqIh518nOfYaNQU3aVfiJnOIRVJDSAP4k35WxA==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-mips64el@0.17.14: + resolution: {integrity: sha512-90TOdFV7N+fgi6c2+GO9ochEkmm9kBAKnuD5e08GQMgMINOdOFHuYLPQ91RYVrnWwQ5683sJKuLi9l4SsbJ7Hg==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-mips64el@0.18.20: + resolution: {integrity: sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-mips64el@0.19.0: + resolution: {integrity: sha512-bZGRAGySMquWsKw0gIdsClwfvgbsSq/7oq5KVu1H1r9Il+WzOcfkV1hguntIuBjRVL8agI95i4AukjdAV2YpUw==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ppc64@0.17.14: + resolution: {integrity: 
sha512-NnBGeoqKkTugpBOBZZoktQQ1Yqb7aHKmHxsw43NddPB2YWLAlpb7THZIzsRsTr0Xw3nqiPxbA1H31ZMOG+VVPQ==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-ppc64@0.18.20: + resolution: {integrity: sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-ppc64@0.19.0: + resolution: {integrity: sha512-3LC6H5/gCDorxoRBUdpLV/m7UthYSdar0XcCu+ypycQxMS08MabZ06y1D1yZlDzL/BvOYliRNRWVG/YJJvQdbg==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-riscv64@0.17.14: + resolution: {integrity: sha512-0qdlKScLXA8MGVy21JUKvMzCYWovctuP8KKqhtE5A6IVPq4onxXhSuhwDd2g5sRCzNDlDjitc5sX31BzDoL5Fw==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-riscv64@0.18.20: + resolution: {integrity: sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-riscv64@0.19.0: + resolution: {integrity: sha512-jfvdKjWk+Cp2sgLtEEdSHXO7qckrw2B2eFBaoRdmfhThqZs29GMMg7q/LsQpybA7BxCLLEs4di5ucsWzZC5XPA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-s390x@0.17.14: + resolution: {integrity: sha512-Hdm2Jo1yaaOro4v3+6/zJk6ygCqIZuSDJHdHaf8nVH/tfOuoEX5Riv03Ka15LmQBYJObUTNS1UdyoMk0WUn9Ww==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-s390x@0.18.20: + resolution: {integrity: sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-s390x@0.19.0: + resolution: {integrity: sha512-ofcucfNLkoXmcnJaw9ugdEOf40AWKGt09WBFCkpor+vFJVvmk/8OPjl/qRtks2Z7BuZbG3ztJuK1zS9z5Cgx9A==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-x64@0.17.14: + resolution: {integrity: sha512-8KHF17OstlK4DuzeF/KmSgzrTWQrkWj5boluiiq7kvJCiQVzUrmSkaBvcLB2UgHpKENO2i6BthPkmUhNDaJsVw==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-x64@0.18.20: + resolution: {integrity: sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: false + optional: true + + /@esbuild/linux-x64@0.19.0: + resolution: {integrity: sha512-Fpf7zNDBti3xrQKQKLdXT0hTyOxgFdRJIMtNy8x1az9ATR9/GJ1brYbB/GLWoXhKiHsoWs+2DLkFVNNMTCLEwA==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/netbsd-x64@0.17.14: + resolution: {integrity: sha512-nVwpqvb3yyXztxIT2+VsxJhB5GCgzPdk1n0HHSnchRAcxqKO6ghXwHhJnr0j/B+5FSyEqSxF4q03rbA2fKXtUQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + optional: true + + /@esbuild/netbsd-x64@0.18.20: + resolution: {integrity: sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + dev: false 
+ optional: true + + /@esbuild/netbsd-x64@0.19.0: + resolution: {integrity: sha512-AMQAp/5oENgDOvVhvOlbhVe1pWii7oFAMRHlmTjSEMcpjTpIHtFXhv9uAFgUERHm3eYtNvS9Vf+gT55cwuI6Aw==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/openbsd-x64@0.17.14: + resolution: {integrity: sha512-1RZ7uQQ9zcy/GSAJL1xPdN7NDdOOtNEGiJalg/MOzeakZeTrgH/DoCkbq7TaPDiPhWqnDF+4bnydxRqQD7il6g==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + optional: true + + /@esbuild/openbsd-x64@0.18.20: + resolution: {integrity: sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + dev: false + optional: true + + /@esbuild/openbsd-x64@0.19.0: + resolution: {integrity: sha512-fDztEve1QUs3h/Dw2AUmBlWGkNQbhDoD05ppm5jKvzQv+HVuV13so7m5RYeiSMIC2XQy7PAjZh+afkxAnCRZxA==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/sunos-x64@0.17.14: + resolution: {integrity: sha512-nqMjDsFwv7vp7msrwWRysnM38Sd44PKmW8EzV01YzDBTcTWUpczQg6mGao9VLicXSgW/iookNK6AxeogNVNDZA==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + optional: true + + /@esbuild/sunos-x64@0.18.20: + resolution: {integrity: sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + dev: false + optional: true + + /@esbuild/sunos-x64@0.19.0: + resolution: {integrity: sha512-bKZzJ2/rvUjDzA5Ddyva2tMk89WzNJEibZEaq+wY6SiqPlwgFbqyQLimouxLHiHh1itb5P3SNCIF1bc2bw5H9w==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-arm64@0.17.14: + resolution: {integrity: sha512-xrD0mccTKRBBIotrITV7WVQAwNJ5+1va6L0H9zN92v2yEdjfAN7864cUaZwJS7JPEs53bDTzKFbfqVlG2HhyKQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + optional: true + + /@esbuild/win32-arm64@0.18.20: + resolution: {integrity: sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: false + optional: true + + /@esbuild/win32-arm64@0.19.0: + resolution: {integrity: sha512-NQJ+4jmnA79saI+sE+QzcEls19uZkoEmdxo7r//PDOjIpX8pmoWtTnWg6XcbnO7o4fieyAwb5U2LvgWynF4diA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-ia32@0.17.14: + resolution: {integrity: sha512-nXpkz9bbJrLLyUTYtRotSS3t5b+FOuljg8LgLdINWFs3FfqZMtbnBCZFUmBzQPyxqU87F8Av+3Nco/M3hEcu1w==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + optional: true + + /@esbuild/win32-ia32@0.18.20: + resolution: {integrity: sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: false + optional: true + + /@esbuild/win32-ia32@0.19.0: + resolution: {integrity: sha512-uyxiZAnsfu9diHm9/rIH2soecF/HWLXYUhJKW4q1+/LLmNQ+55lRjvSUDhUmsgJtSUscRJB/3S4RNiTb9o9mCg==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-x64@0.17.14: + resolution: {integrity: sha512-gPQmsi2DKTaEgG14hc3CHXHp62k8g6qr0Pas+I4lUxRMugGSATh/Bi8Dgusoz9IQ0IfdrvLpco6kujEIBoaogA==} + engines: {node: 
'>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + optional: true + + /@esbuild/win32-x64@0.18.20: + resolution: {integrity: sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: false + optional: true + + /@esbuild/win32-x64@0.19.0: + resolution: {integrity: sha512-jl+NXUjK2StMgqnZnqgNjZuerFG8zQqWXMBZdMMv4W/aO1ZKQaYWZBxTrtWKphkCBVEMh0wMVfGgOd2BjOZqUQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@eslint-community/eslint-utils@4.4.0(eslint@8.46.0): + resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + dependencies: + eslint: 8.46.0 + eslint-visitor-keys: 3.4.2 + dev: false + + /@eslint-community/regexpp@4.5.1: + resolution: {integrity: sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + dev: false + + /@eslint-community/regexpp@4.6.2: + resolution: {integrity: sha512-pPTNuaAG3QMH+buKyBIGJs3g/S5y0caxw0ygM3YyE6yJFySwiGGSzA+mM3KJ8QQvzeLh3blwgSonkFjgQdxzMw==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + dev: false + + /@eslint/eslintrc@1.4.1: + resolution: {integrity: sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.5.2 + globals: 13.20.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@eslint/eslintrc@2.1.1: + resolution: {integrity: sha512-9t7ZA7NGGK8ckelF0PQCfcxIUzs1Md5rrO6U/c+FIQNanea5UZC0wqKXH4vHBccmu4ZJgZ2idtPeW7+Q2npOEA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.6.1 + globals: 13.20.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: false + + /@eslint/js@8.46.0: + resolution: {integrity: sha512-a8TLtmPi8xzPkCbp/OGFUo5yhRkHM2Ko9kOWP4znJr0WAhWyThaw3PnwX4vOTWOAMsV2uRt32PPDcEz63esSaA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: false + + /@fal-works/esbuild-plugin-global-externals@2.1.2: + resolution: {integrity: sha512-cEee/Z+I12mZcFJshKcCqC8tuX5hG3s+d+9nZ3LabqKF1vKdF41B92pJVCBggjAGORAeOzyyDDKrZwIkLffeOQ==} + dev: true + + /@formatjs/ecma402-abstract@1.11.4: + resolution: {integrity: sha512-EBikYFp2JCdIfGEb5G9dyCkTGDmC57KSHhRQOC3aYxoPWVZvfWCDjZwkGYHN7Lis/fmuWl906bnNTJifDQ3sXw==} + dependencies: + '@formatjs/intl-localematcher': 0.2.25 + tslib: 2.6.1 + dev: false + + /@formatjs/fast-memoize@1.2.1: + resolution: {integrity: sha512-Rg0e76nomkz3vF9IPlKeV+Qynok0r7YZjL6syLz4/urSg0IbjPZCB/iYUMNsYA643gh4mgrX3T7KEIFIxJBQeg==} + dependencies: + tslib: 2.6.1 + dev: false + + /@formatjs/icu-messageformat-parser@2.1.0: + resolution: {integrity: sha512-Qxv/lmCN6hKpBSss2uQ8IROVnta2r9jd3ymUEIjm2UyIkUCHVcbUVRGL/KS/wv7876edvsPe+hjHVJ4z8YuVaw==} + dependencies: + '@formatjs/ecma402-abstract': 1.11.4 + '@formatjs/icu-skeleton-parser': 1.3.6 + tslib: 2.6.1 + dev: false + + /@formatjs/icu-skeleton-parser@1.3.6: + resolution: {integrity: 
sha512-I96mOxvml/YLrwU2Txnd4klA7V8fRhb6JG/4hm3VMNmeJo1F03IpV2L3wWt7EweqNLES59SZ4d6hVOPCSf80Bg==} + dependencies: + '@formatjs/ecma402-abstract': 1.11.4 + tslib: 2.6.1 + dev: false + + /@formatjs/intl-localematcher@0.2.25: + resolution: {integrity: sha512-YmLcX70BxoSopLFdLr1Ds99NdlTI2oWoLbaUW2M406lxOIPzE1KQhRz2fPUkq34xVZQaihCoU29h0KK7An3bhA==} + dependencies: + tslib: 2.6.1 + dev: false + + /@humanwhocodes/config-array@0.11.10: + resolution: {integrity: sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==} + engines: {node: '>=10.10.0'} + dependencies: + '@humanwhocodes/object-schema': 1.2.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: false + + /@humanwhocodes/config-array@0.9.5: + resolution: {integrity: sha512-ObyMyWxZiCu/yTisA7uzx81s40xR2fD5Cg/2Kq7G02ajkNubJf6BopgDTmDyc3U7sXpNKM8cYOw7s7Tyr+DnCw==} + engines: {node: '>=10.10.0'} + dependencies: + '@humanwhocodes/object-schema': 1.2.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@humanwhocodes/module-importer@1.0.1: + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + dev: false + + /@humanwhocodes/object-schema@1.2.1: + resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==} + + /@istanbuljs/load-nyc-config@1.1.0: + resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==} + engines: {node: '>=8'} + dependencies: + camelcase: 5.3.1 + find-up: 4.1.0 + get-package-type: 0.1.0 + js-yaml: 3.14.1 + resolve-from: 5.0.0 + dev: true + + /@istanbuljs/schema@0.1.3: + resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} + engines: {node: '>=8'} + dev: true + + /@jest/expect-utils@29.5.0: + resolution: {integrity: sha512-fmKzsidoXQT2KwnrwE0SQq3uj8Z763vzR8LnLBwC2qYWEFpjX8daRsk6rHUM1QvNlEW/UJXNXm59ztmJJWs2Mg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + jest-get-type: 29.4.3 + dev: false + + /@jest/schemas@29.4.3: + resolution: {integrity: sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@sinclair/typebox': 0.25.24 + + /@jest/transform@29.5.0: + resolution: {integrity: sha512-8vbeZWqLJOvHaDfeMuoHITGKSz5qWc9u04lnWrQE3VyuSw604PzQM824ZeX9XSjUCeDiE3GuxZe5UKa8J61NQw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@babel/core': 7.22.5 + '@jest/types': 29.5.0 + '@jridgewell/trace-mapping': 0.3.18 + babel-plugin-istanbul: 6.1.1 + chalk: 4.1.2 + convert-source-map: 2.0.0 + fast-json-stable-stringify: 2.1.0 + graceful-fs: 4.2.9 + jest-haste-map: 29.5.0 + jest-regex-util: 29.4.3 + jest-util: 29.5.0 + micromatch: 4.0.4 + pirates: 4.0.6 + slash: 3.0.0 + write-file-atomic: 4.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@jest/types@27.5.1: + resolution: {integrity: sha512-Cx46iJ9QpwQTjIdq5VJu2QTMMs3QlEjI0x1QbBP5W1+nMzyc2XmimiRR/CbX9TO0cPTeUlxWMOu8mslYsJ8DEw==} + engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} + dependencies: + '@types/istanbul-lib-coverage': 2.0.4 + '@types/istanbul-reports': 3.0.1 + '@types/node': 20.3.2 + '@types/yargs': 16.0.5 + chalk: 4.1.2 + dev: true + + /@jest/types@29.5.0: + resolution: {integrity: 
sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/schemas': 29.4.3 + '@types/istanbul-lib-coverage': 2.0.4 + '@types/istanbul-reports': 3.0.1 + '@types/node': 20.3.2 + '@types/yargs': 17.0.24 + chalk: 4.1.2 + + /@jridgewell/gen-mapping@0.3.3: + resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/set-array': 1.1.2 + '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/trace-mapping': 0.3.18 + + /@jridgewell/resolve-uri@3.1.0: + resolution: {integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==} + engines: {node: '>=6.0.0'} + + /@jridgewell/set-array@1.1.2: + resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==} + engines: {node: '>=6.0.0'} + + /@jridgewell/source-map@0.3.5: + resolution: {integrity: sha512-UTYAUj/wviwdsMfzoSJspJxbkH5o1snzwX0//0ENX1u/55kkZZkcTZP6u9bwKGkv+dkk9at4m1Cpt0uY80kcpQ==} + dependencies: + '@jridgewell/gen-mapping': 0.3.3 + '@jridgewell/trace-mapping': 0.3.18 + dev: true + + /@jridgewell/sourcemap-codec@1.4.14: + resolution: {integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==} + + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + + /@jridgewell/trace-mapping@0.3.17: + resolution: {integrity: sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==} + dependencies: + '@jridgewell/resolve-uri': 3.1.0 + '@jridgewell/sourcemap-codec': 1.4.14 + dev: true + + /@jridgewell/trace-mapping@0.3.18: + resolution: {integrity: sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==} + dependencies: + '@jridgewell/resolve-uri': 3.1.0 + '@jridgewell/sourcemap-codec': 1.4.14 + + /@juggle/resize-observer@3.4.0: + resolution: {integrity: sha512-dfLbk+PwWvFzSxwk3n5ySL0hfBog779o8h68wK/7/APo/7cgyWp5jcXockbxdk5kFRkbeXWm4Fbi9FrdN381sA==} + dev: true + + /@lezer/common@1.0.2: + resolution: {integrity: sha512-SVgiGtMnMnW3ActR8SXgsDhw7a0w0ChHSYAyAUxxrOiJ1OqYWEKk/xJd84tTSPo1mo6DXLObAJALNnd0Hrv7Ng==} + dev: false + + /@lezer/css@1.1.1: + resolution: {integrity: sha512-mSjx+unLLapEqdOYDejnGBokB5+AiJKZVclmud0MKQOKx3DLJ5b5VTCstgDDknR6iIV4gVrN6euzsCnj0A2gQA==} + dependencies: + '@lezer/highlight': 1.1.3 + '@lezer/lr': 1.3.3 + dev: false + + /@lezer/highlight@1.1.3: + resolution: {integrity: sha512-3vLKLPThO4td43lYRBygmMY18JN3CPh9w+XS2j8WC30vR4yZeFG4z1iFe4jXE43NtGqe//zHW5q8ENLlHvz9gw==} + dependencies: + '@lezer/common': 1.0.2 + dev: false + + /@lezer/html@1.3.3: + resolution: {integrity: sha512-04Fyvu66DjV2EjhDIG1kfDdktn5Pfw56SXPrzKNQH5B2m7BDfc6bDsz+ZJG8dLS3kIPEKbyyq1Sm2/kjeG0+AA==} + dependencies: + '@lezer/common': 1.0.2 + '@lezer/highlight': 1.1.3 + '@lezer/lr': 1.3.3 + dev: false + + /@lezer/javascript@1.4.1: + resolution: {integrity: sha512-Hqx36DJeYhKtdpc7wBYPR0XF56ZzIp0IkMO/zNNj80xcaFOV4Oj/P7TQc/8k2TxNhzl7tV5tXS8ZOCPbT4L3nA==} + dependencies: + '@lezer/highlight': 1.1.3 + '@lezer/lr': 1.3.3 + dev: false + + /@lezer/json@1.0.0: + resolution: {integrity: sha512-zbAuUY09RBzCoCA3lJ1+ypKw5WSNvLqGMtasdW6HvVOqZoCpPr8eWrsGnOVWGKGn8Rh21FnrKRVlJXrGAVUqRw==} + dependencies: + '@lezer/highlight': 
1.1.3 + '@lezer/lr': 1.3.3 + dev: false + + /@lezer/lr@1.3.3: + resolution: {integrity: sha512-JPQe3mwJlzEVqy67iQiiGozhcngbO8QBgpqZM6oL1Wj/dXckrEexpBLeFkq0edtW5IqnPRFxA24BHJni8Js69w==} + dependencies: + '@lezer/common': 1.0.2 + dev: false + + /@lezer/markdown@1.0.2: + resolution: {integrity: sha512-8CY0OoZ6V5EzPjSPeJ4KLVbtXdLBd8V6sRCooN5kHnO28ytreEGTyrtU/zUwo/XLRzGr/e1g44KlzKi3yWGB5A==} + dependencies: + '@lezer/common': 1.0.2 + '@lezer/highlight': 1.1.3 + dev: false + + /@lezer/python@1.1.1: + resolution: {integrity: sha512-ArUGh9kvdaOVu6IkSaYUS9WFQeMAFVWKRuZo6vexnxoeCLnxf0Y9DCFEAMMa7W9SQBGYE55OarSpPqSkdOXSCA==} + dependencies: + '@lezer/highlight': 1.1.3 + '@lezer/lr': 1.3.3 + dev: false + + /@manypkg/find-root@1.1.0: + resolution: {integrity: sha512-mki5uBvhHzO8kYYix/WRy2WX8S3B5wdVSc9D6KcU5lQNglP2yt58/VfLuAK49glRXChosY8ap2oJ1qgma3GUVA==} + dependencies: + '@babel/runtime': 7.22.6 + '@types/node': 12.20.55 + find-up: 4.1.0 + fs-extra: 8.1.0 + dev: false + + /@manypkg/find-root@2.2.1: + resolution: {integrity: sha512-34NlypD5mmTY65cFAK7QPgY5Tzt0qXR4ZRXdg97xAlkiLuwXUPBEXy5Hsqzd+7S2acsLxUz6Cs50rlDZQr4xUA==} + engines: {node: '>=14.18.0'} + dependencies: + '@manypkg/tools': 1.1.0 + find-up: 4.1.0 + fs-extra: 8.1.0 + dev: false + + /@manypkg/get-packages@1.1.3: + resolution: {integrity: sha512-fo+QhuU3qE/2TQMQmbVMqaQ6EWbMhi4ABWP+O4AM1NqPBuy0OrApV5LO6BrrgnhtAHS2NH6RrVk9OL181tTi8A==} + dependencies: + '@babel/runtime': 7.22.6 + '@changesets/types': 4.1.0 + '@manypkg/find-root': 1.1.0 + fs-extra: 8.1.0 + globby: 11.1.0 + read-yaml-file: 1.1.0 + dev: false + + /@manypkg/get-packages@2.2.0: + resolution: {integrity: sha512-B5p5BXMwhGZKi/syEEAP1eVg5DZ/9LP+MZr0HqfrHLgu9fq0w4ZwH8yVen4JmjrxI2dWS31dcoswYzuphLaRxg==} + engines: {node: '>=14.18.0'} + dependencies: + '@manypkg/find-root': 2.2.1 + '@manypkg/tools': 1.1.0 + dev: false + + /@manypkg/tools@1.1.0: + resolution: {integrity: sha512-SkAyKAByB9l93Slyg8AUHGuM2kjvWioUTCckT/03J09jYnfEzMO/wSXmEhnKGYs6qx9De8TH4yJCl0Y9lRgnyQ==} + engines: {node: '>=14.18.0'} + dependencies: + fs-extra: 8.1.0 + globby: 11.1.0 + jju: 1.4.0 + read-yaml-file: 1.1.0 + dev: false + + /@mapbox/node-pre-gyp@1.0.11: + resolution: {integrity: sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==} + hasBin: true + dependencies: + detect-libc: 2.0.2 + https-proxy-agent: 5.0.1 + make-dir: 3.1.0 + node-fetch: 2.6.7 + nopt: 5.0.0 + npmlog: 5.0.1 + rimraf: 3.0.2 + semver: 7.5.4 + tar: 6.1.15 + transitivePeerDependencies: + - encoding + - supports-color + dev: false + + /@mdx-js/react@2.3.0(react@18.2.0): + resolution: {integrity: sha512-zQH//gdOmuu7nt2oJR29vFhDv88oGPmVw6BggmrHeMI+xgEkp1B2dX9/bMBSYtK0dyLX/aOmesKS09g222K1/g==} + peerDependencies: + react: '>=16' + dependencies: + '@types/mdx': 2.0.5 + '@types/react': 18.2.14 + react: 18.2.0 + dev: true + + /@mswjs/cookies@0.2.2: + resolution: {integrity: sha512-mlN83YSrcFgk7Dm1Mys40DLssI1KdJji2CMKN8eOlBqsTADYzj2+jWzsANsUTFbxDMWPD5e9bfA1RGqBpS3O1g==} + engines: {node: '>=14'} + dependencies: + '@types/set-cookie-parser': 2.4.2 + set-cookie-parser: 2.6.0 + dev: false + + /@mswjs/interceptors@0.17.7: + resolution: {integrity: sha512-dPInyLEF6ybLxfKGY99euI+mbT6ls4PVO9qPgGIsRk3+2VZVfT7fo9Sq6Q8eKT9W38QtUyhG74hN7xMtKWioGw==} + engines: {node: '>=14'} + dependencies: + '@open-draft/until': 1.0.3 + '@types/debug': 4.1.7 + '@xmldom/xmldom': 0.8.6 + debug: 4.3.4 + headers-polyfill: 3.1.2 + outvariant: 1.4.0 + strict-event-emitter: 0.2.8 + web-encoding: 1.1.5 + transitivePeerDependencies: + - supports-color 
+ dev: false + + /@ndelangen/get-tarball@3.0.9: + resolution: {integrity: sha512-9JKTEik4vq+yGosHYhZ1tiH/3WpUS0Nh0kej4Agndhox8pAdWhEx5knFVRcb/ya9knCRCs1rPxNrSXTDdfVqpA==} + dependencies: + gunzip-maybe: 1.4.2 + pump: 3.0.0 + tar-fs: 2.1.1 + dev: true + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + /@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.13.0 + + /@open-draft/until@1.0.3: + resolution: {integrity: sha512-Aq58f5HiWdyDlFffbbSjAlv596h/cOnt2DO1w3DOC7OJ5EHs0hd/nycJfiu9RJbT6Yk6F1knnRRXNSpxoIVZ9Q==} + dev: false + + /@playwright/experimental-ct-core@1.37.1(@types/node@20.3.1)(less@4.1.3): + resolution: {integrity: sha512-3OmhDzrIAcVQlyVHmyaVtzcht/OUEH47uvwPh/rQmz/CrvB6zHMxH/7dIo6SCswMbCFkACepbqGmTW4HjW+yeQ==} + engines: {node: '>=16'} + hasBin: true + dependencies: + '@playwright/test': 1.37.1 + playwright-core: 1.37.1 + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - '@types/node' + - less + - sass + - stylus + - sugarss + - terser + dev: false + + /@playwright/experimental-ct-svelte@1.37.1(@types/node@20.3.1)(less@4.1.3)(svelte@4.0.0)(vite@4.3.9): + resolution: {integrity: sha512-UwdO4uyDkr57uh1NWe04PFuH0QKnFvb9KioY20pLswt9K1DfTemY1ql/NJFg8cWR8kZlYTxhbazFQv6iAi3JFQ==} + engines: {node: '>=16'} + hasBin: true + dependencies: + '@playwright/experimental-ct-core': 1.37.1(@types/node@20.3.1)(less@4.1.3) + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@4.0.0)(vite@4.3.9) + transitivePeerDependencies: + - '@types/node' + - less + - sass + - stylus + - sugarss + - supports-color + - svelte + - terser + - vite + dev: false + + /@playwright/test@1.37.1: + resolution: {integrity: sha512-bq9zTli3vWJo8S3LwB91U0qDNQDpEXnw7knhxLM0nwDvexQAwx9tO8iKDZSqqneVq+URd/WIoz+BALMqUTgdSg==} + engines: {node: '>=16'} + hasBin: true + dependencies: + '@types/node': 20.3.2 + playwright-core: 1.37.1 + optionalDependencies: + fsevents: 2.3.2 + dev: false + + /@polka/url@1.0.0-next.21: + resolution: {integrity: sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==} + + /@rollup/plugin-json@6.0.0: + resolution: {integrity: sha512-i/4C5Jrdr1XUarRhVu27EEwjt4GObltD7c+MkCIpO2QIbojw8MUs+CCTqOphQi3Qtg1FLmYt+l+6YeoIf51J7w==} + engines: {node: '>=14.0.0'} + peerDependencies: + rollup: ^1.20.0||^2.0.0||^3.0.0 + peerDependenciesMeta: + rollup: + optional: true + dependencies: + '@rollup/pluginutils': 5.0.2 + dev: false + + /@rollup/pluginutils@4.2.1: + resolution: {integrity: sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==} + engines: {node: '>= 8.0.0'} + dependencies: + estree-walker: 2.0.2 + picomatch: 2.3.1 + dev: false + + /@rollup/pluginutils@5.0.2: + resolution: {integrity: sha512-pTd9rIsP92h+B6wWwFbW8RkZv4hiR/xKsqre4SIuAOaOEQRxi0lqLke9k2/7WegC85GgUs9pjmOjCUi3In4vwA==} + engines: {node: '>=14.0.0'} + peerDependencies: + rollup: ^1.20.0||^2.0.0||^3.0.0 + peerDependenciesMeta: + rollup: + optional: true + dependencies: + '@types/estree': 1.0.0 + 
estree-walker: 2.0.2 + picomatch: 2.3.1 + dev: false + + /@sinclair/typebox@0.25.24: + resolution: {integrity: sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==} + + /@sindresorhus/slugify@2.2.0: + resolution: {integrity: sha512-9Vybc/qX8Kj6pxJaapjkFbiUJPk7MAkCh/GFCxIBnnsuYCFPIXKvnLidG8xlepht3i24L5XemUmGtrJ3UWrl6w==} + engines: {node: '>=12'} + dependencies: + '@sindresorhus/transliterate': 1.6.0 + escape-string-regexp: 5.0.0 + dev: false + + /@sindresorhus/transliterate@1.6.0: + resolution: {integrity: sha512-doH1gimEu3A46VX6aVxpHTeHrytJAG6HgdxntYnCFiIFHEM/ZGpG8KiZGBChchjQmG0XFIBL552kBTjVcMZXwQ==} + engines: {node: '>=12'} + dependencies: + escape-string-regexp: 5.0.0 + dev: false + + /@storybook/addon-a11y@7.0.24(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-5HkQhen9lyp5equRKfr4ap3+wUSWby8NMEnWedCSeghsjrwUq3tuXxYLMrCx00WlJmJTduEyJEM+tQeoPSGAzA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/addon-highlight': 7.0.24 + '@storybook/channels': 7.0.24 + '@storybook/client-logger': 7.0.24 + '@storybook/components': 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.24 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.24 + '@storybook/theming': 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.24 + axe-core: 4.7.2 + lodash: 4.17.21 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + react-resize-detector: 7.1.2(react-dom@18.2.0)(react@18.2.0) + dev: true + + /@storybook/addon-actions@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-xsLUZez6fzHc+be8BypVO5aA7kjeH9jymLAib68SSQoF0GQry7mb/fhumifQno2BKfCyCw++lYqLHzwV0EISxg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + dequal: 2.0.3 + lodash: 4.17.21 + polished: 4.2.2 + prop-types: 15.8.1 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + react-inspector: 6.0.2(react@18.2.0) + telejson: 7.1.0 + ts-dedent: 2.2.0 + uuid: 9.0.0 + dev: true + + /@storybook/addon-backgrounds@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-6zlLKnAbcaBbLgADylhhih7uma4FLisjgUjY/wpPlqhx/9pEWp7tUoYcGkAADnrN97+70g43VxL6mElKnGtZeA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + ts-dedent: 2.2.0 + dev: true 
+ + /@storybook/addon-controls@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-G6DQwaLCqxnDtiG5qtnJWLD3MkMYjC0Ki9uye5kXCIoPcM52NV1/NQQtfhvzFpwNX3QiQvo7Za2g7/RLhd2z5w==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/blocks': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-common': 7.0.23 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/node-logger': 7.0.23 + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + lodash: 4.17.21 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + ts-dedent: 2.2.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/addon-docs@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-BD4F5uCE4VND5Z3UQ9xF+q3qy6MHTxTMgNMVfcBc4TM8gCuFyuuiOl0sxW3Ap6YdWEFfvzE822RGMk5IlD6UWA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@babel/core': 7.22.5 + '@babel/plugin-transform-react-jsx': 7.22.5(@babel/core@7.22.5) + '@jest/transform': 29.5.0 + '@mdx-js/react': 2.3.0(react@18.2.0) + '@storybook/blocks': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/csf-plugin': 7.0.23 + '@storybook/csf-tools': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/mdx2-csf': 1.1.0 + '@storybook/node-logger': 7.0.23 + '@storybook/postinstall': 7.0.23 + '@storybook/preview-api': 7.0.23 + '@storybook/react-dom-shim': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + fs-extra: 11.1.1 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + remark-external-links: 8.0.0 + remark-slug: 6.1.0 + ts-dedent: 2.2.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/addon-essentials@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-t4ChTrsd+ctKjmhy6TLsOPmPPzkPjCSP3yVDSW8pOzHsSxfFUa7qSu89Kb9zYrwEDwXxiAie1KIRZE3smUeD0A==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/addon-actions': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-backgrounds': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-controls': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-docs': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-highlight': 7.0.23 + '@storybook/addon-measure': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-outline': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-toolbars': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-viewport': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-common': 7.0.23 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/node-logger': 7.0.23 + '@storybook/preview-api': 7.0.23 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + ts-dedent: 2.2.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/addon-highlight@7.0.23: + resolution: {integrity: 
sha512-/qO4VM8CeoUG3ivgki4FtJyEMRzLxJFkeWETaUegReh+n6uaOUeYrJYZr5ES/k0Ily0HikQYdkn/m7JZGQ6VIw==} + dependencies: + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/preview-api': 7.0.23 + dev: true + + /@storybook/addon-highlight@7.0.24: + resolution: {integrity: sha512-IoCJHiX5Ai+7S08isxt7BH4baNF2RsjuGUA/iMoJtto/rMc5u0xftVeIjh6oVqV3tjckowXpezI3oStnrLWuRw==} + dependencies: + '@storybook/core-events': 7.0.24 + '@storybook/global': 5.0.0 + '@storybook/preview-api': 7.0.24 + dev: true + + /@storybook/addon-interactions@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-NBI7ejUO/YmjQRRRopD3tuA+87fq5BRwTINbs17AkaEjO84xQ+G1rTixQZ18PkLpO65OlmuDeGIrbfbt8hrmcA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-common': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/instrumenter': 7.0.23 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + jest-mock: 27.5.1 + polished: 4.2.2 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + ts-dedent: 2.2.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/addon-links@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-e95Y7oVCjsECh8XEs6+SWZtUz+cfUDNuF1mty4/6/d03H8HraWXgUSOfTRhRj+Q076CNcIh7IcqqNgeMxvGdKA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/router': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + prop-types: 15.8.1 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + ts-dedent: 2.2.0 + dev: true + + /@storybook/addon-measure@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-j0HrykvDdUgjjGjZimtp21cPQuYcOOrq21QijYts4t+hk0xfW396e6ZAUyFK24+oXaPkQBHdlApFHKYAP+p8Eg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/types': 7.0.23 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/addon-outline@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-NQsmHaAnqAH0Lus+54s3702491APXmDgKjiaIBgBKhoJt5cLiJ7er6nvGA1ntAgU7FCMrTMZaoV7UDnO45K9vg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 
7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/types': 7.0.23 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + ts-dedent: 2.2.0 + dev: true + + /@storybook/addon-styling@1.3.1(less@4.1.3)(postcss@8.4.27)(react-dom@18.2.0)(react@18.2.0)(webpack@5.88.1): + resolution: {integrity: sha512-5ofDihi4LxOGXFFIU5D7eGLKtK8wzNH19h58QFa1w8kCrZmARYYjiZXVB0bJrMQxU9TMy+B6aOg04vV+IGX2OA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@babel/template': 7.22.5 + '@babel/types': 7.22.5 + '@storybook/api': 7.0.25(react-dom@18.2.0)(react@18.2.0) + '@storybook/components': 7.0.25(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-common': 7.0.25 + '@storybook/core-events': 7.0.25 + '@storybook/csf-tools': 7.0.25 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/node-logger': 7.0.25 + '@storybook/preview-api': 7.0.25 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.25 + css-loader: 6.8.1(webpack@5.88.1) + less-loader: 11.1.3(less@4.1.3)(webpack@5.88.1) + postcss-loader: 7.3.3(postcss@8.4.27)(webpack@5.88.1) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + recast: 0.23.2 + resolve-url-loader: 5.0.0 + sass-loader: 13.3.2(webpack@5.88.1) + style-loader: 3.3.3(webpack@5.88.1) + transitivePeerDependencies: + - encoding + - fibers + - less + - node-sass + - postcss + - sass + - sass-embedded + - supports-color + - webpack + dev: true + + /@storybook/addon-svelte-csf@4.0.0(@storybook/svelte@7.0.23)(@storybook/theming@7.0.23)(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@4.0.0)(vite@4.3.9): + resolution: {integrity: sha512-2f3QjbSDZ3/eu7ny7QctNLAmee5JOEkhLrYjdpFkHnuFo46nCII6BqkFVq5Jm3NkrwqWp+p6kgjpNNKQqxwG8A==} + peerDependencies: + '@storybook/svelte': ^7.0.0 + '@storybook/theming': ^7.0.0 + '@sveltejs/vite-plugin-svelte': ^2.0.0 + svelte: ^4.0.0 + svelte-loader: ^3.1.2 + vite: ^4.0.0 + peerDependenciesMeta: + '@sveltejs/vite-plugin-svelte': + optional: true + svelte-loader: + optional: true + vite: + optional: true + dependencies: + '@babel/runtime': 7.22.6 + '@storybook/svelte': 7.0.23(svelte@4.0.0) + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@4.0.0)(vite@4.3.9) + dedent: 1.5.1 + fs-extra: 11.1.1 + magic-string: 0.30.1 + svelte: 4.0.0 + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - babel-plugin-macros + dev: true + + /@storybook/addon-toolbars@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-o5X6XY480gmhrRb0aNScMrTbSdizoE7yIvJDuWEe6JCgToKUr0bG7xpa8OpOYcC17yIz69eRwqZjhqDRv57nQQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/addon-viewport@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: 
sha512-xeSFieRZNKwj44qMKEheQ9staEc+rvlwLeVaSfJHviLOr8Jq8sn6aWZr/1rn9YwT50H/s1o+Kt1h0jDOLQANyw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + memoizerific: 1.11.3 + prop-types: 15.8.1 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/api@7.0.25(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-EgNUz49ObMTkQvCgxLqHf8wAFwv8B4y23RKXgl7q/HYA+jSWc5SZiNLleNxme7GqCo0qh+wjeU0luPv/A426WA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + '@storybook/client-logger': 7.0.25 + '@storybook/manager-api': 7.0.25(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/blocks@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-yhdff1m+SY90g+52745h/x6r0uDwKHoMffhjttKTSSKhsHOnvHCaslpPBHsxDxsPNGLrjUT+ueK/GSwKJUJmLA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/channels': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/components': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.0.23 + '@storybook/csf': 0.1.1 + '@storybook/docs-tools': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/manager-api': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.0.23 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + '@types/lodash': 4.14.195 + color-convert: 2.0.1 + dequal: 2.0.3 + lodash: 4.17.21 + markdown-to-jsx: 7.2.1(react@18.2.0) + memoizerific: 1.11.3 + polished: 4.2.2 + react: 18.2.0 + react-colorful: 5.6.1(react-dom@18.2.0)(react@18.2.0) + react-dom: 18.2.0(react@18.2.0) + telejson: 7.1.0 + ts-dedent: 2.2.0 + util-deprecate: 1.0.2 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/builder-manager@7.0.23: + resolution: {integrity: sha512-um0+fhOX9ai25YMuMEDzFKSZDzYKof2e/DKPOziZoxUeDuJasiAX/i4CChLqkk94NJKQXB/QAFHhbJ0ei/wnxA==} + dependencies: + '@fal-works/esbuild-plugin-global-externals': 2.1.2 + '@storybook/core-common': 7.0.23 + '@storybook/manager': 7.0.23 + '@storybook/node-logger': 7.0.23 + '@types/ejs': 3.1.2 + '@types/find-cache-dir': 3.2.1 + '@yarnpkg/esbuild-plugin-pnp': 3.0.0-rc.15(esbuild@0.17.14) + browser-assert: 1.2.1 + ejs: 3.1.9 + esbuild: 0.17.14 + esbuild-plugin-alias: 0.2.1 + express: 4.18.2 + find-cache-dir: 3.3.2 + fs-extra: 11.1.1 + process: 0.11.10 + util: 0.12.5 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/builder-vite@7.0.23(typescript@5.1.3)(vite@4.3.9): + resolution: {integrity: sha512-2RY0BzXQ5TxwwDwJsTjxxIaCBJlyybfgn/hU7EkRRSaLvJhaUfHVJHoYrbYA2EFOUufRXmvrf/c138D97air7w==} + peerDependencies: + '@preact/preset-vite': '*' + typescript: '>= 4.3.x' + vite: ^3.0.0 || ^4.0.0 + vite-plugin-glimmerx: '*' + peerDependenciesMeta: + '@preact/preset-vite': + optional: true + typescript: + 
optional: true + vite-plugin-glimmerx: + optional: true + dependencies: + '@storybook/channel-postmessage': 7.0.23 + '@storybook/channel-websocket': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/core-common': 7.0.23 + '@storybook/csf-plugin': 7.0.23 + '@storybook/mdx2-csf': 1.1.0 + '@storybook/node-logger': 7.0.23 + '@storybook/preview': 7.0.23 + '@storybook/preview-api': 7.0.23 + '@storybook/types': 7.0.23 + browser-assert: 1.2.1 + es-module-lexer: 0.9.3 + express: 4.18.2 + fs-extra: 11.1.1 + glob: 8.1.0 + glob-promise: 6.0.3(glob@8.1.0) + magic-string: 0.27.0 + remark-external-links: 8.0.0 + remark-slug: 6.1.0 + rollup: 3.21.6 + typescript: 5.1.3 + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/channel-postmessage@7.0.23: + resolution: {integrity: sha512-SfXTV55Z9U5rN1OuyR56s+PUpav3b4SgXtP67bnNsrv7dkKhBwr0DUUJogIRnjmY0Loy/hLvJ23kfmKXPWC4vQ==} + dependencies: + '@storybook/channels': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + qs: 6.11.2 + telejson: 7.1.0 + dev: true + + /@storybook/channel-postmessage@7.0.24: + resolution: {integrity: sha512-QLtLXjEeTEwBN/7pB888mBaykmRU9Jy2BitvZuLJWyHHygTYm3vYZOaGR37DT+q/6Ob5GaZ0tURZmCSNDe8IIA==} + dependencies: + '@storybook/channels': 7.0.24 + '@storybook/client-logger': 7.0.24 + '@storybook/core-events': 7.0.24 + '@storybook/global': 5.0.0 + qs: 6.11.2 + telejson: 7.1.0 + dev: true + + /@storybook/channel-postmessage@7.0.25: + resolution: {integrity: sha512-h4AHsgaGNcTJD8gzHAOAA6L9oxg4fVOw0LVO1L6Jd0CJ0jDV1jc3UPqh+i3bkTOB/4xjaT5xSr1h+eCUAA+N+w==} + dependencies: + '@storybook/channels': 7.0.25 + '@storybook/client-logger': 7.0.25 + '@storybook/core-events': 7.0.25 + '@storybook/global': 5.0.0 + qs: 6.11.2 + telejson: 7.1.0 + dev: true + + /@storybook/channel-websocket@7.0.23: + resolution: {integrity: sha512-xjY09pOaE5T5TgC41V3fezzqdrL+aPjiW0q4H/CrPF9Oa87hHBZq2dmq1TU5Wd4GFrW/OHqo2rGemS/bXh8mNg==} + dependencies: + '@storybook/channels': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/global': 5.0.0 + telejson: 7.1.0 + dev: true + + /@storybook/channels@7.0.23: + resolution: {integrity: sha512-cCxR3Z84YQjsVMPgFTI+kDVNOlgXSDakwjkNFBznU+s2qhGW5eZt2g9YRDeVDQ6AjR4j4RrGhwddRq4lQZF2pg==} + dev: true + + /@storybook/channels@7.0.24: + resolution: {integrity: sha512-NZVLwMhtzy6cZrNRjshFvMAD9mQTmJDNwhohodSkM/YFCDVFhmxQk9tgizVGh9MwY3CYGJ1SI96RUejGosb49Q==} + dev: true + + /@storybook/channels@7.0.25: + resolution: {integrity: sha512-FLuXysj0uHBQNHpfiggtyaV0EFCMVWgEQjJLeBysqB/+sBCtpjrD7kUKrgJFF+N/IEhJq/dlWt7jOpxT2bffQA==} + dev: true + + /@storybook/cli@7.0.23: + resolution: {integrity: sha512-6os+7rQN/Bx89bOgx/Ju+n0WXi2BN+eBIyvPJrZ7r5tl389lqL7IKHJFYmQ/FnIzhGvwuUxmoSq5niCt2Hvc3w==} + hasBin: true + dependencies: + '@babel/core': 7.22.5 + '@babel/preset-env': 7.22.5(@babel/core@7.22.5) + '@ndelangen/get-tarball': 3.0.9 + '@storybook/codemod': 7.0.23 + '@storybook/core-common': 7.0.23 + '@storybook/core-server': 7.0.23 + '@storybook/csf-tools': 7.0.23 + '@storybook/node-logger': 7.0.23 + '@storybook/telemetry': 7.0.23 + '@storybook/types': 7.0.23 + '@types/semver': 7.5.0 + chalk: 4.1.2 + commander: 6.2.1 + cross-spawn: 7.0.3 + detect-indent: 6.1.0 + envinfo: 7.10.0 + execa: 5.1.1 + express: 4.18.2 + find-up: 5.0.0 + fs-extra: 11.1.1 + get-npm-tarball-url: 2.0.3 + get-port: 5.1.1 + giget: 1.1.2 + globby: 11.1.0 + jscodeshift: 0.14.0(@babel/preset-env@7.22.5) + leven: 3.1.0 + ora: 5.4.1 + 
prettier: 2.8.1 + prompts: 2.4.2 + puppeteer-core: 2.1.1 + read-pkg-up: 7.0.1 + semver: 7.4.0 + shelljs: 0.8.5 + simple-update-notifier: 1.1.0 + strip-json-comments: 3.1.1 + tempy: 1.0.1 + ts-dedent: 2.2.0 + util-deprecate: 1.0.2 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /@storybook/client-logger@7.0.23: + resolution: {integrity: sha512-L287SRO8EaYOxTpryV7N/1WCL5I1IFs5Naiq3FpybhguUP7F3Si7KWvVdFmSW06K9jNj2IEQ/8zBRM8ra4ttyg==} + dependencies: + '@storybook/global': 5.0.0 + dev: true + + /@storybook/client-logger@7.0.24: + resolution: {integrity: sha512-4zRTb+QQ1hWaRqad/UufZNRfi2d/cf5a40My72Ct97VwjhJFE6aQ3K+hl1Xt6hh8dncDL2JK3cgziw6ElqjT0w==} + dependencies: + '@storybook/global': 5.0.0 + dev: true + + /@storybook/client-logger@7.0.25: + resolution: {integrity: sha512-jPUH38qA+FFiEkeA6vzo2Uq2tbgJolII2nKcKw6K6KMSv+/lJNElQ4extEvXHjmPe7TqPIZVDlh8QMh5DlZO5Q==} + dependencies: + '@storybook/global': 5.0.0 + dev: true + + /@storybook/codemod@7.0.23: + resolution: {integrity: sha512-Jr1UmOT4h/0Cst1a6xOIxCstN7arJYdQPvcmnM9QUqYjVpJ65y8ASANinyD27xZS8pshJ38z4pPzZCFE+YVP3Q==} + dependencies: + '@babel/core': 7.21.8 + '@babel/preset-env': 7.21.5(@babel/core@7.21.8) + '@babel/types': 7.21.5 + '@storybook/csf': 0.1.1 + '@storybook/csf-tools': 7.0.23 + '@storybook/node-logger': 7.0.23 + '@storybook/types': 7.0.23 + cross-spawn: 7.0.3 + globby: 11.1.0 + jscodeshift: 0.14.0(@babel/preset-env@7.21.5) + lodash: 4.17.21 + prettier: 2.8.1 + recast: 0.23.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@storybook/components@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-nEMWjqL34uDzQsHM/MJQt6IoeVzbyONeS14UsS/WKTVpnQvxYLeZAg/kyMwZsl28U25na3d+EhZKv/0mWXw5Nw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + use-resize-observer: 9.1.0(react-dom@18.2.0)(react@18.2.0) + util-deprecate: 1.0.2 + dev: true + + /@storybook/components@7.0.24(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-Pu7zGurCyWyiuFl2Pb5gybHA0f4blmHuVqccbMqnUw4Ew80BRu8AqfhNqN2hNdxFCx0mmy0baRGVftx76rNZ0w==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/client-logger': 7.0.24 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/theming': 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.24 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + use-resize-observer: 9.1.0(react-dom@18.2.0)(react@18.2.0) + util-deprecate: 1.0.2 + dev: true + + /@storybook/components@7.0.25(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-eY6R8P7HRisamVed/HwsgLerhDvL3UKdg9KsgBMoGLc7//lC2Zf9qYnDSMWutCdXJh0Te+gJS/i4Jv63YP3mDQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/client-logger': 7.0.25 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/theming': 7.0.25(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.25 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + use-resize-observer: 9.1.0(react-dom@18.2.0)(react@18.2.0) + util-deprecate: 
1.0.2 + dev: true + + /@storybook/core-client@7.0.23: + resolution: {integrity: sha512-YKZvUtFl0DH4xq6GkrYTx9UXfJoNlh6ZiybBXkD0eRi2cEo/EFKM6w5IIXYuyfn8uogBX1cUo61FrcRNulS5bw==} + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/preview-api': 7.0.23 + dev: true + + /@storybook/core-common@7.0.23: + resolution: {integrity: sha512-2W87Z9I0ObEMQkGVPMvgB3I5lWkqqkQDkfIbfoc717+DO3Lqgg/CGy5WL7+v2xVlzfoUnYIeXgkeAwDPDrDyMA==} + dependencies: + '@storybook/node-logger': 7.0.23 + '@storybook/types': 7.0.23 + '@types/node': 16.18.38 + '@types/node-fetch': 2.6.4 + '@types/pretty-hrtime': 1.0.1 + chalk: 4.1.2 + esbuild: 0.17.14 + esbuild-register: 3.4.2(esbuild@0.17.14) + file-system-cache: 2.4.1 + find-up: 5.0.0 + fs-extra: 11.1.1 + glob: 8.1.0 + glob-promise: 6.0.3(glob@8.1.0) + handlebars: 4.7.7 + lazy-universal-dotenv: 4.0.0 + node-fetch: 2.6.7 + picomatch: 2.3.1 + pkg-dir: 5.0.0 + pretty-hrtime: 1.0.3 + resolve-from: 5.0.0 + ts-dedent: 2.2.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/core-common@7.0.25: + resolution: {integrity: sha512-ohnYWhsDgOC23CMFeRlR4OM/Y5l7qq0wQiM3KtCYqFUnRwNkHZJ+rj7s6wkKKGUDy4pebvApeW7HFm/sLWaJgw==} + dependencies: + '@storybook/node-logger': 7.0.25 + '@storybook/types': 7.0.25 + '@types/node': 16.18.38 + '@types/node-fetch': 2.6.4 + '@types/pretty-hrtime': 1.0.1 + chalk: 4.1.2 + esbuild: 0.17.14 + esbuild-register: 3.4.2(esbuild@0.17.14) + file-system-cache: 2.3.0 + find-up: 5.0.0 + fs-extra: 11.1.1 + glob: 8.1.0 + glob-promise: 6.0.3(glob@8.1.0) + handlebars: 4.7.7 + lazy-universal-dotenv: 4.0.0 + node-fetch: 2.6.7 + picomatch: 2.3.1 + pkg-dir: 5.0.0 + pretty-hrtime: 1.0.3 + resolve-from: 5.0.0 + ts-dedent: 2.2.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/core-events@7.0.23: + resolution: {integrity: sha512-Hdt18p/qbgJc+1wY2dGcdjmlsuNXWsoLTaXrjInuvr1U0kmKmKs0VMaB0cFubnUgCmB3YQWTGnVr3q8iz9iB7g==} + dev: true + + /@storybook/core-events@7.0.24: + resolution: {integrity: sha512-xkf/rihCkhqMeh5EA8lVp90/mzbb2gcg6I3oeFWw2hognVcTnPXg6llhWdU4Spqd0cals7GEFmQugIILCmH8GA==} + dev: true + + /@storybook/core-events@7.0.25: + resolution: {integrity: sha512-abM0M+H19eZu0dRK+/2PB0W9b7xXFhiPddXpFCjIfJQFGPIwGBWVAFot1bKR5Mu4IB9OftkJYMRtYEEBrNep3A==} + dev: true + + /@storybook/core-server@7.0.23: + resolution: {integrity: sha512-xHt2WB2kL7VQIxYgtE1TDjd4WEvyqlaf256L3RbuQVGZ/AkuFUEV60FULimM6V+/DyF83hGZTREkjovI+Mb16w==} + dependencies: + '@aw-web-design/x-default-browser': 1.4.88 + '@discoveryjs/json-ext': 0.5.7 + '@storybook/builder-manager': 7.0.23 + '@storybook/core-common': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/csf': 0.1.1 + '@storybook/csf-tools': 7.0.23 + '@storybook/docs-mdx': 0.1.0 + '@storybook/global': 5.0.0 + '@storybook/manager': 7.0.23 + '@storybook/node-logger': 7.0.23 + '@storybook/preview-api': 7.0.23 + '@storybook/telemetry': 7.0.23 + '@storybook/types': 7.0.23 + '@types/detect-port': 1.3.3 + '@types/node': 16.18.38 + '@types/node-fetch': 2.6.4 + '@types/pretty-hrtime': 1.0.1 + '@types/semver': 7.5.0 + better-opn: 2.1.1 + chalk: 4.1.2 + cli-table3: 0.6.3 + compression: 1.7.4 + detect-port: 1.5.1 + express: 4.18.2 + fs-extra: 11.1.1 + globby: 11.1.0 + ip: 2.0.0 + lodash: 4.17.21 + node-fetch: 2.6.7 + open: 8.4.2 + pretty-hrtime: 1.0.3 + prompts: 2.4.2 + read-pkg-up: 7.0.1 + semver: 7.4.0 + serve-favicon: 2.5.0 + telejson: 7.1.0 + ts-dedent: 2.2.0 + util-deprecate: 1.0.2 + watchpack: 2.4.0 + ws: 8.13.0(bufferutil@4.0.7) + transitivePeerDependencies: + - 
bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /@storybook/csf-plugin@7.0.23: + resolution: {integrity: sha512-hKlCkZ8NONqRfzt5rdyQznnf/jMbbUF3h8mLxs1nYSevqH8CaHH9w8dYW2y67hyzT7Wt050bqRO2YH8ZQG/VVA==} + dependencies: + '@storybook/csf-tools': 7.0.23 + unplugin: 0.10.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@storybook/csf-tools@7.0.23: + resolution: {integrity: sha512-fCRmI/UduL7/Bhz4Ww8pn+dHqU/qCaZTcigxQSeWm3OpTUpHzbFwVLXLr/ZnL4ofS+AWa5FhiZXcMF5TMXWXLw==} + dependencies: + '@babel/generator': 7.21.9 + '@babel/parser': 7.21.9 + '@babel/traverse': 7.21.5 + '@babel/types': 7.21.5 + '@storybook/csf': 0.1.1 + '@storybook/types': 7.0.23 + fs-extra: 11.1.1 + recast: 0.23.2 + ts-dedent: 2.2.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@storybook/csf-tools@7.0.25: + resolution: {integrity: sha512-ybxHmnQDEoqZZnc1DtsFuRmQG6va3eSo/eZeH6ixzTmuA5QWVx1UE7lA97c1wgbipa17+Jo1hJaMkoMPeKc7ew==} + dependencies: + '@babel/generator': 7.21.9 + '@babel/parser': 7.21.9 + '@babel/traverse': 7.21.5 + '@babel/types': 7.21.5 + '@storybook/csf': 0.1.1 + '@storybook/types': 7.0.25 + fs-extra: 11.1.1 + recast: 0.23.2 + ts-dedent: 2.2.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@storybook/csf@0.1.1: + resolution: {integrity: sha512-4hE3AlNVxR60Wc5KSC68ASYzUobjPqtSKyhV6G+ge0FIXU55N5nTY7dXGRZHQGDBPq+XqchMkIdlkHPRs8nTHg==} + dependencies: + type-fest: 2.19.0 + dev: true + + /@storybook/docs-mdx@0.1.0: + resolution: {integrity: sha512-JDaBR9lwVY4eSH5W8EGHrhODjygPd6QImRbwjAuJNEnY0Vw4ie3bPkeGfnacB3OBW6u/agqPv2aRlR46JcAQLg==} + dev: true + + /@storybook/docs-tools@7.0.23: + resolution: {integrity: sha512-sf0eGmx7ZfFgj/lrSjvDoqOQWRdAk9Os5nuy/rtSyOYLv8Y7+Pwdjn+1cUTs6j/yhgOooC0IweJom0+D40Mkog==} + dependencies: + '@babel/core': 7.22.5 + '@storybook/core-common': 7.0.23 + '@storybook/preview-api': 7.0.23 + '@storybook/types': 7.0.23 + '@types/doctrine': 0.0.3 + doctrine: 3.0.0 + lodash: 4.17.21 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/global@5.0.0: + resolution: {integrity: sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==} + dev: true + + /@storybook/instrumenter@7.0.23: + resolution: {integrity: sha512-CeV2se64XxccD4L6XFI3cFfEz3/Lcbrvb+T3bZZzGOXO18zH5tN3jXVfAONrz/mU69jL9mbo96hFl51UDtwcAg==} + dependencies: + '@storybook/channels': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/preview-api': 7.0.23 + dev: true + + /@storybook/manager-api@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-tvq5+xVkpqWDDnvyoi/sfAR7ZaIu7oiommMtuEt1/mhItn9nv8TXkWbthWUlwRgUrPiJJl2BNSnXMRS+byOAZg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/channels': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/router': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/theming': 7.0.23(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.23 + dequal: 2.0.3 + lodash: 4.17.21 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + semver: 7.4.0 + store2: 2.14.2 + telejson: 7.1.0 + ts-dedent: 2.2.0 + dev: true + + /@storybook/manager-api@7.0.24(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: 
sha512-cBpgDWq8reFgyrv4fBZlZJQyWYb9cDW0LDe476rWn/29uXNvYMNsHRwveLNgSA8Oy1NdyQCgf4ZgcYvY3wpvMA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/channels': 7.0.24 + '@storybook/client-logger': 7.0.24 + '@storybook/core-events': 7.0.24 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/router': 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/theming': 7.0.24(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.24 + dequal: 2.0.3 + lodash: 4.17.21 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + semver: 7.4.0 + store2: 2.14.2 + telejson: 7.1.0 + ts-dedent: 2.2.0 + dev: true + + /@storybook/manager-api@7.0.25(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-ZuimeFd9jUlYip1TLa+Q+CoU7xfHk7ZrCeuPSWG3QGodgt0L1gSwRbQnoS8ZN895Y3i0WQX32nnRWwwSWGrjgA==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/channels': 7.0.25 + '@storybook/client-logger': 7.0.25 + '@storybook/core-events': 7.0.25 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/router': 7.0.25(react-dom@18.2.0)(react@18.2.0) + '@storybook/theming': 7.0.25(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.0.25 + dequal: 2.0.3 + lodash: 4.17.21 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + semver: 7.4.0 + store2: 2.14.2 + telejson: 7.1.0 + ts-dedent: 2.2.0 + dev: true + + /@storybook/manager@7.0.23: + resolution: {integrity: sha512-D3WIqtzjSY3UOskZhKQ2R7RypPUeqAmsXLKxw2EEEx7iLHgJfKvFeAZ77NCKNOxQsEDjLrjTQH4WjiKEaSpK5Q==} + dev: true + + /@storybook/mdx2-csf@1.1.0: + resolution: {integrity: sha512-TXJJd5RAKakWx4BtpwvSNdgTDkKM6RkXU8GK34S/LhidQ5Pjz3wcnqb0TxEkfhK/ztbP8nKHqXFwLfa2CYkvQw==} + dev: true + + /@storybook/node-logger@7.0.23: + resolution: {integrity: sha512-bVa0LnD0pAI0ZU9cue+hvWiWEli3Gny6ofolaWiOw1W03P5ogUo7gHHw/+Is4Iba7FxD1+W1BXm5oi22xD1x0g==} + dependencies: + '@types/npmlog': 4.1.4 + chalk: 4.1.2 + npmlog: 5.0.1 + pretty-hrtime: 1.0.3 + dev: true + + /@storybook/node-logger@7.0.25: + resolution: {integrity: sha512-ZzglyuEuYgged6xHhupQ3a4N2icvQq+4GL5UXNlMWgSMwthO7yeJERJHavvjjFIukQIF8d5plTy27/LCueIX3g==} + dependencies: + '@types/npmlog': 4.1.4 + chalk: 4.1.2 + npmlog: 5.0.1 + pretty-hrtime: 1.0.3 + dev: true + + /@storybook/postinstall@7.0.23: + resolution: {integrity: sha512-GOVF1MXIRjK8Qx5FjMVoYGlQetJJFjxh75FHb2cm2xxEiIxLpMWOOHkTcsqh2BQzGqi/Bs4IKx2OxMxZazgroQ==} + dev: true + + /@storybook/preview-api@7.0.23: + resolution: {integrity: sha512-kXhDX6gVjQu4Lx4SnCW5Yt5W/TbQofp9SL0paB1ywsJ15xSAPU5KVILe9OWAOba2YUnk7sHux/xDX/gH5RCpVw==} + dependencies: + '@storybook/channel-postmessage': 7.0.23 + '@storybook/channels': 7.0.23 + '@storybook/client-logger': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/types': 7.0.23 + '@types/qs': 6.9.7 + dequal: 2.0.3 + lodash: 4.17.21 + memoizerific: 1.11.3 + qs: 6.11.2 + synchronous-promise: 2.0.17 + ts-dedent: 2.2.0 + util-deprecate: 1.0.2 + dev: true + + /@storybook/preview-api@7.0.24: + resolution: {integrity: sha512-psycU07tuB5nyJvfAJiDN/9e8cjOdJ+5lrCSYC3vPzH86LxADDIN0/8xFb1CaQWcXZsADEFJGpHKWbRhjym5ew==} + dependencies: + '@storybook/channel-postmessage': 7.0.24 + '@storybook/channels': 7.0.24 + '@storybook/client-logger': 7.0.24 + '@storybook/core-events': 7.0.24 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/types': 7.0.24 + 
'@types/qs': 6.9.7 + dequal: 2.0.3 + lodash: 4.17.21 + memoizerific: 1.11.3 + qs: 6.11.2 + synchronous-promise: 2.0.17 + ts-dedent: 2.2.0 + util-deprecate: 1.0.2 + dev: true + + /@storybook/preview-api@7.0.25: + resolution: {integrity: sha512-/KiCKMOFGSc9LaQxuNDEeWqqn/GRROCWeg4wyhm4bsxhd/DsQfTmLaB/rW0+GZpMMZoOfSITkSYETNCPzNhO9g==} + dependencies: + '@storybook/channel-postmessage': 7.0.25 + '@storybook/channels': 7.0.25 + '@storybook/client-logger': 7.0.25 + '@storybook/core-events': 7.0.25 + '@storybook/csf': 0.1.1 + '@storybook/global': 5.0.0 + '@storybook/types': 7.0.25 + '@types/qs': 6.9.7 + dequal: 2.0.3 + lodash: 4.17.21 + memoizerific: 1.11.3 + qs: 6.11.2 + synchronous-promise: 2.0.17 + ts-dedent: 2.2.0 + util-deprecate: 1.0.2 + dev: true + + /@storybook/preview@7.0.23: + resolution: {integrity: sha512-D4oDayFOXqNDLJStbZ35Lc0UAXvzdWiij1IE01wH1mzndlEgR+/1ZEPQfm5Leb5LZd7pWmyYLJqh6m/CCK2uPg==} + dev: true + + /@storybook/react-dom-shim@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-v4jIaDb3SwYmRADuNZDwR/5r0V65zAc+hJlW+8z3FRZ5xN3gGV/3s08VL2xnItmidsneMndz9ECjlaTHvSGOng==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/router@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-qZJJJKqcyhTAXRWxGwBlL97BSt/TbWcXNUB1H3Q4ufKrgdrCRuThfr8R8Fir+iggr7vF3QnMQ7rCyPT/yB56/g==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/client-logger': 7.0.23 + memoizerific: 1.11.3 + qs: 6.11.2 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/router@7.0.24(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-SRCV+srCZUbko/V0phVN8jY8ilrxQWWAY/gegwNlIYaNqLJSyYqIj739VDmX+deXl6rOEpFLZreClVXWiDU9+w==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/client-logger': 7.0.24 + memoizerific: 1.11.3 + qs: 6.11.2 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/router@7.0.25(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-RcTe407o9m2KvDv+vKG3Qd2zdkswGSa6rIGKvvF/N1wEjB9pyu+HBW5PjAwlXWwPeWpfaz2du/KmXk+dxEFfug==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@storybook/client-logger': 7.0.25 + memoizerific: 1.11.3 + qs: 6.11.2 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/svelte-vite@7.0.23(react-dom@18.2.0)(react@18.2.0)(typescript@5.1.3)(vite@4.3.9): + resolution: {integrity: sha512-jM6U6B1CiExLt4a3grrlWW5owTMM+8+gzcZhgXCRs4n2e3FpmVz9RkfVFH6+c37acCzlJ7GlhblNLupbRyoNLA==} + engines: {node: ^14.18 || >=16} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + vite: ^3.0.0 || ^4.0.0 + dependencies: + '@storybook/builder-vite': 7.0.23(typescript@5.1.3)(vite@4.3.9) + '@storybook/node-logger': 7.0.23 + '@storybook/svelte': 7.0.23(svelte@3.59.2) + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@3.59.2)(vite@4.3.9) + magic-string: 0.27.0 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + svelte: 3.59.2 + sveltedoc-parser: 4.2.1 + ts-dedent: 2.2.0 + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - '@preact/preset-vite' + - encoding + - supports-color + - typescript + - vite-plugin-glimmerx + dev: true + + 
/@storybook/svelte@7.0.23(svelte@3.59.2): + resolution: {integrity: sha512-sCPbdQD9mn8qlCaoBS045U4VEs3uibQl7Pg8geAB9jxMvd7w7QiCLeiK073WJnPu6AJL6D7plxU8nMuXW1aZ2A==} + engines: {node: '>=16.0.0'} + peerDependencies: + svelte: ^3.1.0 + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/core-client': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/docs-tools': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/preview-api': 7.0.23 + '@storybook/types': 7.0.23 + svelte: 3.59.2 + sveltedoc-parser: 4.2.1 + type-fest: 2.19.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/svelte@7.0.23(svelte@4.0.0): + resolution: {integrity: sha512-sCPbdQD9mn8qlCaoBS045U4VEs3uibQl7Pg8geAB9jxMvd7w7QiCLeiK073WJnPu6AJL6D7plxU8nMuXW1aZ2A==} + engines: {node: '>=16.0.0'} + peerDependencies: + svelte: ^3.1.0 + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/core-client': 7.0.23 + '@storybook/core-events': 7.0.23 + '@storybook/docs-tools': 7.0.23 + '@storybook/global': 5.0.0 + '@storybook/preview-api': 7.0.23 + '@storybook/types': 7.0.23 + svelte: 4.0.0 + sveltedoc-parser: 4.2.1 + type-fest: 2.19.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/telemetry@7.0.23: + resolution: {integrity: sha512-bV6U58+JXvliq6FHnEOmy902Coa2JVD0M1N6En0us9kNNrtxpn4xSO4dvFW0A+veZimtT6kI55liG89IKeN3Nw==} + dependencies: + '@storybook/client-logger': 7.0.23 + '@storybook/core-common': 7.0.23 + chalk: 4.1.2 + detect-package-manager: 2.0.1 + fetch-retry: 5.0.6 + fs-extra: 11.1.1 + isomorphic-unfetch: 3.1.0 + nanoid: 3.3.6 + read-pkg-up: 7.0.1 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@storybook/testing-library@0.2.0: + resolution: {integrity: sha512-Ff6jNnrsosmDshgCf0Eb5Cz7IA34p/1Ps5N3Kp3598kfXpBSccSkQQvVFUXC3kIHw/isIXWPqntZuKqnWUz7Gw==} + dependencies: + '@testing-library/dom': 9.0.0 + '@testing-library/user-event': 14.0.0(@testing-library/dom@9.0.0) + ts-dedent: 2.2.0 + dev: true + + /@storybook/theming@7.0.23(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-hKmpjFS24YK0vl69KhqNauARTgxQu5mvlifHmu7xO80bigXi6NzA5VyyCMHO1SKVFJwPBVHHfauQCenSRm2PDQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) + '@storybook/client-logger': 7.0.23 + '@storybook/global': 5.0.0 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/theming@7.0.24(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-CMeCCfqffJ/D5rBl1HpAM/e5Vw0h7ucT+CLzP0ALtLrguz9ZzOiIZYgMj17KpfvWqje7HT+DwEtNkSrnJ01FNQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) + '@storybook/client-logger': 7.0.24 + '@storybook/global': 5.0.0 + memoizerific: 1.11.3 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/theming@7.0.25(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-OaLtP4jjN6NGvdZpfQq3FO2IE/uZDxcXJdEXCf8azzAyhwvFU5kMA8huCE1KvOGJfAR5lPfDMQDKMXTlkV7frg==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + dependencies: + '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) + '@storybook/client-logger': 7.0.25 + '@storybook/global': 5.0.0 + memoizerific: 1.11.3 + react: 18.2.0 + 
react-dom: 18.2.0(react@18.2.0) + dev: true + + /@storybook/types@7.0.23: + resolution: {integrity: sha512-ziszL3OfhTT5PHE7kiQjWWx3Lw3qro8eLX+56dXDNgmft5LS66yEANcaA7OzxLnEgdyWSxJqgrVo6r0JwHp2Eg==} + dependencies: + '@storybook/channels': 7.0.23 + '@types/babel__core': 7.20.1 + '@types/express': 4.17.17 + file-system-cache: 2.4.1 + dev: true + + /@storybook/types@7.0.24: + resolution: {integrity: sha512-SZh/XBHP1TT5bmEk0W52nT0v6fUnYwmZVls3da5noutdgOAiwL7TANtl41XrNjG+UDr8x0OE3PVVJi+LhwUaNA==} + dependencies: + '@storybook/channels': 7.0.24 + '@types/babel__core': 7.20.1 + '@types/express': 4.17.17 + file-system-cache: 2.3.0 + dev: true + + /@storybook/types@7.0.25: + resolution: {integrity: sha512-18Mn8IRbgsR+QXRa25wbNRJiKapKvODVx6rbBIH9Kim30gbTCgukYKJQlus27IODMMzMr86LiXKgnGpFv6NQ5w==} + dependencies: + '@storybook/channels': 7.0.25 + '@types/babel__core': 7.20.1 + '@types/express': 4.17.17 + file-system-cache: 2.3.0 + dev: true + + /@sveltejs/adapter-auto@2.0.1(@sveltejs/kit@1.16.3): + resolution: {integrity: sha512-anxxYMcQy7HWSKxN4YNaVcgNzCHtNFwygq72EA1Xv7c+5gSECOJ1ez1PYoLciPiFa7A3XBvMDQXUFJ2eqLDtAA==} + peerDependencies: + '@sveltejs/kit': ^1.0.0 + dependencies: + '@sveltejs/kit': 1.16.3(svelte@3.57.0)(vite@4.3.5) + import-meta-resolve: 3.0.0 + dev: true + + /@sveltejs/adapter-static@2.0.2(@sveltejs/kit@1.16.3): + resolution: {integrity: sha512-9wYtf6s6ew7DHUHMrt55YpD1FgV7oWql2IGsW5BXquLxqcY9vjrqCFo0TzzDpo+ZPZkW/v77k0eOP6tsAb8HmQ==} + peerDependencies: + '@sveltejs/kit': ^1.5.0 + dependencies: + '@sveltejs/kit': 1.16.3(svelte@3.59.2)(vite@4.3.9) + dev: true + + /@sveltejs/adapter-vercel@3.0.3(@sveltejs/kit@1.16.3): + resolution: {integrity: sha512-0FQMjR6klW4627ewdclSr0lUe/DqiiyOaRTfgb5cXgNbVMsZMOA2fQ77TYQnJdvMfSEWe6y8uznV48XqKh9+vA==} + peerDependencies: + '@sveltejs/kit': ^1.5.0 + dependencies: + '@sveltejs/kit': 1.16.3(svelte@3.59.2)(vite@4.3.9) + '@vercel/nft': 0.23.0 + esbuild: 0.18.20 + transitivePeerDependencies: + - encoding + - supports-color + dev: false + + /@sveltejs/kit@1.16.3(svelte@3.57.0)(vite@4.3.5): + resolution: {integrity: sha512-8uv0udYRpVuE1BweFidcWHfL+u2gAANKmvIal1dN/FWPBl7DJYbt9zYEtr3bNTiXystT8Sn0Wp54RfwpbPqHjQ==} + engines: {node: ^16.14 || >=18} + hasBin: true + requiresBuild: true + peerDependencies: + svelte: ^3.54.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@3.57.0)(vite@4.3.5) + '@types/cookie': 0.5.1 + cookie: 0.5.0 + devalue: 4.3.0 + esm-env: 1.0.0 + kleur: 4.1.5 + magic-string: 0.30.0 + mime: 3.0.0 + sade: 1.8.1 + set-cookie-parser: 2.6.0 + sirv: 2.0.2 + svelte: 3.57.0 + tiny-glob: 0.2.9 + undici: 5.22.0 + vite: 4.3.5(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - supports-color + dev: true + + /@sveltejs/kit@1.16.3(svelte@3.59.2)(vite@4.3.9): + resolution: {integrity: sha512-8uv0udYRpVuE1BweFidcWHfL+u2gAANKmvIal1dN/FWPBl7DJYbt9zYEtr3bNTiXystT8Sn0Wp54RfwpbPqHjQ==} + engines: {node: ^16.14 || >=18} + hasBin: true + requiresBuild: true + peerDependencies: + svelte: ^3.54.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@3.59.2)(vite@4.3.9) + '@types/cookie': 0.5.1 + cookie: 0.5.0 + devalue: 4.3.0 + esm-env: 1.0.0 + kleur: 4.1.5 + magic-string: 0.30.0 + mime: 3.0.0 + sade: 1.8.1 + set-cookie-parser: 2.6.0 + sirv: 2.0.2 + svelte: 3.59.2 + tiny-glob: 0.2.9 + undici: 5.22.0 + vite: 4.3.9(@types/node@20.3.2)(less@4.1.3) + transitivePeerDependencies: + - supports-color + + /@sveltejs/vite-plugin-svelte-inspector@1.0.3(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@3.57.0)(vite@4.3.5): 
+ resolution: {integrity: sha512-Khdl5jmmPN6SUsVuqSXatKpQTMIifoQPDanaxC84m9JxIibWvSABJyHpyys0Z+1yYrxY5TTEQm+6elh0XCMaOA==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + '@sveltejs/vite-plugin-svelte': ^2.2.0 + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@3.57.0)(vite@4.3.5) + debug: 4.3.4 + svelte: 3.57.0 + vite: 4.3.5(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - supports-color + dev: true + + /@sveltejs/vite-plugin-svelte-inspector@1.0.3(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@3.59.2)(vite@4.3.9): + resolution: {integrity: sha512-Khdl5jmmPN6SUsVuqSXatKpQTMIifoQPDanaxC84m9JxIibWvSABJyHpyys0Z+1yYrxY5TTEQm+6elh0XCMaOA==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + '@sveltejs/vite-plugin-svelte': ^2.2.0 + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@3.59.2)(vite@4.3.9) + debug: 4.3.4 + svelte: 3.59.2 + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - supports-color + + /@sveltejs/vite-plugin-svelte-inspector@1.0.3(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@4.0.0)(vite@4.3.9): + resolution: {integrity: sha512-Khdl5jmmPN6SUsVuqSXatKpQTMIifoQPDanaxC84m9JxIibWvSABJyHpyys0Z+1yYrxY5TTEQm+6elh0XCMaOA==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + '@sveltejs/vite-plugin-svelte': ^2.2.0 + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.4.2(svelte@4.0.0)(vite@4.3.9) + debug: 4.3.4 + svelte: 4.0.0 + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + transitivePeerDependencies: + - supports-color + + /@sveltejs/vite-plugin-svelte@2.4.2(svelte@3.57.0)(vite@4.3.5): + resolution: {integrity: sha512-ePfcC48ftMKhkT0OFGdOyycYKnnkT6i/buzey+vHRTR/JpQvuPzzhf1PtKqCDQfJRgoPSN2vscXs6gLigx/zGw==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte-inspector': 1.0.3(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@3.57.0)(vite@4.3.5) + debug: 4.3.4 + deepmerge: 4.3.1 + kleur: 4.1.5 + magic-string: 0.30.1 + svelte: 3.57.0 + svelte-hmr: 0.15.2(svelte@3.57.0) + vite: 4.3.5(@types/node@20.3.1)(less@4.1.3) + vitefu: 0.2.4(vite@4.3.5) + transitivePeerDependencies: + - supports-color + dev: true + + /@sveltejs/vite-plugin-svelte@2.4.2(svelte@3.59.2)(vite@4.3.9): + resolution: {integrity: sha512-ePfcC48ftMKhkT0OFGdOyycYKnnkT6i/buzey+vHRTR/JpQvuPzzhf1PtKqCDQfJRgoPSN2vscXs6gLigx/zGw==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte-inspector': 1.0.3(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@3.59.2)(vite@4.3.9) + debug: 4.3.4 + deepmerge: 4.3.1 + kleur: 4.1.5 + magic-string: 0.30.1 + svelte: 3.59.2 + svelte-hmr: 0.15.2(svelte@3.59.2) + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + vitefu: 0.2.4(vite@4.3.9) + transitivePeerDependencies: + - supports-color + + /@sveltejs/vite-plugin-svelte@2.4.2(svelte@4.0.0)(vite@4.3.9): + resolution: {integrity: sha512-ePfcC48ftMKhkT0OFGdOyycYKnnkT6i/buzey+vHRTR/JpQvuPzzhf1PtKqCDQfJRgoPSN2vscXs6gLigx/zGw==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte-inspector': 1.0.3(@sveltejs/vite-plugin-svelte@2.4.2)(svelte@4.0.0)(vite@4.3.9) + debug: 4.3.4 + deepmerge: 4.3.1 + kleur: 4.1.5 + magic-string: 0.30.1 + svelte: 4.0.0 + svelte-hmr: 0.15.2(svelte@4.0.0) + 
vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + vitefu: 0.2.4(vite@4.3.9) + transitivePeerDependencies: + - supports-color + + /@tailwindcss/forms@0.5.0(tailwindcss@3.1.6): + resolution: {integrity: sha512-KzWugryEBFkmoaYcBE18rs6gthWCFHHO7cAZm2/hv3hwD67AzwP7udSCa22E7R1+CEJL/FfhYsJWrc0b1aeSzw==} + peerDependencies: + tailwindcss: '>=3.0.0 || >= 3.0.0-alpha.1' + dependencies: + mini-svg-data-uri: 1.4.4 + tailwindcss: 3.1.6(postcss@8.4.27) + + /@tailwindcss/typography@0.5.4(tailwindcss@3.1.6): + resolution: {integrity: sha512-QEdg40EmGvE7kKoDei8zr5sf4D1pIayHj4R31bH3lX8x2BtTiR+jNejYPOkhbmy3DXgkMF9jC8xqNiGFAuL9Sg==} + peerDependencies: + tailwindcss: '>=3.0.0 || insiders' + dependencies: + lodash.castarray: 4.4.0 + lodash.isplainobject: 4.0.6 + lodash.merge: 4.6.2 + tailwindcss: 3.1.6(postcss@8.4.21) + dev: true + + /@testing-library/dom@9.0.0: + resolution: {integrity: sha512-+/TLgKNFsYUshOY/zXsQOk+PlFQK+eyJ9T13IDVNJEi+M+Un7xlJK+FZKkbGSnf0+7E1G6PlDhkSYQ/GFiruBQ==} + engines: {node: '>=14'} + dependencies: + '@babel/code-frame': 7.16.7 + '@babel/runtime': 7.21.0 + '@types/aria-query': 5.0.1 + aria-query: 5.0.0 + chalk: 4.1.2 + dom-accessibility-api: 0.5.13 + lz-string: 1.4.4 + pretty-format: 27.5.1 + + /@testing-library/jest-dom@6.0.0(vitest@0.34.0): + resolution: {integrity: sha512-Ye2R3+/oM27jir8CzYPmuWdavTaKwNZcu0d22L9pO/vnOYE0wmrtpw79TQJa8H6gV8/i7yd+pLaqeLlA0rTMfg==} + engines: {node: '>=14', npm: '>=6', yarn: '>=1'} + peerDependencies: + '@jest/globals': '>= 28' + '@types/jest': '>= 28' + jest: '>= 28' + vitest: '>= 0.32' + peerDependenciesMeta: + '@jest/globals': + optional: true + '@types/jest': + optional: true + jest: + optional: true + vitest: + optional: true + dependencies: + '@adobe/css-tools': 4.2.0 + '@babel/runtime': 7.22.6 + aria-query: 5.3.0 + chalk: 3.0.0 + css.escape: 1.5.1 + dom-accessibility-api: 0.5.13 + lodash: 4.17.21 + redent: 3.0.0 + vitest: 0.34.0(jsdom@22.1.0)(less@4.1.3) + dev: false + + /@testing-library/user-event@14.0.0(@testing-library/dom@9.0.0): + resolution: {integrity: sha512-hZhjNrle/DMi1ziHwHy8LS0fYJtf+cID7fuG5+4h+Bk83xQaRDQT/DlqfL4hJYw3mxW6KTIxoODrhGnhqJebdQ==} + engines: {node: '>=12', npm: '>=6'} + peerDependencies: + '@testing-library/dom': '>=7.21.4' + dependencies: + '@testing-library/dom': 9.0.0 + + /@tootallnate/once@2.0.0: + resolution: {integrity: sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==} + engines: {node: '>= 10'} + dev: false + + /@types/aria-query@5.0.1: + resolution: {integrity: sha512-XTIieEY+gvJ39ChLcB4If5zHtPxt3Syj5rgZR+e1ctpmK8NjPf0zFqsz4JpLJT0xla9GFDKjy8Cpu331nrmE1Q==} + + /@types/babel-types@7.0.11: + resolution: {integrity: sha512-pkPtJUUY+Vwv6B1inAz55rQvivClHJxc9aVEPPmaq2cbyeMLCiDpbKpcKyX4LAwpNGi+SHBv0tHv6+0gXv0P2A==} + dev: false + + /@types/babel__core@7.20.1: + resolution: {integrity: sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==} + dependencies: + '@babel/parser': 7.22.5 + '@babel/types': 7.22.5 + '@types/babel__generator': 7.6.4 + '@types/babel__template': 7.4.1 + '@types/babel__traverse': 7.20.1 + dev: true + + /@types/babel__generator@7.6.4: + resolution: {integrity: sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@types/babel__template@7.4.1: + resolution: {integrity: sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==} + dependencies: + '@babel/parser': 7.22.5 + 
'@babel/types': 7.22.5 + dev: true + + /@types/babel__traverse@7.20.1: + resolution: {integrity: sha512-MitHFXnhtgwsGZWtT68URpOvLN4EREih1u3QtQiN4VdAxWKRVvGCSvw/Qth0M0Qq3pJpnGOu5JaM/ydK7OGbqg==} + dependencies: + '@babel/types': 7.22.5 + dev: true + + /@types/babylon@6.16.6: + resolution: {integrity: sha512-G4yqdVlhr6YhzLXFKy5F7HtRBU8Y23+iWy7UKthMq/OSQnL1hbsoeXESQ2LY8zEDlknipDG3nRGhUC9tkwvy/w==} + dependencies: + '@types/babel-types': 7.0.11 + dev: false + + /@types/body-parser@1.19.2: + resolution: {integrity: sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==} + dependencies: + '@types/connect': 3.4.35 + '@types/node': 20.3.2 + dev: true + + /@types/chai-subset@1.3.3: + resolution: {integrity: sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==} + dependencies: + '@types/chai': 4.3.5 + dev: false + + /@types/chai@4.3.5: + resolution: {integrity: sha512-mEo1sAde+UCE6b2hxn332f1g1E8WfYRu6p5SvTKr2ZKC1f7gFJXk4h5PyGP9Dt6gCaG8y8XhwnXWC6Iy2cmBng==} + dev: false + + /@types/clone@2.1.1: + resolution: {integrity: sha512-BZIU34bSYye0j/BFcPraiDZ5ka6MJADjcDVELGf7glr9K+iE8NYVjFslJFVWzskSxkLLyCrSPScE82/UUoBSvg==} + dev: false + + /@types/connect@3.4.35: + resolution: {integrity: sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==} + dependencies: + '@types/node': 20.3.2 + dev: true + + /@types/cookie@0.4.1: + resolution: {integrity: sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==} + dev: false + + /@types/cookie@0.5.1: + resolution: {integrity: sha512-COUnqfB2+ckwXXSFInsFdOAWQzCCx+a5hq2ruyj+Vjund94RJQd4LG2u9hnvJrTgunKAaax7ancBYlDrNYxA0g==} + + /@types/d3-dsv@3.0.0: + resolution: {integrity: sha512-o0/7RlMl9p5n6FQDptuJVMxDf/7EDEv2SYEO/CwdG2tr1hTfUVi0Iavkk2ax+VpaQ/1jVhpnj5rq1nj8vwhn2A==} + + /@types/d3-path@3.0.0: + resolution: {integrity: sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg==} + dev: true + + /@types/d3-scale@4.0.3: + resolution: {integrity: sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==} + dependencies: + '@types/d3-time': 3.0.0 + dev: true + + /@types/d3-shape@3.1.1: + resolution: {integrity: sha512-6Uh86YFF7LGg4PQkuO2oG6EMBRLuW9cbavUW46zkIO5kuS2PfTqo2o9SkgtQzguBHbLgNnU90UNsITpsX1My+A==} + dependencies: + '@types/d3-path': 3.0.0 + dev: true + + /@types/d3-time@3.0.0: + resolution: {integrity: sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg==} + dev: true + + /@types/debug@4.1.7: + resolution: {integrity: sha512-9AonUzyTjXXhEOa0DnqpzZi6VHlqKMswga9EXjpXnnqxwLtdvPPtlO8evrI5D9S6asFRCQ6v+wpiUKbw+vKqyg==} + dependencies: + '@types/ms': 0.7.31 + dev: false + + /@types/detect-port@1.3.3: + resolution: {integrity: sha512-bV/jQlAJ/nPY3XqSatkGpu+nGzou+uSwrH1cROhn+jBFg47yaNH+blW4C7p9KhopC7QxCv/6M86s37k8dMk0Yg==} + dev: true + + /@types/doctrine@0.0.3: + resolution: {integrity: sha512-w5jZ0ee+HaPOaX25X2/2oGR/7rgAQSYII7X7pp0m9KgBfMP7uKfMfTvcpl5Dj+eDBbpxKGiqE+flqDr6XTd2RA==} + dev: true + + /@types/dompurify@3.0.2: + resolution: {integrity: sha512-YBL4ziFebbbfQfH5mlC+QTJsvh0oJUrWbmxKMyEdL7emlHJqGR2Qb34TEFKj+VCayBvjKy3xczMFNhugThUsfQ==} + dependencies: + '@types/trusted-types': 2.0.3 + dev: false + + /@types/ejs@3.1.2: + resolution: {integrity: sha512-ZmiaE3wglXVWBM9fyVC17aGPkLo/UgaOjEiI2FXQfyczrCefORPxIe+2dVmnmk3zkVIbizjrlQzmPGhSYGXG5g==} + dev: true + + /@types/eslint-scope@3.7.4: + 
resolution: {integrity: sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==} + dependencies: + '@types/eslint': 8.40.2 + '@types/estree': 1.0.0 + dev: true + + /@types/eslint@8.40.2: + resolution: {integrity: sha512-PRVjQ4Eh9z9pmmtaq8nTjZjQwKFk7YIHIud3lRoKRBgUQjgjRmoGxxGEPXQkF+lH7QkHJRNr5F4aBgYCW0lqpQ==} + dependencies: + '@types/estree': 1.0.0 + '@types/json-schema': 7.0.12 + dev: true + + /@types/estree@0.0.50: + resolution: {integrity: sha512-C6N5s2ZFtuZRj54k2/zyRhNDjJwwcViAM3Nbm8zjBpbqAdZ00mr0CFxvSKeO8Y/e03WVFLpQMdHYVfUd6SB+Hw==} + dev: false + + /@types/estree@1.0.0: + resolution: {integrity: sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ==} + + /@types/express-serve-static-core@4.17.35: + resolution: {integrity: sha512-wALWQwrgiB2AWTT91CB62b6Yt0sNHpznUXeZEcnPU3DRdlDIz74x8Qg1UUYKSVFi+va5vKOLYRBI1bRKiLLKIg==} + dependencies: + '@types/node': 20.3.2 + '@types/qs': 6.9.7 + '@types/range-parser': 1.2.4 + '@types/send': 0.17.1 + dev: true + + /@types/express@4.17.17: + resolution: {integrity: sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==} + dependencies: + '@types/body-parser': 1.19.2 + '@types/express-serve-static-core': 4.17.35 + '@types/qs': 6.9.7 + '@types/serve-static': 1.15.2 + dev: true + + /@types/find-cache-dir@3.2.1: + resolution: {integrity: sha512-frsJrz2t/CeGifcu/6uRo4b+SzAwT4NYCVPu1GN8IB9XTzrpPkGuV0tmh9mN+/L0PklAlsC3u5Fxt0ju00LXIw==} + dev: true + + /@types/glob@8.1.0: + resolution: {integrity: sha512-IO+MJPVhoqz+28h1qLAcBEH2+xHMK6MTyHJc7MTnnYb6wsoLR29POVGJ7LycmVXIqyy/4/2ShP5sUwTXuOwb/w==} + dependencies: + '@types/minimatch': 5.1.2 + '@types/node': 20.3.2 + dev: true + + /@types/graceful-fs@4.1.6: + resolution: {integrity: sha512-Sig0SNORX9fdW+bQuTEovKj3uHcUL6LQKbCrrqb1X7J6/ReAbhCXRAhc+SMejhLELFj2QcyuxmUooZ4bt5ReSw==} + dependencies: + '@types/node': 20.3.2 + dev: true + + /@types/hast@3.0.0: + resolution: {integrity: sha512-SoytUJRuf68HXYqcXicQIhCrLQjqeYU2anikr4G3p3Iz+OZO5QDQpDj++gv+RenHsnUBwNZ2dumBArF8VLSk2Q==} + dependencies: + '@types/unist': 2.0.6 + dev: false + + /@types/http-errors@2.0.1: + resolution: {integrity: sha512-/K3ds8TRAfBvi5vfjuz8y6+GiAYBZ0x4tXv1Av6CWBWn0IlADc+ZX9pMq7oU0fNQPnBwIZl3rmeLp6SBApbxSQ==} + dev: true + + /@types/is-ci@3.0.0: + resolution: {integrity: sha512-Q0Op0hdWbYd1iahB+IFNQcWXFq4O0Q5MwQP7uN0souuQ4rPg1vEYcnIOfr1gY+M+6rc8FGoRaBO1mOOvL29sEQ==} + dependencies: + ci-info: 3.8.0 + dev: false + + /@types/istanbul-lib-coverage@2.0.4: + resolution: {integrity: sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==} + + /@types/istanbul-lib-report@3.0.0: + resolution: {integrity: sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==} + dependencies: + '@types/istanbul-lib-coverage': 2.0.4 + + /@types/istanbul-reports@3.0.1: + resolution: {integrity: sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==} + dependencies: + '@types/istanbul-lib-report': 3.0.0 + + /@types/jest@29.5.0: + resolution: {integrity: sha512-3Emr5VOl/aoBwnWcH/EFQvlSAmjV+XtV9GGu5mwdYew5vhQh0IUZx/60x0TzHDu09Bi7HMx10t/namdJw5QIcg==} + dependencies: + expect: 29.5.0 + pretty-format: 29.5.0 + dev: false + + /@types/js-levenshtein@1.1.1: + resolution: {integrity: sha512-qC4bCqYGy1y/NP7dDVr7KJarn+PbX1nSpwA7JXdu0HxT3QYjO8MJ+cntENtHFVy2dRAyBV23OZ6MxsW1AM1L8g==} + dev: false + + /@types/json-schema@7.0.12: + resolution: 
{integrity: sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==} + + /@types/katex@0.16.0: + resolution: {integrity: sha512-hz+S3nV6Mym5xPbT9fnO8dDhBFQguMYpY0Ipxv06JMi1ORgnEM4M1ymWDUhUNer3ElLmT583opRo4RzxKmh9jw==} + dev: false + + /@types/lodash@4.14.195: + resolution: {integrity: sha512-Hwx9EUgdwf2GLarOjQp5ZH8ZmblzcbTBC2wtQWNKARBSxM9ezRIAUpeDTgoQRAFB0+8CNWXVA9+MaSOzOF3nPg==} + dev: true + + /@types/mdx@2.0.5: + resolution: {integrity: sha512-76CqzuD6Q7LC+AtbPqrvD9AqsN0k8bsYo2bM2J8pmNldP1aIPAbzUQ7QbobyXL4eLr1wK5x8FZFe8eF/ubRuBg==} + dev: true + + /@types/mime-types@2.1.1: + resolution: {integrity: sha512-vXOTGVSLR2jMw440moWTC7H19iUyLtP3Z1YTj7cSsubOICinjMxFeb/V57v9QdyyPGbbWolUFSSmSiRSn94tFw==} + dev: true + + /@types/mime@1.3.2: + resolution: {integrity: sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==} + dev: true + + /@types/mime@3.0.1: + resolution: {integrity: sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==} + dev: true + + /@types/minimatch@5.1.2: + resolution: {integrity: sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==} + dev: true + + /@types/minimist@1.2.2: + resolution: {integrity: sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==} + dev: false + + /@types/ms@0.7.31: + resolution: {integrity: sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==} + dev: false + + /@types/node-fetch@2.6.4: + resolution: {integrity: sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==} + dependencies: + '@types/node': 20.3.2 + form-data: 3.0.1 + dev: true + + /@types/node@12.20.55: + resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} + dev: false + + /@types/node@16.18.38: + resolution: {integrity: sha512-6sfo1qTulpVbkxECP+AVrHV9OoJqhzCsfTNp5NIG+enM4HyM3HvZCO798WShIXBN0+QtDIcutJCjsVYnQP5rIQ==} + dev: true + + /@types/node@20.3.1: + resolution: {integrity: sha512-EhcH/wvidPy1WeML3TtYFGR83UzjxeWRen9V402T8aUGYsCHOmfoisV3ZSg03gAFIbLq8TnWOJ0f4cALtnSEUg==} + + /@types/node@20.3.2: + resolution: {integrity: sha512-vOBLVQeCQfIcF/2Y7eKFTqrMnizK5lRNQ7ykML/5RuwVXVWxYkgwS7xbt4B6fKCUPgbSL5FSsjHQpaGQP/dQmw==} + + /@types/normalize-package-data@2.4.1: + resolution: {integrity: sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==} + + /@types/npmlog@4.1.4: + resolution: {integrity: sha512-WKG4gTr8przEZBiJ5r3s8ZIAoMXNbOgQ+j/d5O4X3x6kZJRLNvyUJuUK/KoG3+8BaOHPhp2m7WC6JKKeovDSzQ==} + dev: true + + /@types/path-browserify@1.0.0: + resolution: {integrity: sha512-XMCcyhSvxcch8b7rZAtFAaierBYdeHXVvg2iYnxOV0MCQHmPuRRmGZPFDRzPayxcGiiSL1Te9UIO+f3cuj0tfw==} + dev: false + + /@types/pretty-hrtime@1.0.1: + resolution: {integrity: sha512-VjID5MJb1eGKthz2qUerWT8+R4b9N+CHvGCzg9fn4kWZgaF9AhdYikQio3R7wV8YY1NsQKPaCwKz1Yff+aHNUQ==} + dev: true + + /@types/prismjs@1.26.0: + resolution: {integrity: sha512-ZTaqn/qSqUuAq1YwvOFQfVW1AR/oQJlLSZVustdjwI+GZ8kr0MSHBj0tsXPW1EqHubx50gtBEjbPGsdZwQwCjQ==} + dev: true + + /@types/prismjs@1.26.1: + resolution: {integrity: sha512-Q7jDsRbzcNHIQje15CS/piKhu6lMLb9jwjxSfEIi4KcFKXW23GoJMkwQiJ8VObyfx+VmUaDcJxXaWN+cTCjVog==} + dev: false + + /@types/prop-types@15.7.5: + resolution: {integrity: sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==} 
+ dev: true + + /@types/pug@2.0.6: + resolution: {integrity: sha512-SnHmG9wN1UVmagJOnyo/qkk0Z7gejYxOYYmaAwr5u2yFYfsupN3sg10kyzN8Hep/2zbHxCnsumxOoRIRMBwKCg==} + + /@types/qs@6.9.7: + resolution: {integrity: sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==} + dev: true + + /@types/range-parser@1.2.4: + resolution: {integrity: sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==} + dev: true + + /@types/react@18.2.14: + resolution: {integrity: sha512-A0zjq+QN/O0Kpe30hA1GidzyFjatVvrpIvWLxD+xv67Vt91TWWgco9IvrJBkeyHm1trGaFS/FSGqPlhyeZRm0g==} + dependencies: + '@types/prop-types': 15.7.5 + '@types/scheduler': 0.16.3 + csstype: 3.1.2 + dev: true + + /@types/scheduler@0.16.3: + resolution: {integrity: sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==} + dev: true + + /@types/semver@6.2.3: + resolution: {integrity: sha512-KQf+QAMWKMrtBMsB8/24w53tEsxllMj6TuA80TT/5igJalLI/zm0L3oXRbIAl4Ohfc85gyHX/jhMwsVkmhLU4A==} + dev: false + + /@types/semver@7.5.0: + resolution: {integrity: sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==} + + /@types/send@0.17.1: + resolution: {integrity: sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==} + dependencies: + '@types/mime': 1.3.2 + '@types/node': 20.3.2 + dev: true + + /@types/serve-static@1.15.2: + resolution: {integrity: sha512-J2LqtvFYCzaj8pVYKw8klQXrLLk7TBZmQ4ShlcdkELFKGwGMfevMLneMMRkMgZxotOD9wg497LpC7O8PcvAmfw==} + dependencies: + '@types/http-errors': 2.0.1 + '@types/mime': 3.0.1 + '@types/node': 20.3.2 + dev: true + + /@types/set-cookie-parser@2.4.2: + resolution: {integrity: sha512-fBZgytwhYAUkj/jC/FAV4RQ5EerRup1YQsXQCh8rZfiHkc4UahC192oH0smGwsXol3cL3A5oETuAHeQHmhXM4w==} + dependencies: + '@types/node': 20.3.2 + dev: false + + /@types/stack-utils@2.0.1: + resolution: {integrity: sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==} + dev: false + + /@types/testing-library__jest-dom@5.14.6: + resolution: {integrity: sha512-FkHXCb+ikSoUP4Y4rOslzTdX5sqYwMxfefKh1GmZ8ce1GOkEHntSp6b5cGadmNfp5e4BMEWOMx+WSKd5/MqlDA==} + dependencies: + '@types/jest': 29.5.0 + dev: false + + /@types/trusted-types@2.0.3: + resolution: {integrity: sha512-NfQ4gyz38SL8sDNrSixxU2Os1a5xcdFxipAFxYEuLUlvU2uDwS4NUpsImcf1//SlWItCVMMLiylsxbmNMToV/g==} + dev: false + + /@types/unist@2.0.6: + resolution: {integrity: sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==} + + /@types/ws@8.5.4: + resolution: {integrity: sha512-zdQDHKUgcX/zBc4GrwsE/7dVdAD8JR4EuiAXiiUhhfyIJXXb2+PrGshFyeXWQPMmmZ2XxgaqclgpIC7eTXc1mg==} + dependencies: + '@types/node': 20.3.1 + dev: true + + /@types/yargs-parser@21.0.0: + resolution: {integrity: sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==} + + /@types/yargs@16.0.5: + resolution: {integrity: sha512-AxO/ADJOBFJScHbWhq2xAhlWP24rY4aCEG/NFaMvbT3X2MgRsLjhjQwsn0Zi5zn0LG9jUhCCZMeX9Dkuw6k+vQ==} + dependencies: + '@types/yargs-parser': 21.0.0 + dev: true + + /@types/yargs@17.0.24: + resolution: {integrity: sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==} + dependencies: + '@types/yargs-parser': 21.0.0 + + /@typescript-eslint/eslint-plugin@6.2.1(@typescript-eslint/parser@6.2.1)(eslint@8.46.0)(typescript@5.1.3): + resolution: {integrity: 
sha512-iZVM/ALid9kO0+I81pnp1xmYiFyqibAHzrqX4q5YvvVEyJqY+e6rfTXSCsc2jUxGNqJqTfFSSij/NFkZBiBzLw==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + '@typescript-eslint/parser': ^6.0.0 || ^6.0.0-alpha + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@eslint-community/regexpp': 4.5.1 + '@typescript-eslint/parser': 6.2.1(eslint@8.46.0)(typescript@5.1.3) + '@typescript-eslint/scope-manager': 6.2.1 + '@typescript-eslint/type-utils': 6.2.1(eslint@8.46.0)(typescript@5.1.3) + '@typescript-eslint/utils': 6.2.1(eslint@8.46.0)(typescript@5.1.3) + '@typescript-eslint/visitor-keys': 6.2.1 + debug: 4.3.4 + eslint: 8.46.0 + graphemer: 1.4.0 + ignore: 5.2.4 + natural-compare: 1.4.0 + natural-compare-lite: 1.4.0 + semver: 7.5.4 + ts-api-utils: 1.0.1(typescript@5.1.3) + typescript: 5.1.3 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/parser@6.2.1(eslint@8.46.0)(typescript@5.1.3): + resolution: {integrity: sha512-Ld+uL1kYFU8e6btqBFpsHkwQ35rw30IWpdQxgOqOh4NfxSDH6uCkah1ks8R/RgQqI5hHPXMaLy9fbFseIe+dIg==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/scope-manager': 6.2.1 + '@typescript-eslint/types': 6.2.1 + '@typescript-eslint/typescript-estree': 6.2.1(typescript@5.1.3) + '@typescript-eslint/visitor-keys': 6.2.1 + debug: 4.3.4 + eslint: 8.46.0 + typescript: 5.1.3 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/scope-manager@6.2.1: + resolution: {integrity: sha512-UCqBF9WFqv64xNsIEPfBtenbfodPXsJ3nPAr55mGPkQIkiQvgoWNo+astj9ZUfJfVKiYgAZDMnM6dIpsxUMp3Q==} + engines: {node: ^16.0.0 || >=18.0.0} + dependencies: + '@typescript-eslint/types': 6.2.1 + '@typescript-eslint/visitor-keys': 6.2.1 + dev: false + + /@typescript-eslint/type-utils@6.2.1(eslint@8.46.0)(typescript@5.1.3): + resolution: {integrity: sha512-fTfCgomBMIgu2Dh2Or3gMYgoNAnQm3RLtRp+jP7A8fY+LJ2+9PNpi5p6QB5C4RSP+U3cjI0vDlI3mspAkpPVbQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/typescript-estree': 6.2.1(typescript@5.1.3) + '@typescript-eslint/utils': 6.2.1(eslint@8.46.0)(typescript@5.1.3) + debug: 4.3.4 + eslint: 8.46.0 + ts-api-utils: 1.0.1(typescript@5.1.3) + typescript: 5.1.3 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/types@6.2.1: + resolution: {integrity: sha512-528bGcoelrpw+sETlyM91k51Arl2ajbNT9L4JwoXE2dvRe1yd8Q64E4OL7vHYw31mlnVsf+BeeLyAZUEQtqahQ==} + engines: {node: ^16.0.0 || >=18.0.0} + dev: false + + /@typescript-eslint/typescript-estree@6.2.1(typescript@5.1.3): + resolution: {integrity: sha512-G+UJeQx9AKBHRQBpmvr8T/3K5bJa485eu+4tQBxFq0KoT22+jJyzo1B50JDT9QdC1DEmWQfdKsa8ybiNWYsi0Q==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/types': 6.2.1 + '@typescript-eslint/visitor-keys': 6.2.1 + debug: 4.3.4 + globby: 11.1.0 + is-glob: 4.0.3 + semver: 7.5.4 + ts-api-utils: 1.0.1(typescript@5.1.3) + typescript: 5.1.3 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/utils@6.2.1(eslint@8.46.0)(typescript@5.1.3): + resolution: {integrity: 
sha512-eBIXQeupYmxVB6S7x+B9SdBeB6qIdXKjgQBge2J+Ouv8h9Cxm5dHf/gfAZA6dkMaag+03HdbVInuXMmqFB/lKQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.46.0) + '@types/json-schema': 7.0.12 + '@types/semver': 7.5.0 + '@typescript-eslint/scope-manager': 6.2.1 + '@typescript-eslint/types': 6.2.1 + '@typescript-eslint/typescript-estree': 6.2.1(typescript@5.1.3) + eslint: 8.46.0 + semver: 7.5.4 + transitivePeerDependencies: + - supports-color + - typescript + dev: false + + /@typescript-eslint/visitor-keys@6.2.1: + resolution: {integrity: sha512-iTN6w3k2JEZ7cyVdZJTVJx2Lv7t6zFA8DCrJEHD2mwfc16AEvvBWVhbFh34XyG2NORCd0viIgQY1+u7kPI0WpA==} + engines: {node: ^16.0.0 || >=18.0.0} + dependencies: + '@typescript-eslint/types': 6.2.1 + eslint-visitor-keys: 3.4.1 + dev: false + + /@vercel/nft@0.23.0: + resolution: {integrity: sha512-1iuPjyltiPqyZrvc/bW1CyICRdng8bVhpJT8MsIXV7Wj+mRFyJs9krsHbVy2pZwu7BMAgforQsT5TCY1JoBDxw==} + engines: {node: '>=14'} + hasBin: true + dependencies: + '@mapbox/node-pre-gyp': 1.0.11 + '@rollup/pluginutils': 4.2.1 + acorn: 8.10.0 + async-sema: 3.1.1 + bindings: 1.5.0 + estree-walker: 2.0.2 + glob: 7.2.0 + graceful-fs: 4.2.9 + micromatch: 4.0.4 + node-gyp-build: 4.6.0 + resolve-from: 5.0.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: false + + /@vitest/expect@0.34.0: + resolution: {integrity: sha512-d1ZU0XomWFAFyYIc6uNuY0N8NJIWESyO/6ZmwLvlHZw0GevH4AEEpq178KjXIvSCrbHN0GnzYzitd0yjfy7+Ow==} + dependencies: + '@vitest/spy': 0.34.0 + '@vitest/utils': 0.34.0 + chai: 4.3.7 + dev: false + + /@vitest/runner@0.34.0: + resolution: {integrity: sha512-xaqM+oArJothtYXzy/dwu/iHe93Khq5QkvnYbzTxiLA0enD2peft1cask3yE6cJpwMkr7C2D1uMJwnTt4mquDw==} + dependencies: + '@vitest/utils': 0.34.0 + p-limit: 4.0.0 + pathe: 1.1.1 + dev: false + + /@vitest/snapshot@0.34.0: + resolution: {integrity: sha512-eGN5XBZHYOghxCOQbf8dcn6/3g7IW77GOOOC/mNFYwRXsPeoQgcgWnhj+6wgJ04pVv25wpxWL9jUkzaQ7LoFtg==} + dependencies: + magic-string: 0.30.1 + pathe: 1.1.1 + pretty-format: 29.5.0 + dev: false + + /@vitest/spy@0.34.0: + resolution: {integrity: sha512-0SZaWrQvL9ZiF/uJvyWSvsKjfuMvD1M6dE5BbE4Dmt8Vh3k4htwCV8g3ce8YOYmJSxkbh6TNOpippD6NVsxW6w==} + dependencies: + tinyspy: 2.1.1 + dev: false + + /@vitest/utils@0.34.0: + resolution: {integrity: sha512-IktrDLhBKf3dEUUxH+lcHiPnaw952+GdGvoxg99liMscgP6IePf6LuMY7B9dEIHkFunB1R8VMR/wmI/4UGg1aw==} + dependencies: + diff-sequences: 29.4.3 + loupe: 2.3.6 + pretty-format: 29.5.0 + dev: false + + /@webassemblyjs/ast@1.11.6: + resolution: {integrity: sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==} + dependencies: + '@webassemblyjs/helper-numbers': 1.11.6 + '@webassemblyjs/helper-wasm-bytecode': 1.11.6 + dev: true + + /@webassemblyjs/floating-point-hex-parser@1.11.6: + resolution: {integrity: sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==} + dev: true + + /@webassemblyjs/helper-api-error@1.11.6: + resolution: {integrity: sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==} + dev: true + + /@webassemblyjs/helper-buffer@1.11.6: + resolution: {integrity: sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==} + dev: true + + /@webassemblyjs/helper-numbers@1.11.6: + resolution: {integrity: sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==} + dependencies: 
+ '@webassemblyjs/floating-point-hex-parser': 1.11.6 + '@webassemblyjs/helper-api-error': 1.11.6 + '@xtuc/long': 4.2.2 + dev: true + + /@webassemblyjs/helper-wasm-bytecode@1.11.6: + resolution: {integrity: sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==} + dev: true + + /@webassemblyjs/helper-wasm-section@1.11.6: + resolution: {integrity: sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==} + dependencies: + '@webassemblyjs/ast': 1.11.6 + '@webassemblyjs/helper-buffer': 1.11.6 + '@webassemblyjs/helper-wasm-bytecode': 1.11.6 + '@webassemblyjs/wasm-gen': 1.11.6 + dev: true + + /@webassemblyjs/ieee754@1.11.6: + resolution: {integrity: sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==} + dependencies: + '@xtuc/ieee754': 1.2.0 + dev: true + + /@webassemblyjs/leb128@1.11.6: + resolution: {integrity: sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==} + dependencies: + '@xtuc/long': 4.2.2 + dev: true + + /@webassemblyjs/utf8@1.11.6: + resolution: {integrity: sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==} + dev: true + + /@webassemblyjs/wasm-edit@1.11.6: + resolution: {integrity: sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==} + dependencies: + '@webassemblyjs/ast': 1.11.6 + '@webassemblyjs/helper-buffer': 1.11.6 + '@webassemblyjs/helper-wasm-bytecode': 1.11.6 + '@webassemblyjs/helper-wasm-section': 1.11.6 + '@webassemblyjs/wasm-gen': 1.11.6 + '@webassemblyjs/wasm-opt': 1.11.6 + '@webassemblyjs/wasm-parser': 1.11.6 + '@webassemblyjs/wast-printer': 1.11.6 + dev: true + + /@webassemblyjs/wasm-gen@1.11.6: + resolution: {integrity: sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==} + dependencies: + '@webassemblyjs/ast': 1.11.6 + '@webassemblyjs/helper-wasm-bytecode': 1.11.6 + '@webassemblyjs/ieee754': 1.11.6 + '@webassemblyjs/leb128': 1.11.6 + '@webassemblyjs/utf8': 1.11.6 + dev: true + + /@webassemblyjs/wasm-opt@1.11.6: + resolution: {integrity: sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==} + dependencies: + '@webassemblyjs/ast': 1.11.6 + '@webassemblyjs/helper-buffer': 1.11.6 + '@webassemblyjs/wasm-gen': 1.11.6 + '@webassemblyjs/wasm-parser': 1.11.6 + dev: true + + /@webassemblyjs/wasm-parser@1.11.6: + resolution: {integrity: sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==} + dependencies: + '@webassemblyjs/ast': 1.11.6 + '@webassemblyjs/helper-api-error': 1.11.6 + '@webassemblyjs/helper-wasm-bytecode': 1.11.6 + '@webassemblyjs/ieee754': 1.11.6 + '@webassemblyjs/leb128': 1.11.6 + '@webassemblyjs/utf8': 1.11.6 + dev: true + + /@webassemblyjs/wast-printer@1.11.6: + resolution: {integrity: sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==} + dependencies: + '@webassemblyjs/ast': 1.11.6 + '@xtuc/long': 4.2.2 + dev: true + + /@xmldom/xmldom@0.8.6: + resolution: {integrity: sha512-uRjjusqpoqfmRkTaNuLJ2VohVr67Q5YwDATW3VU7PfzTj6IRaihGrYI7zckGZjxQPBIp63nfvJbM+Yu5ICh0Bg==} + engines: {node: '>=10.0.0'} + dev: false + + /@xtuc/ieee754@1.2.0: + resolution: {integrity: sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==} + dev: true + + /@xtuc/long@4.2.2: + resolution: {integrity: 
sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} + dev: true + + /@yarnpkg/esbuild-plugin-pnp@3.0.0-rc.15(esbuild@0.17.14): + resolution: {integrity: sha512-kYzDJO5CA9sy+on/s2aIW0411AklfCi8Ck/4QDivOqsMKpStZA2SsR+X27VTggGwpStWaLrjJcDcdDMowtG8MA==} + engines: {node: '>=14.15.0'} + peerDependencies: + esbuild: '>=0.10.0' + dependencies: + esbuild: 0.17.14 + tslib: 2.6.1 + dev: true + + /@zxing/text-encoding@0.9.0: + resolution: {integrity: sha512-U/4aVJ2mxI0aDNI8Uq0wEhMgY+u4CNtEb0om3+y3+niDAsoTCOB33UF0sxpzqzdqXLqmvc+vZyAt4O8pPdfkwA==} + requiresBuild: true + dev: false + optional: true + + /abab@2.0.6: + resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==} + dev: false + + /abbrev@1.1.1: + resolution: {integrity: sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==} + dev: false + + /accepts@1.3.8: + resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} + engines: {node: '>= 0.6'} + dependencies: + mime-types: 2.1.34 + negotiator: 0.6.3 + dev: true + + /acorn-import-assertions@1.9.0(acorn@8.10.0): + resolution: {integrity: sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==} + peerDependencies: + acorn: ^8 + dependencies: + acorn: 8.10.0 + dev: true + + /acorn-jsx@5.3.2(acorn@8.10.0): + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + acorn: 8.10.0 + + /acorn-node@1.8.2: + resolution: {integrity: sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A==} + dependencies: + acorn: 7.4.1 + acorn-walk: 7.2.0 + xtend: 4.0.2 + + /acorn-walk@7.2.0: + resolution: {integrity: sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==} + engines: {node: '>=0.4.0'} + + /acorn-walk@8.2.0: + resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: '>=0.4.0'} + dev: false + + /acorn@7.4.1: + resolution: {integrity: sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==} + engines: {node: '>=0.4.0'} + hasBin: true + + /acorn@8.10.0: + resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==} + engines: {node: '>=0.4.0'} + hasBin: true + + /acorn@8.8.2: + resolution: {integrity: sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==} + engines: {node: '>=0.4.0'} + hasBin: true + + /address@1.2.2: + resolution: {integrity: sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==} + engines: {node: '>= 10.0.0'} + dev: true + + /adjust-sourcemap-loader@4.0.0: + resolution: {integrity: sha512-OXwN5b9pCUXNQHJpwwD2qP40byEmSgzj8B4ydSN0uMNYWiFmJ6x6KwUllMmfk8Rwu/HJDFR7U8ubsWBoN0Xp0A==} + engines: {node: '>=8.9'} + dependencies: + loader-utils: 2.0.4 + regex-parser: 2.2.11 + dev: true + + /agent-base@5.1.1: + resolution: {integrity: sha512-TMeqbNl2fMW0nMjTEPOwe3J/PRFP4vqeoNuQMG0HlMrtm5QxKqdvAkZ1pRBQ/ulIyDD5Yq0nJ7YbdD8ey0TO3g==} + engines: {node: '>= 6.0.0'} + dev: true + + /agent-base@6.0.2: + resolution: {integrity: 
sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} + engines: {node: '>= 6.0.0'} + dependencies: + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + + /aggregate-error@3.1.0: + resolution: {integrity: sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==} + engines: {node: '>=8'} + dependencies: + clean-stack: 2.2.0 + indent-string: 4.0.0 + dev: true + + /ajv-keywords@3.5.2(ajv@6.12.6): + resolution: {integrity: sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==} + peerDependencies: + ajv: ^6.9.1 + dependencies: + ajv: 6.12.6 + dev: true + + /ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + /ansi-colors@4.1.3: + resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} + engines: {node: '>=6'} + + /ansi-escapes@4.3.2: + resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.21.3 + dev: false + + /ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + /ansi-styles@3.2.1: + resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} + engines: {node: '>=4'} + dependencies: + color-convert: 1.9.3 + + /ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + dependencies: + color-convert: 2.0.1 + + /ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + + /anymatch@3.1.2: + resolution: {integrity: sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==} + engines: {node: '>= 8'} + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + /app-root-dir@1.0.2: + resolution: {integrity: sha512-jlpIfsOoNoafl92Sz//64uQHGSyMrD2vYG5d8o2a4qGvyNCvXur7bzIsWtAC/6flI2RYAp3kv8rsfBtaLm7w0g==} + dev: true + + /aproba@2.0.0: + resolution: {integrity: sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==} + + /are-we-there-yet@2.0.0: + resolution: {integrity: sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==} + engines: {node: '>=10'} + dependencies: + delegates: 1.0.0 + readable-stream: 3.6.0 + + /arg@5.0.2: + resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + + /argparse@1.0.10: + resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} + dependencies: + sprintf-js: 1.0.3 + + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + /aria-query@5.0.0: + resolution: {integrity: sha512-V+SM7AbUwJ+EBnB8+DXs0hPZHO0W6pqBcc0dW90OwtVG02PswOu/teuARoLQjdDOH+t9pJgGnW5/Qmouf3gPJg==} + engines: {node: '>=6.0'} + + /aria-query@5.3.0: + 
resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==} + dependencies: + dequal: 2.0.3 + + /array-buffer-byte-length@1.0.0: + resolution: {integrity: sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==} + dependencies: + call-bind: 1.0.2 + is-array-buffer: 3.0.2 + dev: false + + /array-flatten@1.1.1: + resolution: {integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==} + dev: true + + /array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + /array.prototype.flat@1.3.1: + resolution: {integrity: sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + es-abstract: 1.21.2 + es-shim-unscopables: 1.0.0 + dev: false + + /arrify@1.0.1: + resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} + engines: {node: '>=0.10.0'} + dev: false + + /assert@2.0.0: + resolution: {integrity: sha512-se5Cd+js9dXJnu6Ag2JFc00t+HmHOen+8Q+L7O9zI0PqQXr20uk2J0XQqMxZEeo5U50o8Nvmmx7dZrl+Ufr35A==} + dependencies: + es6-object-assign: 1.1.0 + is-nan: 1.3.2 + object-is: 1.1.5 + util: 0.12.5 + dev: true + + /assertion-error@1.1.0: + resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} + dev: false + + /ast-types@0.15.2: + resolution: {integrity: sha512-c27loCv9QkZinsa5ProX751khO9DJl/AcB5c2KNtA6NRvHKS0PgLfcftz72KVq504vB0Gku5s2kUZzDBvQWvHg==} + engines: {node: '>=4'} + dependencies: + tslib: 2.6.1 + dev: true + + /ast-types@0.16.1: + resolution: {integrity: sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==} + engines: {node: '>=4'} + dependencies: + tslib: 2.6.1 + dev: true + + /async-limiter@1.0.1: + resolution: {integrity: sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==} + dev: true + + /async-sema@3.1.1: + resolution: {integrity: sha512-tLRNUXati5MFePdAk8dw7Qt7DpxPB60ofAgn8WRhW6a2rcimZnYBP9oxHiv0OHy+Wz7kPMG+t4LGdt31+4EmGg==} + dev: false + + /async@3.2.4: + resolution: {integrity: sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==} + dev: true + + /asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + /automation-events@6.0.8: + resolution: {integrity: sha512-OXI9rEbA0LwWr+Tmvka4EHtVHBIVw8KD2NM7fIGjd4dyGnuiM3ULZL+Jlo4aKXZDY98raT4R4rEDOHAbz8Jm9A==} + engines: {node: '>=16.1.0'} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.6.1 + dev: false + + /autoprefixer@10.4.4(postcss@8.4.27): + resolution: {integrity: sha512-Tm8JxsB286VweiZ5F0anmbyGiNI3v3wGv3mz9W+cxEDYB/6jbnj6GM9H9mK3wIL8ftgl+C07Lcwb8PG5PCCPzA==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + dependencies: + browserslist: 4.20.2 + caniuse-lite: 1.0.30001317 + fraction.js: 4.2.0 + normalize-range: 0.1.2 + picocolors: 1.0.0 + postcss: 8.4.27 + postcss-value-parser: 4.2.0 + dev: false + + /available-typed-arrays@1.0.5: + resolution: {integrity: sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==} + engines: {node: '>= 
0.4'} + + /axe-core@4.7.2: + resolution: {integrity: sha512-zIURGIS1E1Q4pcrMjp+nnEh+16G56eG/MUllJH8yEvw7asDo7Ac9uhC9KIH5jzpITueEZolfYglnCGIuSBz39g==} + engines: {node: '>=4'} + dev: true + + /axobject-query@3.2.1: + resolution: {integrity: sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==} + dependencies: + dequal: 2.0.3 + + /babel-core@7.0.0-bridge.0(@babel/core@7.22.5): + resolution: {integrity: sha512-poPX9mZH/5CSanm50Q+1toVci6pv5KSRv/5TWCwtzQS5XEwn40BcCrgIeMFWP9CKKIniKXNxoIOnOq4VVlGXhg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + dev: true + + /babel-plugin-istanbul@6.1.1: + resolution: {integrity: sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==} + engines: {node: '>=8'} + dependencies: + '@babel/helper-plugin-utils': 7.22.5 + '@istanbuljs/load-nyc-config': 1.1.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-instrument: 5.2.1 + test-exclude: 6.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-polyfill-corejs2@0.3.3(@babel/core@7.21.8): + resolution: {integrity: sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.21.8 + '@babel/helper-define-polyfill-provider': 0.3.3(@babel/core@7.21.8) + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-polyfill-corejs2@0.4.3(@babel/core@7.22.5): + resolution: {integrity: sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/compat-data': 7.22.5 + '@babel/core': 7.22.5 + '@babel/helper-define-polyfill-provider': 0.4.0(@babel/core@7.22.5) + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-polyfill-corejs3@0.6.0(@babel/core@7.21.8): + resolution: {integrity: sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-define-polyfill-provider': 0.3.3(@babel/core@7.21.8) + core-js-compat: 3.31.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-polyfill-corejs3@0.8.1(@babel/core@7.22.5): + resolution: {integrity: sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-define-polyfill-provider': 0.4.0(@babel/core@7.22.5) + core-js-compat: 3.31.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-polyfill-regenerator@0.4.1(@babel/core@7.21.8): + resolution: {integrity: sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.21.8 + '@babel/helper-define-polyfill-provider': 0.3.3(@babel/core@7.21.8) + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-polyfill-regenerator@0.5.0(@babel/core@7.22.5): + resolution: {integrity: sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.22.5 + '@babel/helper-define-polyfill-provider': 
0.4.0(@babel/core@7.22.5) + transitivePeerDependencies: + - supports-color + dev: true + + /babylonjs-gltf2interface@4.2.2: + resolution: {integrity: sha512-LCQgW1lM+EpKK4yWMiPEgi6ONwJ7W4JrSu3t9JixNRgvnic72OnN2f0bt91rE30EJr1ZaokvkXD/aEiBp/Juyg==} + dev: false + + /babylonjs-gltf2interface@5.18.0: + resolution: {integrity: sha512-VucYtbedtHv89lEhgrD5ULoXTCcU8ZjyBygdh06VybiVSaCzQqTxam6ZFibazpkzB6maSHe8yAm3wE1EPfOxhg==} + dev: false + + /babylonjs-loaders@4.2.2: + resolution: {integrity: sha512-IuShR5N4zkDMzKEGsCZ0uZDCn07BLLj8LlKwyiSwFR1V9KxAALkt2INTMCdXjuWELTcZEALZlyO85mKJ2pDPHw==} + dependencies: + babylonjs: 4.2.2 + babylonjs-gltf2interface: 4.2.2 + dev: false + + /babylonjs-loaders@5.18.0: + resolution: {integrity: sha512-O4v8kGylkWlcHJfhToKiyQprkhcIWe0PiT5yudfTSkpXrWy7YXb1VzssxilfkLRFOr7MWHlNvW+zZNcTs+Sk8Q==} + dependencies: + babylonjs: 5.18.0 + babylonjs-gltf2interface: 5.18.0 + dev: false + + /babylonjs@4.2.2: + resolution: {integrity: sha512-p7mTi6+nLuWJTLbwxEJxLOh/QMHMV2KA0bviEoQSK5VtsAq1F0JghoOZYRs4aEqAZF/deFPWvMQk1vbXJ+4eEA==} + dev: false + + /babylonjs@5.18.0: + resolution: {integrity: sha512-d4WrcR3e1FOnOlEtOofRH+OniZT4cx6EuDvKB0OkqnPnjD7ALuo5cmJDQmILWnDTOJRqojK7Psz9etEeNDionA==} + requiresBuild: true + dev: false + + /balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + /base-64@1.0.0: + resolution: {integrity: sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==} + dev: true + + /base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + + /better-opn@2.1.1: + resolution: {integrity: sha512-kIPXZS5qwyKiX/HcRvDYfmBQUa8XP17I0mYZZ0y4UhpYOSvtsLHDYqmomS+Mj20aDvD3knEiQ0ecQy2nhio3yA==} + engines: {node: '>8.0.0'} + dependencies: + open: 7.4.2 + dev: true + + /better-path-resolve@1.0.0: + resolution: {integrity: sha512-pbnl5XzGBdrFU/wT4jqmJVPn2B6UHPBOhzMQkY/SPUPB6QtUXtmBHBIwCbXJol93mOpGMnQyP/+BB19q04xj7g==} + engines: {node: '>=4'} + dependencies: + is-windows: 1.0.2 + dev: false + + /big-integer@1.6.51: + resolution: {integrity: sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==} + engines: {node: '>=0.6'} + dev: true + + /big.js@5.2.2: + resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} + dev: true + + /binary-extensions@2.2.0: + resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} + engines: {node: '>=8'} + + /bindings@1.5.0: + resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==} + dependencies: + file-uri-to-path: 1.0.0 + dev: false + + /bl@4.1.0: + resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==} + dependencies: + buffer: 5.7.1 + inherits: 2.0.4 + readable-stream: 3.6.0 + + /body-parser@1.20.1: + resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} + engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 2.6.9 + depd: 2.0.0 + destroy: 1.2.0 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + on-finished: 2.4.1 + qs: 6.11.0 + raw-body: 2.5.1 + type-is: 1.6.18 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + 
dev: true + + /boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + dev: false + + /bplist-parser@0.2.0: + resolution: {integrity: sha512-z0M+byMThzQmD9NILRniCUXYsYpjwnlO8N5uCFaCqIOpqRsJCrQL9NK3JsD67CN5a08nF5oIL2bD6loTdHOuKw==} + engines: {node: '>= 5.10.0'} + dependencies: + big-integer: 1.6.51 + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + /brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + dependencies: + balanced-match: 1.0.2 + dev: true + + /braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + dependencies: + fill-range: 7.0.1 + + /breakword@1.0.5: + resolution: {integrity: sha512-ex5W9DoOQ/LUEU3PMdLs9ua/CYZl1678NUkKOdUSi8Aw5F1idieaiRURCBFJCwVcrD1J8Iy3vfWSloaMwO2qFg==} + dependencies: + wcwidth: 1.0.1 + dev: false + + /broker-factory@3.0.68: + resolution: {integrity: sha512-QrbDJ/7YwZ2+TuSreT8WMKrssIO3VjywMu5C5Jq+pJ+OkIVIXhUkxdBhNX2mmRXlzkU+jVXz8uMyRP+2uAgx8w==} + dependencies: + '@babel/runtime': 7.22.6 + fast-unique-numbers: 6.0.21 + tslib: 2.6.1 + worker-factory: 6.0.69 + dev: false + + /broker-factory@3.0.84: + resolution: {integrity: sha512-Z5ihhfifx7n//u99AG88z1q4csVt8vgf72jN5JYD4FqkPWTMp+xCvUaKdWQUaiPFOmNYOV8uPIcqB1MtrLGjiQ==} + dependencies: + '@babel/runtime': 7.22.6 + fast-unique-numbers: 8.0.7 + tslib: 2.6.1 + worker-factory: 7.0.9 + dev: false + + /browser-assert@1.2.1: + resolution: {integrity: sha512-nfulgvOR6S4gt9UKCeGJOuSGBPGiFT6oQ/2UBnvTY/5aQ1PnksW72fhZkM30DzoRRv2WpwZf1vHHEr3mtuXIWQ==} + dev: true + + /browserify-zlib@0.1.4: + resolution: {integrity: sha512-19OEpq7vWgsH6WkvkBJQDFvJS1uPcbFOQ4v9CU839dO+ZZXUZO6XpE6hNCqvlIIj+4fZvRiJ6DsAQ382GwiyTQ==} + dependencies: + pako: 0.2.9 + dev: true + + /browserslist@4.20.2: + resolution: {integrity: sha512-CQOBCqp/9pDvDbx3xfMi+86pr4KXIf2FDkTTdeuYw8OxS9t898LA1Khq57gtufFILXpfgsSx5woNgsBgvGjpsA==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + dependencies: + caniuse-lite: 1.0.30001317 + electron-to-chromium: 1.4.86 + escalade: 3.1.1 + node-releases: 2.0.2 + picocolors: 1.0.0 + dev: false + + /browserslist@4.21.9: + resolution: {integrity: sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + dependencies: + caniuse-lite: 1.0.30001512 + electron-to-chromium: 1.4.449 + node-releases: 2.0.12 + update-browserslist-db: 1.0.11(browserslist@4.21.9) + + /bser@2.1.1: + resolution: {integrity: sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==} + dependencies: + node-int64: 0.4.0 + dev: true + + /buffer-crc32@0.2.13: + resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==} + + /buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + dev: true + + /buffer@5.7.1: + resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + dependencies: + base64-js: 
1.5.1 + ieee754: 1.2.1 + + /bufferutil@4.0.7: + resolution: {integrity: sha512-kukuqc39WOHtdxtw4UScxF/WVnMFVSQVKhtx3AjZJzhd0RGZZldcrfSEbVsWWe6KNH253574cq5F+wpv0G9pJw==} + engines: {node: '>=6.14.2'} + requiresBuild: true + dependencies: + node-gyp-build: 4.6.0 + + /busboy@1.6.0: + resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} + engines: {node: '>=10.16.0'} + dependencies: + streamsearch: 1.1.0 + + /bytes@3.0.0: + resolution: {integrity: sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==} + engines: {node: '>= 0.8'} + dev: true + + /bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + dev: true + + /cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + dev: false + + /call-bind@1.0.2: + resolution: {integrity: sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==} + dependencies: + function-bind: 1.1.1 + get-intrinsic: 1.2.0 + + /callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + /camelcase-css@2.0.1: + resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} + engines: {node: '>= 6'} + + /camelcase-keys@6.2.2: + resolution: {integrity: sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==} + engines: {node: '>=8'} + dependencies: + camelcase: 5.3.1 + map-obj: 4.3.0 + quick-lru: 4.0.1 + dev: false + + /camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + + /caniuse-lite@1.0.30001317: + resolution: {integrity: sha512-xIZLh8gBm4dqNX0gkzrBeyI86J2eCjWzYAs40q88smG844YIrN4tVQl/RhquHvKEKImWWFIVh1Lxe5n1G/N+GQ==} + dev: false + + /caniuse-lite@1.0.30001512: + resolution: {integrity: sha512-2S9nK0G/mE+jasCUsMPlARhRCts1ebcp2Ji8Y8PWi4NDE1iRdLCnEPHkEfeBrGC45L4isBx5ur3IQ6yTE2mRZw==} + + /case@1.6.3: + resolution: {integrity: sha512-mzDSXIPaFwVDvZAHqZ9VlbyF4yyXRuX6IvB06WvPYkqJVO24kX1PPhv9bfpKNFZyxYFmmgo03HUiD8iklmJYRQ==} + engines: {node: '>= 0.8.0'} + dev: false + + /chai@4.3.7: + resolution: {integrity: sha512-HLnAzZ2iupm25PlN0xFreAlBA5zaBSv3og0DdeGA4Ar6h6rJ3A0rolRUKJhSF2V10GZKDgWF/VmAEsNWjCRB+A==} + engines: {node: '>=4'} + dependencies: + assertion-error: 1.1.0 + check-error: 1.0.2 + deep-eql: 4.1.3 + get-func-name: 2.0.0 + loupe: 2.3.6 + pathval: 1.1.1 + type-detect: 4.0.8 + dev: false + + /chalk@2.4.2: + resolution: {integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==} + engines: {node: '>=4'} + dependencies: + ansi-styles: 3.2.1 + escape-string-regexp: 1.0.5 + supports-color: 5.5.0 + + /chalk@3.0.0: + resolution: {integrity: sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==} + engines: {node: '>=8'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + dev: false + + /chalk@4.1.1: + resolution: {integrity: sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 
7.2.0 + dev: false + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + /chardet@0.7.0: + resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==} + dev: false + + /check-error@1.0.2: + resolution: {integrity: sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==} + dev: false + + /chokidar@3.5.3: + resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + engines: {node: '>= 8.10.0'} + dependencies: + anymatch: 3.1.2 + braces: 3.0.2 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.2 + + /chownr@1.1.4: + resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==} + dev: true + + /chownr@2.0.0: + resolution: {integrity: sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==} + engines: {node: '>=10'} + + /chromatic@7.0.0: + resolution: {integrity: sha512-ZPHRO/6v0jAKk0G1oW+HAngQmbq+opAVkgjy3Yx5Cvxvpfv+Y7zyfBuQ8KHewoNuIqLTfvRTbKzekqbGYT+7UQ==} + hasBin: true + dev: true + + /chrome-trace-event@1.0.3: + resolution: {integrity: sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==} + engines: {node: '>=6.0'} + dev: true + + /ci-info@3.8.0: + resolution: {integrity: sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==} + engines: {node: '>=8'} + + /clean-stack@2.2.0: + resolution: {integrity: sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==} + engines: {node: '>=6'} + dev: true + + /cli-color@2.0.3: + resolution: {integrity: sha512-OkoZnxyC4ERN3zLzZaY9Emb7f/MhBOIpePv0Ycok0fJYT+Ouo00UBEIwsVsr0yoow++n5YWlSUgST9GKhNHiRQ==} + engines: {node: '>=0.10'} + dependencies: + d: 1.0.1 + es5-ext: 0.10.62 + es6-iterator: 2.0.3 + memoizee: 0.4.15 + timers-ext: 0.1.7 + dev: false + + /cli-cursor@3.1.0: + resolution: {integrity: sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==} + engines: {node: '>=8'} + dependencies: + restore-cursor: 3.1.0 + + /cli-spinners@2.7.0: + resolution: {integrity: sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw==} + engines: {node: '>=6'} + + /cli-table3@0.6.3: + resolution: {integrity: sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==} + engines: {node: 10.* || >= 12.*} + dependencies: + string-width: 4.2.3 + optionalDependencies: + '@colors/colors': 1.5.0 + dev: true + + /cli-width@3.0.0: + resolution: {integrity: sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==} + engines: {node: '>= 10'} + dev: false + + /cliui@6.0.0: + resolution: {integrity: sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==} + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 6.2.0 + dev: false + + /cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + dependencies: + string-width: 4.2.3 + 
strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + dev: false + + /clone-deep@4.0.1: + resolution: {integrity: sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==} + engines: {node: '>=6'} + dependencies: + is-plain-object: 2.0.4 + kind-of: 6.0.3 + shallow-clone: 3.0.1 + dev: true + + /clone@1.0.4: + resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==} + engines: {node: '>=0.8'} + + /clone@2.1.2: + resolution: {integrity: sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==} + engines: {node: '>=0.8'} + dev: false + + /cm6-theme-basic-dark@0.2.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/highlight@1.1.3): + resolution: {integrity: sha512-+mNNJecRtxS/KkloMDCQF0oTrT6aFGRZTjnBcdT5UG1pcDO4Brq8l1+0KR/8dZ7hub2gOGOzoi3rGFD8GzlH7Q==} + peerDependencies: + '@codemirror/language': ^6.0.0 + '@codemirror/state': ^6.0.0 + '@codemirror/view': ^6.0.0 + '@lezer/highlight': ^1.0.0 + dependencies: + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/highlight': 1.1.3 + dev: false + + /cm6-theme-basic-light@0.2.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/highlight@1.1.3): + resolution: {integrity: sha512-1prg2gv44sYfpHscP26uLT/ePrh0mlmVwMSoSd3zYKQ92Ab3jPRLzyCnpyOCQLJbK+YdNs4HvMRqMNYdy4pMhA==} + peerDependencies: + '@codemirror/language': ^6.0.0 + '@codemirror/state': ^6.0.0 + '@codemirror/view': ^6.0.0 + '@lezer/highlight': ^1.0.0 + dependencies: + '@codemirror/language': 6.6.0 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + '@lezer/highlight': 1.1.3 + dev: false + + /code-red@1.0.3: + resolution: {integrity: sha512-kVwJELqiILQyG5aeuyKFbdsI1fmQy1Cmf7dQ8eGmVuJoaRVdwey7WaMknr2ZFeVSYSKT0rExsa8EGw0aoI/1QQ==} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + '@types/estree': 1.0.0 + acorn: 8.8.2 + estree-walker: 3.0.3 + periscopic: 3.1.0 + + /codemirror@6.0.1(@lezer/common@1.0.2): + resolution: {integrity: sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==} + dependencies: + '@codemirror/autocomplete': 6.3.0(@codemirror/language@6.6.0)(@codemirror/state@6.1.2)(@codemirror/view@6.4.1)(@lezer/common@1.0.2) + '@codemirror/commands': 6.1.2 + '@codemirror/language': 6.6.0 + '@codemirror/lint': 6.0.0 + '@codemirror/search': 6.2.2 + '@codemirror/state': 6.1.2 + '@codemirror/view': 6.4.1 + transitivePeerDependencies: + - '@lezer/common' + dev: false + + /color-convert@1.9.3: + resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} + dependencies: + color-name: 1.1.3 + + /color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + dependencies: + color-name: 1.1.4 + + /color-name@1.1.3: + resolution: {integrity: sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==} + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + /color-support@1.1.3: + resolution: {integrity: sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==} + hasBin: true + + /colorette@2.0.20: + resolution: {integrity: 
sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==} + dev: true + + /combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + dependencies: + delayed-stream: 1.0.0 + + /commander@2.20.3: + resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} + + /commander@6.2.1: + resolution: {integrity: sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==} + engines: {node: '>= 6'} + dev: true + + /commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + dev: false + + /commander@8.3.0: + resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} + engines: {node: '>= 12'} + dev: false + + /commander@9.4.1: + resolution: {integrity: sha512-5EEkTNyHNGFPD2H+c/dXXfQZYa/scCKasxWcXJaWnNJ99pnQN9Vnmqow+p+PlFPE63Q6mThaZws1T+HxfpgtPw==} + engines: {node: ^12.20.0 || >=14} + dev: false + + /commondir@1.0.1: + resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} + dev: true + + /compilerr@11.0.8: + resolution: {integrity: sha512-tKUqynRzLkmVXCFDohSNUOPe3LfAGbvI+mVvF/21YydavzWrm+v27bpH6us9zYKwbSz7xF1/ncRgiXoNWT1hsQ==} + engines: {node: '>=16.1.0'} + dependencies: + '@babel/runtime': 7.22.6 + dashify: 2.0.0 + indefinite-article: 0.0.2 + tslib: 2.6.1 + dev: false + + /compilerr@9.0.21: + resolution: {integrity: sha512-H6ZnGHPBiwVdWt8GbAPuQK4mmtRTJ5yucysgFFhGxmPoLCAmaMSxtvHNzhAAGNqBRZOTsGjkwT8clNw6CJcGgQ==} + engines: {node: '>=12.20.1'} + dependencies: + '@babel/runtime': 7.22.6 + dashify: 2.0.0 + indefinite-article: 0.0.2 + tslib: 2.6.1 + dev: false + + /compressible@2.0.18: + resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} + engines: {node: '>= 0.6'} + dependencies: + mime-db: 1.51.0 + dev: true + + /compression@1.7.4: + resolution: {integrity: sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==} + engines: {node: '>= 0.8.0'} + dependencies: + accepts: 1.3.8 + bytes: 3.0.0 + compressible: 2.0.18 + debug: 2.6.9 + on-headers: 1.0.2 + safe-buffer: 5.1.2 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=} + + /concat-stream@1.6.2: + resolution: {integrity: sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==} + engines: {'0': node >= 0.8} + dependencies: + buffer-from: 1.1.2 + inherits: 2.0.4 + readable-stream: 2.3.8 + typedarray: 0.0.6 + dev: true + + /console-clear@1.1.1: + resolution: {integrity: sha512-pMD+MVR538ipqkG5JXeOEbKWS5um1H4LUUccUQG68qpeqBYbzYy79Gh55jkd2TtPdRfUaLWdv6LPP//5Zt0aPQ==} + engines: {node: '>=4'} + dev: false + + /console-control-strings@1.1.0: + resolution: {integrity: sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==} + + /content-disposition@0.5.4: + resolution: {integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==} + engines: {node: '>= 0.6'} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /content-type@1.0.5: + 
resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} + engines: {node: '>= 0.6'} + dev: true + + /convert-source-map@1.9.0: + resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} + + /convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + dev: true + + /cookie-signature@1.0.6: + resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} + dev: true + + /cookie@0.4.2: + resolution: {integrity: sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==} + engines: {node: '>= 0.6'} + dev: false + + /cookie@0.5.0: + resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} + engines: {node: '>= 0.6'} + + /copy-anything@2.0.6: + resolution: {integrity: sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==} + dependencies: + is-what: 3.14.1 + + /core-js-compat@3.31.0: + resolution: {integrity: sha512-hM7YCu1cU6Opx7MXNu0NuumM0ezNeAeRKadixyiQELWY3vT3De9S4J5ZBMraWV2vZnrE1Cirl0GtFtDtMUXzPw==} + dependencies: + browserslist: 4.21.9 + dev: true + + /core-util-is@1.0.3: + resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + dev: true + + /cosmiconfig@8.2.0: + resolution: {integrity: sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==} + engines: {node: '>=14'} + dependencies: + import-fresh: 3.3.0 + js-yaml: 4.1.0 + parse-json: 5.2.0 + path-type: 4.0.0 + dev: true + + /crelt@1.0.5: + resolution: {integrity: sha512-+BO9wPPi+DWTDcNYhr/W90myha8ptzftZT+LwcmUbbok0rcP/fequmFYCw8NMoH7pkAZQzU78b3kYrlua5a9eA==} + dev: false + + /cropperjs@1.5.12: + resolution: {integrity: sha512-re7UdjE5UnwdrovyhNzZ6gathI4Rs3KGCBSc8HCIjUo5hO42CtzyblmWLj6QWVw7huHyDMfpKxhiO2II77nhDw==} + dev: false + + /cross-spawn@5.1.0: + resolution: {integrity: sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==} + dependencies: + lru-cache: 4.1.5 + shebang-command: 1.2.0 + which: 1.3.1 + dev: false + + /cross-spawn@6.0.5: + resolution: {integrity: sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==} + engines: {node: '>=4.8'} + dependencies: + nice-try: 1.0.5 + path-key: 2.0.1 + semver: 5.7.1 + shebang-command: 1.2.0 + which: 1.3.1 + dev: false + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + /crypto-random-string@2.0.0: + resolution: {integrity: sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==} + engines: {node: '>=8'} + dev: true + + /css-declaration-sorter@7.0.0(postcss@8.4.27): + resolution: {integrity: sha512-jV48Uxg3jWGLthtZhYAH3JFWMjdurJrSQAgWCc/t2ZE0UUFAIWlgcXcLJNutZT8GXrLAr36Kp+O1w3yBdxCr/A==} + engines: {node: ^14 || ^16 || >=18} + peerDependencies: + postcss: ^8.0.9 + dependencies: + postcss: 8.4.27 + dev: false + + /css-loader@6.8.1(webpack@5.88.1): + resolution: {integrity: 
sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==} + engines: {node: '>= 12.13.0'} + peerDependencies: + webpack: ^5.0.0 + dependencies: + icss-utils: 5.1.0(postcss@8.4.27) + postcss: 8.4.27 + postcss-modules-extract-imports: 3.0.0(postcss@8.4.27) + postcss-modules-local-by-default: 4.0.3(postcss@8.4.27) + postcss-modules-scope: 3.0.0(postcss@8.4.27) + postcss-modules-values: 4.0.0(postcss@8.4.27) + postcss-value-parser: 4.2.0 + semver: 7.4.0 + webpack: 5.88.1(esbuild@0.17.14) + dev: true + + /css-select@4.3.0: + resolution: {integrity: sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==} + dependencies: + boolbase: 1.0.0 + css-what: 6.1.0 + domhandler: 4.3.1 + domutils: 2.8.0 + nth-check: 2.1.1 + dev: false + + /css-tree@2.3.1: + resolution: {integrity: sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + dependencies: + mdn-data: 2.0.30 + source-map-js: 1.0.2 + + /css-vars-ponyfill@2.4.8: + resolution: {integrity: sha512-4/j4AX4htytYHWyHVZ2BFQ+NoCGZEcOH2h4/2mmgE4SkrFg4Xq6tGYR77DtvvUIDsaXuJN+sj41bbgauA0Gfmg==} + dependencies: + balanced-match: 1.0.2 + get-css-data: 2.1.0 + dev: false + + /css-what@6.1.0: + resolution: {integrity: sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==} + engines: {node: '>= 6'} + dev: false + + /css.escape@1.5.1: + resolution: {integrity: sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==} + dev: false + + /cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + /cssstyle@3.0.0: + resolution: {integrity: sha512-N4u2ABATi3Qplzf0hWbVCdjenim8F3ojEXpBDF5hBpjzW182MjNGLqfmQ0SkSPeQ+V86ZXgeH8aXj6kayd4jgg==} + engines: {node: '>=14'} + dependencies: + rrweb-cssom: 0.6.0 + dev: false + + /csstype@3.1.2: + resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} + dev: true + + /csv-generate@3.4.3: + resolution: {integrity: sha512-w/T+rqR0vwvHqWs/1ZyMDWtHHSJaN06klRqJXBEpDJaM/+dZkso0OKh1VcuuYvK3XM53KysVNq8Ko/epCK8wOw==} + dev: false + + /csv-parse@4.16.3: + resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} + dev: false + + /csv-stringify@5.6.5: + resolution: {integrity: sha512-PjiQ659aQ+fUTQqSrd1XEDnOr52jh30RBurfzkscaE2tPaFsDH5wOAHJiw8XAHphRknCwMUE9KRayc4K/NbO8A==} + dev: false + + /csv@5.5.3: + resolution: {integrity: sha512-QTaY0XjjhTQOdguARF0lGKm5/mEq9PD9/VhZZegHDIBq2tQwgNpHc3dneD4mGo2iJs+fTKv5Bp0fZ+BRuY3Z0g==} + engines: {node: '>= 0.1.90'} + dependencies: + csv-generate: 3.4.3 + csv-parse: 4.16.3 + csv-stringify: 5.6.5 + stream-transform: 2.1.3 + dev: false + + /d3-array@3.1.1: + resolution: {integrity: sha512-33qQ+ZoZlli19IFiQx4QEpf2CBEayMRzhlisJHSCsSUbDXv6ZishqS1x7uFVClKG4Wr7rZVHvaAttoLow6GqdQ==} + engines: {node: '>=12'} + dependencies: + internmap: 2.0.3 + dev: false + + /d3-color@3.0.1: + resolution: {integrity: sha512-6/SlHkDOBLyQSJ1j1Ghs82OIUXpKWlR0hCsw0XrLSQhuUPuCSmLQ1QPH98vpnQxMUQM2/gfAkUEWsupVpd9JGw==} + engines: {node: '>=12'} + dev: false + + /d3-delaunay@6.0.2: + resolution: {integrity: sha512-IMLNldruDQScrcfT+MWnazhHbDJhcRJyOEBAJfwQnHle1RPh6WDuLvxNArUju2VSMSUuKlY5BGHRJ2cYyoFLQQ==} + engines: {node: '>=12'} + 
dependencies: + delaunator: 5.0.0 + dev: false + + /d3-dispatch@3.0.1: + resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} + engines: {node: '>=12'} + dev: false + + /d3-dsv@3.0.1: + resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + dev: false + + /d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + dev: false + + /d3-format@3.1.0: + resolution: {integrity: sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==} + engines: {node: '>=12'} + dev: false + + /d3-geo-projection@4.0.0: + resolution: {integrity: sha512-p0bK60CEzph1iqmnxut7d/1kyTmm3UWtPlwdkM31AU+LW+BXazd5zJdoCn7VFxNCHXRngPHRnsNn5uGjLRGndg==} + engines: {node: '>=12'} + hasBin: true + dependencies: + commander: 7.2.0 + d3-array: 3.1.1 + d3-geo: 3.0.1 + dev: false + + /d3-geo@3.0.1: + resolution: {integrity: sha512-Wt23xBych5tSy9IYAM1FR2rWIBFWa52B/oF/GYe5zbdHrg08FU8+BuI6X4PvTwPDdqdAdq04fuWJpELtsaEjeA==} + engines: {node: '>=12'} + dependencies: + d3-array: 3.1.1 + dev: false + + /d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + dev: false + + /d3-interpolate@3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} + dependencies: + d3-color: 3.0.1 + dev: false + + /d3-path@3.0.1: + resolution: {integrity: sha512-gq6gZom9AFZby0YLduxT1qmrp4xpBA1YZr19OI717WIdKE2OM5ETq5qrHLb301IgxhLwcuxvGZVLeeWc/k1I6w==} + engines: {node: '>=12'} + dev: false + + /d3-path@3.1.0: + resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} + engines: {node: '>=12'} + dev: false + + /d3-quadtree@3.0.1: + resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: '>=12'} + dev: false + + /d3-scale@4.0.2: + resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} + engines: {node: '>=12'} + dependencies: + d3-array: 3.1.1 + d3-format: 3.1.0 + d3-interpolate: 3.0.1 + d3-time: 3.0.0 + d3-time-format: 4.1.0 + dev: false + + /d3-shape@3.1.0: + resolution: {integrity: sha512-tGDh1Muf8kWjEDT/LswZJ8WF85yDZLvVJpYU9Nq+8+yW1Z5enxrmXOhTArlkaElU+CTn0OTVNli+/i+HP45QEQ==} + engines: {node: '>=12'} + dependencies: + d3-path: 3.0.1 + dev: false + + /d3-shape@3.2.0: + resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} + engines: {node: '>=12'} + dependencies: + d3-path: 3.1.0 + dev: false + + /d3-time-format@4.1.0: + resolution: {integrity: sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==} + engines: {node: '>=12'} + dependencies: + d3-time: 3.0.0 + dev: false + + /d3-time@3.0.0: + resolution: {integrity: sha512-zmV3lRnlaLI08y9IMRXSDshQb5Nj77smnfpnd2LrBa/2K281Jijactokeak14QacHs/kKq0AQ121nidNYlarbQ==} + engines: {node: '>=12'} + dependencies: + d3-array: 3.1.1 + dev: 
false + + /d3-timer@3.0.1: + resolution: {integrity: sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==} + engines: {node: '>=12'} + dev: false + + /d@1.0.1: + resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==} + dependencies: + es5-ext: 0.10.62 + type: 1.2.0 + dev: false + + /dashify@2.0.0: + resolution: {integrity: sha512-hpA5C/YrPjucXypHPPc0oJ1l9Hf6wWbiOL7Ik42cxnsUOhWiCB/fylKbKqqJalW9FgkNQCw16YO8uW9Hs0Iy1A==} + engines: {node: '>=4'} + dev: false + + /data-urls@4.0.0: + resolution: {integrity: sha512-/mMTei/JXPqvFqQtfyTowxmJVwr2PVAeCcDxyFf6LhoOu/09TX2OX3kb2wzi4DMXcfj4OItwDOnhl5oziPnT6g==} + engines: {node: '>=14'} + dependencies: + abab: 2.0.6 + whatwg-mimetype: 3.0.0 + whatwg-url: 12.0.1 + dev: false + + /dataloader@1.4.0: + resolution: {integrity: sha512-68s5jYdlvasItOJnCuI2Q9s4q98g0pCyL3HrcKJu8KNugUl8ahgmZYg38ysLTgQjjXX3H8CJLkAvWrclWfcalw==} + dev: false + + /debug@2.6.9: + resolution: {integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.0.0 + dev: true + + /debug@3.2.7: + resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} + requiresBuild: true + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.3 + optional: true + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + + /decamelize-keys@1.1.1: + resolution: {integrity: sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==} + engines: {node: '>=0.10.0'} + dependencies: + decamelize: 1.2.0 + map-obj: 1.0.1 + dev: false + + /decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + dev: false + + /decimal.js@10.4.3: + resolution: {integrity: sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==} + dev: false + + /dedent@1.5.1: + resolution: {integrity: sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + dev: true + + /deep-eql@4.1.3: + resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} + engines: {node: '>=6'} + dependencies: + type-detect: 4.0.8 + dev: false + + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + /deepmerge@4.2.2: + resolution: {integrity: sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==} + engines: {node: '>=0.10.0'} + dev: false + + /deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + + /default-browser-id@3.0.0: + resolution: {integrity: 
sha512-OZ1y3y0SqSICtE8DE4S8YOE9UZOJ8wO16fKWVP5J1Qz42kV9jcnMVFrEE/noXb/ss3Q4pZIH79kxofzyNNtUNA==} + engines: {node: '>=12'} + dependencies: + bplist-parser: 0.2.0 + untildify: 4.0.0 + dev: true + + /defaults@1.0.4: + resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} + dependencies: + clone: 1.0.4 + + /define-lazy-prop@2.0.0: + resolution: {integrity: sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==} + engines: {node: '>=8'} + dev: true + + /define-properties@1.1.3: + resolution: {integrity: sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==} + engines: {node: '>= 0.4'} + dependencies: + object-keys: 1.1.1 + dev: false + + /define-properties@1.2.0: + resolution: {integrity: sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==} + engines: {node: '>= 0.4'} + dependencies: + has-property-descriptors: 1.0.0 + object-keys: 1.1.1 + + /defined@1.0.0: + resolution: {integrity: sha512-Y2caI5+ZwS5c3RiNDJ6u53VhQHv+hHKwhkI1iHvceKUHw9Df6EK2zRLfjejRgMuCuxK7PfSWIMwWecceVvThjQ==} + + /defu@6.1.2: + resolution: {integrity: sha512-+uO4+qr7msjNNWKYPHqN/3+Dx3NFkmIzayk2L1MyZQlvgZb/J1A0fo410dpKrN2SnqFjt8n4JL8fDJE0wIgjFQ==} + dev: true + + /del@6.1.1: + resolution: {integrity: sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==} + engines: {node: '>=10'} + dependencies: + globby: 11.1.0 + graceful-fs: 4.2.9 + is-glob: 4.0.3 + is-path-cwd: 2.2.0 + is-path-inside: 3.0.3 + p-map: 4.0.0 + rimraf: 3.0.2 + slash: 3.0.0 + dev: true + + /delaunator@5.0.0: + resolution: {integrity: sha512-AyLvtyJdbv/U1GkiS6gUUzclRoAY4Gs75qkMygJJhU75LW4DNuSF2RMzpxs9jw9Oz1BobHjTdkG3zdP55VxAqw==} + dependencies: + robust-predicates: 3.0.1 + dev: false + + /delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + /delegates@1.0.0: + resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} + + /depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + dev: true + + /dequal@2.0.2: + resolution: {integrity: sha512-q9K8BlJVxK7hQYqa6XISGmBZbtQQWVXSrRrWreHC94rMt1QL/Impruc+7p2CYSYuVIUr+YCt6hjrs1kkdJRTug==} + engines: {node: '>=6'} + dev: false + + /dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} + + /destroy@1.2.0: + resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} + engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + dev: true + + /detect-indent@6.1.0: + resolution: {integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==} + engines: {node: '>=8'} + + /detect-libc@2.0.2: + resolution: {integrity: sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==} + engines: {node: '>=8'} + dev: false + + /detect-package-manager@2.0.1: + resolution: {integrity: sha512-j/lJHyoLlWi6G1LDdLgvUtz60Zo5GEj+sVYtTVXnYLDPuzgC3llMxonXym9zIwhhUII8vjdw0LXxavpLqTbl1A==} + engines: {node: '>=12'} + dependencies: + execa: 5.1.1 + dev: true + + /detect-port@1.5.1: + 
resolution: {integrity: sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==} + hasBin: true + dependencies: + address: 1.2.2 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: true + + /detective@5.2.1: + resolution: {integrity: sha512-v9XE1zRnz1wRtgurGu0Bs8uHKFSTdteYZNbIPFVhUZ39L/S79ppMpdmVOZAnoz1jfEFodc48n6MX483Xo3t1yw==} + engines: {node: '>=0.8.0'} + hasBin: true + dependencies: + acorn-node: 1.8.2 + defined: 1.0.0 + minimist: 1.2.6 + + /devalue@4.3.0: + resolution: {integrity: sha512-n94yQo4LI3w7erwf84mhRUkUJfhLoCZiLyoOZ/QFsDbcWNZePrLwbQpvZBUG2TNxwV3VjCKPxkiiQA6pe3TrTA==} + + /didyoumean@1.2.2: + resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + + /diff-sequences@29.4.3: + resolution: {integrity: sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dev: false + + /dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + dependencies: + path-type: 4.0.0 + + /dlv@1.1.3: + resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dependencies: + esutils: 2.0.3 + + /dom-accessibility-api@0.5.13: + resolution: {integrity: sha512-R305kwb5CcMDIpSHUnLyIAp7SrSPBx6F0VfQFB3M75xVMHhXJJIdePYgbPPh1o57vCHNu5QztokWUPsLjWzFqw==} + + /dom-serializer@1.4.1: + resolution: {integrity: sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==} + dependencies: + domelementtype: 2.3.0 + domhandler: 4.3.1 + entities: 2.2.0 + + /domelementtype@2.3.0: + resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} + + /domexception@4.0.0: + resolution: {integrity: sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==} + engines: {node: '>=12'} + dependencies: + webidl-conversions: 7.0.0 + dev: false + + /domhandler@3.3.0: + resolution: {integrity: sha512-J1C5rIANUbuYK+FuFL98650rihynUOEzRLxW+90bKZRWB6A1X1Tf82GxR1qAWLyfNPRvjqfip3Q5tdYlmAa9lA==} + engines: {node: '>= 4'} + dependencies: + domelementtype: 2.3.0 + dev: true + + /domhandler@4.3.1: + resolution: {integrity: sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==} + engines: {node: '>= 4'} + dependencies: + domelementtype: 2.3.0 + + /dompurify@3.0.3: + resolution: {integrity: sha512-axQ9zieHLnAnHh0sfAamKYiqXMJAVwu+LM/alQ7WDagoWessyWvMSFyW65CqF3owufNu8HBcE4cM2Vflu7YWcQ==} + dev: false + + /domutils@2.8.0: + resolution: {integrity: sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} + dependencies: + dom-serializer: 1.4.1 + domelementtype: 2.3.0 + domhandler: 4.3.1 + + /dotenv-expand@10.0.0: + resolution: {integrity: sha512-GopVGCpVS1UKH75VKHGuQFqS1Gusej0z4FyQkPdwjil2gNIv+LNsqBlboOzpJFZKVT95GkCyWJbBSdFEFUWI2A==} + engines: {node: '>=12'} + dev: true + + /dotenv@16.3.1: + resolution: {integrity: sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ==} + engines: {node: '>=12'} + dev: true + + /dotenv@8.6.0: + resolution: 
{integrity: sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g==} + engines: {node: '>=10'} + dev: false + + /duplexify@3.7.1: + resolution: {integrity: sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==} + dependencies: + end-of-stream: 1.4.4 + inherits: 2.0.4 + readable-stream: 2.3.8 + stream-shift: 1.0.1 + dev: true + + /ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + dev: true + + /ejs@3.1.9: + resolution: {integrity: sha512-rC+QVNMJWv+MtPgkt0y+0rVEIdbtxVADApW9JXrUVlzHetgcyczP/E7DJmWJ4fJCZF2cPcBk0laWO9ZHMG3DmQ==} + engines: {node: '>=0.10.0'} + hasBin: true + dependencies: + jake: 10.8.7 + dev: true + + /electron-to-chromium@1.4.449: + resolution: {integrity: sha512-TxLRpRUj/107ATefeP8VIUWNOv90xJxZZbCW/eIbSZQiuiFANCx2b7u+GbVc9X4gU+xnbvypNMYVM/WArE1DNQ==} + + /electron-to-chromium@1.4.86: + resolution: {integrity: sha512-EVTZ+igi8x63pK4bPuA95PXIs2b2Cowi3WQwI9f9qManLiZJOD1Lash1J3W4TvvcUCcIR4o/rgi9o8UicXSO+w==} + dev: false + + /emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + /emojis-list@3.0.0: + resolution: {integrity: sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==} + engines: {node: '>= 4'} + dev: true + + /encodeurl@1.0.2: + resolution: {integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==} + engines: {node: '>= 0.8'} + dev: true + + /end-of-stream@1.4.4: + resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==} + dependencies: + once: 1.4.0 + dev: true + + /enhanced-resolve@5.15.0: + resolution: {integrity: sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==} + engines: {node: '>=10.13.0'} + dependencies: + graceful-fs: 4.2.9 + tapable: 2.2.1 + dev: true + + /enquirer@2.3.6: + resolution: {integrity: sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==} + engines: {node: '>=8.6'} + dependencies: + ansi-colors: 4.1.3 + + /entities@2.2.0: + resolution: {integrity: sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==} + + /entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + dev: false + + /envinfo@7.10.0: + resolution: {integrity: sha512-ZtUjZO6l5mwTHvc1L9+1q5p/R3wTopcfqMW8r5t8SJSKqeVI/LtajORwRFEKpEFuekjD0VBjwu1HMxL4UalIRw==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /errno@0.1.8: + resolution: {integrity: sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==} + hasBin: true + requiresBuild: true + dependencies: + prr: 1.0.1 + optional: true + + /error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + dependencies: + is-arrayish: 0.2.1 + + /es-abstract@1.19.1: + resolution: {integrity: sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + es-to-primitive: 1.2.1 + function-bind: 1.1.1 + get-intrinsic: 1.2.0 + get-symbol-description: 1.0.0 + has: 1.0.3 + 
has-symbols: 1.0.3 + internal-slot: 1.0.5 + is-callable: 1.2.7 + is-negative-zero: 2.0.2 + is-regex: 1.1.4 + is-shared-array-buffer: 1.0.2 + is-string: 1.0.7 + is-weakref: 1.0.2 + object-inspect: 1.12.3 + object-keys: 1.1.1 + object.assign: 4.1.4 + string.prototype.trimend: 1.0.4 + string.prototype.trimstart: 1.0.4 + unbox-primitive: 1.0.1 + dev: false + + /es-abstract@1.21.2: + resolution: {integrity: sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==} + engines: {node: '>= 0.4'} + dependencies: + array-buffer-byte-length: 1.0.0 + available-typed-arrays: 1.0.5 + call-bind: 1.0.2 + es-set-tostringtag: 2.0.1 + es-to-primitive: 1.2.1 + function.prototype.name: 1.1.5 + get-intrinsic: 1.2.0 + get-symbol-description: 1.0.0 + globalthis: 1.0.3 + gopd: 1.0.1 + has: 1.0.3 + has-property-descriptors: 1.0.0 + has-proto: 1.0.1 + has-symbols: 1.0.3 + internal-slot: 1.0.5 + is-array-buffer: 3.0.2 + is-callable: 1.2.7 + is-negative-zero: 2.0.2 + is-regex: 1.1.4 + is-shared-array-buffer: 1.0.2 + is-string: 1.0.7 + is-typed-array: 1.1.10 + is-weakref: 1.0.2 + object-inspect: 1.12.3 + object-keys: 1.1.1 + object.assign: 4.1.4 + regexp.prototype.flags: 1.5.0 + safe-regex-test: 1.0.0 + string.prototype.trim: 1.2.7 + string.prototype.trimend: 1.0.6 + string.prototype.trimstart: 1.0.6 + typed-array-length: 1.0.4 + unbox-primitive: 1.0.2 + which-typed-array: 1.1.9 + dev: false + + /es-module-lexer@0.9.3: + resolution: {integrity: sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==} + dev: true + + /es-module-lexer@1.3.0: + resolution: {integrity: sha512-vZK7T0N2CBmBOixhmjdqx2gWVbFZ4DXZ/NyRMZVlJXPa7CyFS+/a4QQsDGDQy9ZfEzxFuNEsMLeQJnKP2p5/JA==} + dev: true + + /es-set-tostringtag@2.0.1: + resolution: {integrity: sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==} + engines: {node: '>= 0.4'} + dependencies: + get-intrinsic: 1.2.0 + has: 1.0.3 + has-tostringtag: 1.0.0 + dev: false + + /es-shim-unscopables@1.0.0: + resolution: {integrity: sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==} + dependencies: + has: 1.0.3 + dev: false + + /es-to-primitive@1.2.1: + resolution: {integrity: sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==} + engines: {node: '>= 0.4'} + dependencies: + is-callable: 1.2.7 + is-date-object: 1.0.5 + is-symbol: 1.0.4 + dev: false + + /es5-ext@0.10.62: + resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==} + engines: {node: '>=0.10'} + requiresBuild: true + dependencies: + es6-iterator: 2.0.3 + es6-symbol: 3.1.3 + next-tick: 1.1.0 + dev: false + + /es6-iterator@2.0.3: + resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.62 + es6-symbol: 3.1.3 + dev: false + + /es6-object-assign@1.1.0: + resolution: {integrity: sha512-MEl9uirslVwqQU369iHNWZXsI8yaZYGg/D65aOgZkeyFJwHYSxilf7rQzXKI7DdDuBPrBXbfk3sl9hJhmd5AUw==} + dev: true + + /es6-promise@3.3.1: + resolution: {integrity: sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==} + + /es6-symbol@3.1.3: + resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==} + dependencies: + d: 1.0.1 + ext: 1.7.0 + dev: false + + /es6-weak-map@2.0.3: + resolution: 
{integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.62 + es6-iterator: 2.0.3 + es6-symbol: 3.1.3 + dev: false + + /esbuild-plugin-alias@0.2.1: + resolution: {integrity: sha512-jyfL/pwPqaFXyKnj8lP8iLk6Z0m099uXR45aSN8Av1XD4vhvQutxxPzgA2bTcAwQpa1zCXDcWOlhFgyP3GKqhQ==} + dev: true + + /esbuild-register@3.4.2(esbuild@0.17.14): + resolution: {integrity: sha512-kG/XyTDyz6+YDuyfB9ZoSIOOmgyFCH+xPRtsCa8W85HLRV5Csp+o3jWVbOSHgSLfyLc5DmP+KFDNwty4mEjC+Q==} + peerDependencies: + esbuild: '>=0.12 <1' + dependencies: + debug: 4.3.4 + esbuild: 0.17.14 + transitivePeerDependencies: + - supports-color + dev: true + + /esbuild@0.17.14: + resolution: {integrity: sha512-vOO5XhmVj/1XQR9NQ1UPq6qvMYL7QFJU57J5fKBKBKxp17uDt5PgxFDb4A2nEiXhr1qQs4x0F5+66hVVw4ruNw==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + '@esbuild/android-arm': 0.17.14 + '@esbuild/android-arm64': 0.17.14 + '@esbuild/android-x64': 0.17.14 + '@esbuild/darwin-arm64': 0.17.14 + '@esbuild/darwin-x64': 0.17.14 + '@esbuild/freebsd-arm64': 0.17.14 + '@esbuild/freebsd-x64': 0.17.14 + '@esbuild/linux-arm': 0.17.14 + '@esbuild/linux-arm64': 0.17.14 + '@esbuild/linux-ia32': 0.17.14 + '@esbuild/linux-loong64': 0.17.14 + '@esbuild/linux-mips64el': 0.17.14 + '@esbuild/linux-ppc64': 0.17.14 + '@esbuild/linux-riscv64': 0.17.14 + '@esbuild/linux-s390x': 0.17.14 + '@esbuild/linux-x64': 0.17.14 + '@esbuild/netbsd-x64': 0.17.14 + '@esbuild/openbsd-x64': 0.17.14 + '@esbuild/sunos-x64': 0.17.14 + '@esbuild/win32-arm64': 0.17.14 + '@esbuild/win32-ia32': 0.17.14 + '@esbuild/win32-x64': 0.17.14 + + /esbuild@0.18.20: + resolution: {integrity: sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + '@esbuild/android-arm': 0.18.20 + '@esbuild/android-arm64': 0.18.20 + '@esbuild/android-x64': 0.18.20 + '@esbuild/darwin-arm64': 0.18.20 + '@esbuild/darwin-x64': 0.18.20 + '@esbuild/freebsd-arm64': 0.18.20 + '@esbuild/freebsd-x64': 0.18.20 + '@esbuild/linux-arm': 0.18.20 + '@esbuild/linux-arm64': 0.18.20 + '@esbuild/linux-ia32': 0.18.20 + '@esbuild/linux-loong64': 0.18.20 + '@esbuild/linux-mips64el': 0.18.20 + '@esbuild/linux-ppc64': 0.18.20 + '@esbuild/linux-riscv64': 0.18.20 + '@esbuild/linux-s390x': 0.18.20 + '@esbuild/linux-x64': 0.18.20 + '@esbuild/netbsd-x64': 0.18.20 + '@esbuild/openbsd-x64': 0.18.20 + '@esbuild/sunos-x64': 0.18.20 + '@esbuild/win32-arm64': 0.18.20 + '@esbuild/win32-ia32': 0.18.20 + '@esbuild/win32-x64': 0.18.20 + dev: false + + /esbuild@0.19.0: + resolution: {integrity: sha512-i7i8TP4vuG55bKeLyqqk5sTPu1ZjPH3wkcLvAj/0X/222iWFo3AJUYRKjbOoY6BWFMH3teizxHEdV9Su5ESl0w==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + '@esbuild/android-arm': 0.19.0 + '@esbuild/android-arm64': 0.19.0 + '@esbuild/android-x64': 0.19.0 + '@esbuild/darwin-arm64': 0.19.0 + '@esbuild/darwin-x64': 0.19.0 + '@esbuild/freebsd-arm64': 0.19.0 + '@esbuild/freebsd-x64': 0.19.0 + '@esbuild/linux-arm': 0.19.0 + '@esbuild/linux-arm64': 0.19.0 + '@esbuild/linux-ia32': 0.19.0 + '@esbuild/linux-loong64': 0.19.0 + '@esbuild/linux-mips64el': 0.19.0 + '@esbuild/linux-ppc64': 0.19.0 + '@esbuild/linux-riscv64': 0.19.0 + '@esbuild/linux-s390x': 0.19.0 + '@esbuild/linux-x64': 0.19.0 + '@esbuild/netbsd-x64': 0.19.0 + '@esbuild/openbsd-x64': 0.19.0 + '@esbuild/sunos-x64': 0.19.0 + 
'@esbuild/win32-arm64': 0.19.0 + '@esbuild/win32-ia32': 0.19.0 + '@esbuild/win32-x64': 0.19.0 + dev: true + + /escalade@3.1.1: + resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + engines: {node: '>=6'} + + /escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + dev: true + + /escape-string-regexp@1.0.5: + resolution: {integrity: sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=} + engines: {node: '>=0.8.0'} + + /escape-string-regexp@2.0.0: + resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} + engines: {node: '>=8'} + dev: false + + /escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + /escape-string-regexp@5.0.0: + resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} + engines: {node: '>=12'} + dev: false + + /eslint-plugin-svelte@2.32.4(eslint@8.46.0)(svelte@4.0.0): + resolution: {integrity: sha512-VJ12i2Iogug1jvhwxSlognnfGj76P5gks/V4pUD4SCSVQOp14u47MNP0zAG8AQR3LT0Fi1iUvIFnY4l9z5Rwbg==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0-0 + svelte: ^3.37.0 || ^4.0.0 + peerDependenciesMeta: + svelte: + optional: true + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.46.0) + '@jridgewell/sourcemap-codec': 1.4.15 + debug: 4.3.4 + eslint: 8.46.0 + esutils: 2.0.3 + known-css-properties: 0.28.0 + postcss: 8.4.27 + postcss-load-config: 3.1.4(postcss@8.4.27) + postcss-safe-parser: 6.0.0(postcss@8.4.27) + postcss-selector-parser: 6.0.13 + semver: 7.5.4 + svelte: 4.0.0 + svelte-eslint-parser: 0.32.2(svelte@4.0.0) + transitivePeerDependencies: + - supports-color + - ts-node + dev: false + + /eslint-scope@5.1.1: + resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==} + engines: {node: '>=8.0.0'} + dependencies: + esrecurse: 4.3.0 + estraverse: 4.3.0 + dev: true + + /eslint-scope@7.2.0: + resolution: {integrity: sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + /eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + dev: false + + /eslint-utils@3.0.0(eslint@8.4.1): + resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==} + engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0} + peerDependencies: + eslint: '>=5' + dependencies: + eslint: 8.4.1 + eslint-visitor-keys: 2.1.0 + dev: true + + /eslint-visitor-keys@2.1.0: + resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==} + engines: {node: '>=10'} + dev: true + + /eslint-visitor-keys@3.4.1: + resolution: {integrity: sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + /eslint-visitor-keys@3.4.2: + resolution: {integrity: 
sha512-8drBzUEyZ2llkpCA67iYrgEssKDUu68V8ChqqOfFupIaG/LCVPUT+CoGJpT77zJprs4T/W7p07LP7zAIMuweVw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: false + + /eslint@8.4.1: + resolution: {integrity: sha512-TxU/p7LB1KxQ6+7aztTnO7K0i+h0tDi81YRY9VzB6Id71kNz+fFYnf5HD5UOQmxkzcoa0TlVZf9dpMtUv0GpWg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + '@eslint/eslintrc': 1.4.1 + '@humanwhocodes/config-array': 0.9.5 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + enquirer: 2.3.6 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.0 + eslint-utils: 3.0.0(eslint@8.4.1) + eslint-visitor-keys: 3.4.1 + espree: 9.5.2 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + functional-red-black-tree: 1.0.1 + glob-parent: 6.0.2 + globals: 13.20.0 + ignore: 4.0.6 + import-fresh: 3.3.0 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.1 + progress: 2.0.3 + regexpp: 3.2.0 + semver: 7.4.0 + strip-ansi: 6.0.1 + strip-json-comments: 3.1.1 + text-table: 0.2.0 + v8-compile-cache: 2.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /eslint@8.46.0: + resolution: {integrity: sha512-cIO74PvbW0qU8e0mIvk5IV3ToWdCq5FYG6gWPHHkx6gNdjlbAYvtfHmlCMXxjcoVaIdwy/IAt3+mDkZkfvb2Dg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.46.0) + '@eslint-community/regexpp': 4.6.2 + '@eslint/eslintrc': 2.1.1 + '@eslint/js': 8.46.0 + '@humanwhocodes/config-array': 0.11.10 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.2 + espree: 9.6.1 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.20.0 + graphemer: 1.4.0 + ignore: 5.2.4 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.3 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + dev: false + + /esm-env@1.0.0: + resolution: {integrity: sha512-Cf6VksWPsTuW01vU9Mk/3vRue91Zevka5SjyNf3nEpokFRuqt/KjUQoGAwq9qMmhpLTHmXzSIrFRw8zxWzmFBA==} + + /espree@9.2.0: + resolution: {integrity: sha512-oP3utRkynpZWF/F2x/HZJ+AGtnIclaR7z1pYPxy7NYM2fSO6LgK/Rkny8anRSPK/VwEA1eqm2squui0T7ZMOBg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.10.0 + acorn-jsx: 5.3.2(acorn@8.10.0) + eslint-visitor-keys: 3.4.1 + dev: true + + /espree@9.5.2: + resolution: {integrity: sha512-7OASN1Wma5fum5SrNhFMAMJxOUAbhyfQ8dQ//PJaJbNw0URTPWqIghHWt1MmAANKhHZIYOHruW4Kw4ruUWOdGw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.10.0 + acorn-jsx: 5.3.2(acorn@8.10.0) + eslint-visitor-keys: 3.4.1 + + /espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.10.0 + acorn-jsx: 5.3.2(acorn@8.10.0) + eslint-visitor-keys: 3.4.2 + dev: false + + /esprima@4.0.1: + resolution: {integrity: 
sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + engines: {node: '>=0.10'} + dependencies: + estraverse: 5.3.0 + + /esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + dependencies: + estraverse: 5.3.0 + + /estraverse@4.3.0: + resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} + engines: {node: '>=4.0'} + dev: true + + /estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + /estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + dev: false + + /estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + dependencies: + '@types/estree': 1.0.0 + + /esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + /etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + dev: true + + /event-emitter@0.3.5: + resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.62 + dev: false + + /events@3.3.0: + resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} + engines: {node: '>=0.8.x'} + + /execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + dependencies: + cross-spawn: 7.0.3 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + dev: true + + /expect@29.5.0: + resolution: {integrity: sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/expect-utils': 29.5.0 + jest-get-type: 29.4.3 + jest-matcher-utils: 29.5.0 + jest-message-util: 29.5.0 + jest-util: 29.5.0 + dev: false + + /express@4.18.2: + resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + engines: {node: '>= 0.10.0'} + dependencies: + accepts: 1.3.8 + array-flatten: 1.1.1 + body-parser: 1.20.1 + content-disposition: 0.5.4 + content-type: 1.0.5 + cookie: 0.5.0 + cookie-signature: 1.0.6 + debug: 2.6.9 + depd: 2.0.0 + encodeurl: 1.0.2 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 1.2.0 + fresh: 0.5.2 + http-errors: 2.0.0 + merge-descriptors: 1.0.1 + methods: 1.1.2 + on-finished: 2.4.1 + parseurl: 1.3.3 + path-to-regexp: 0.1.7 + proxy-addr: 2.0.7 + qs: 6.11.0 + range-parser: 1.2.1 + safe-buffer: 5.2.1 + send: 0.18.0 + serve-static: 1.15.0 + setprototypeof: 1.2.0 + statuses: 2.0.1 + type-is: 1.6.18 + utils-merge: 1.0.1 + vary: 1.1.2 + 
transitivePeerDependencies: + - supports-color + dev: true + + /ext@1.7.0: + resolution: {integrity: sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==} + dependencies: + type: 2.7.2 + dev: false + + /extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + dev: true + + /extendable-error@0.1.7: + resolution: {integrity: sha512-UOiS2in6/Q0FK0R0q6UY9vYpQ21mr/Qn1KOnte7vsACuNJf514WvCCUHSRCPcgjPT2bAhNIJdlE6bVap1GKmeg==} + dev: false + + /extendable-media-recorder-wav-encoder-broker@7.0.70: + resolution: {integrity: sha512-nnVAxiLBdf0PLDXP/8+bKYYRs2PmoJMoJzpcDGOra8GsHIPS+ytmS+85DUFSYGxaxohrhovgN0jVXSyjJ6hQSQ==} + dependencies: + '@babel/runtime': 7.22.6 + broker-factory: 3.0.68 + extendable-media-recorder-wav-encoder-worker: 8.0.69 + tslib: 2.5.3 + dev: false + + /extendable-media-recorder-wav-encoder-broker@7.0.88: + resolution: {integrity: sha512-QmihcCI51YJbbH/09NaFxQPbK3pwp71mXUX+f4xKN/ACWUu4s/XQx65KEpK02TnufmX9wCom5xagbtPiCQzWGQ==} + dependencies: + '@babel/runtime': 7.22.6 + broker-factory: 3.0.84 + extendable-media-recorder-wav-encoder-worker: 8.0.86 + tslib: 2.6.1 + dev: false + + /extendable-media-recorder-wav-encoder-worker@8.0.69: + resolution: {integrity: sha512-8RJgKYTTHkzDoCWrnPMMqX+TyJpwzP9lwqxQWDpa9J5J1DP0SybgoYWP8Dtty/R5xT344lU+NKo7g1661i7Ujg==} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.5.3 + worker-factory: 6.0.69 + dev: false + + /extendable-media-recorder-wav-encoder-worker@8.0.86: + resolution: {integrity: sha512-CCJzijR+w1+V/j9fFelKhU7h20Bw3oIkIUNG/egevYUbwGhZC+5Y0edttU66MbiL1CsopOJ6lHpB3ag7+M6R5A==} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.6.1 + worker-factory: 7.0.9 + dev: false + + /extendable-media-recorder-wav-encoder@7.0.76: + resolution: {integrity: sha512-HLeyR9R0mUPOo7zG3d3GRWltNaSYUjyUZGQ8amRjuQVkZFXszmOIAAUVBq3fou0Z3V1mAEo+mXnCqbEfYtgZXQ==} + dependencies: + '@babel/runtime': 7.21.0 + extendable-media-recorder-wav-encoder-broker: 7.0.70 + extendable-media-recorder-wav-encoder-worker: 8.0.69 + tslib: 2.4.0 + dev: false + + /extendable-media-recorder@9.0.0: + resolution: {integrity: sha512-50HNf/4wv2V7H7YBXVfxYLnaXQwMVcna9UHZxFPXmgWdThetWNu+TYKwA+xOPWA+rkr8Tqvtldxc/sPC/s/wXg==} + dependencies: + '@babel/runtime': 7.22.6 + media-encoder-host: 8.0.99 + multi-buffer-data-view: 5.0.8 + recorder-audio-worklet: 6.0.13 + standardized-audio-context: 25.3.55 + subscribable-things: 2.1.23 + tslib: 2.6.1 + dev: false + + /external-editor@3.1.0: + resolution: {integrity: sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==} + engines: {node: '>=4'} + dependencies: + chardet: 0.7.0 + iconv-lite: 0.4.24 + tmp: 0.0.33 + dev: false + + /extract-zip@1.7.0: + resolution: {integrity: sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==} + hasBin: true + dependencies: + concat-stream: 1.6.2 + debug: 2.6.9 + mkdirp: 0.5.5 + yauzl: 2.10.0 + transitivePeerDependencies: + - supports-color + dev: true + + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + /fast-glob@3.2.11: + resolution: {integrity: sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==} + engines: {node: '>=8.6.0'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.4 + + 
/fast-json-patch@3.1.1: + resolution: {integrity: sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==} + dev: false + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + /fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + /fast-unique-numbers@6.0.21: + resolution: {integrity: sha512-MW8UAAypyhNtbnMlSch9EiEAuiMo1y6O02WzI5mcHAzvirdIm/hXMVp4QH9ijWnU1xzW23GXk6Bf+5B1kv9hzw==} + engines: {node: '>=12.20.1'} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.6.1 + dev: false + + /fast-unique-numbers@8.0.7: + resolution: {integrity: sha512-I+VCWGlHB6HSqE0W0FxB5mgmgBHJiBs19kS9y6JJKXDp84IzuE7H24NRwpnZbuONK7T2r+7T0z1OZbehc5URxA==} + engines: {node: '>=16.1.0'} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.6.1 + dev: false + + /fastq@1.13.0: + resolution: {integrity: sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==} + dependencies: + reusify: 1.0.4 + + /fb-watchman@2.0.2: + resolution: {integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==} + dependencies: + bser: 2.1.1 + dev: true + + /fd-slicer@1.1.0: + resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==} + dependencies: + pend: 1.2.0 + dev: true + + /fetch-retry@5.0.6: + resolution: {integrity: sha512-3yurQZ2hD9VISAhJJP9bpYFNQrHHBXE2JxxjY5aLEcDi46RmAzJE2OC9FAde0yis5ElW0jTTzs0zfg/Cca4XqQ==} + dev: true + + /figures@3.2.0: + resolution: {integrity: sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==} + engines: {node: '>=8'} + dependencies: + escape-string-regexp: 1.0.5 + dev: false + + /file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flat-cache: 3.0.4 + + /file-system-cache@2.3.0: + resolution: {integrity: sha512-l4DMNdsIPsVnKrgEXbJwDJsA5mB8rGwHYERMgqQx/xAUtChPJMre1bXBzDEqqVbWv9AIbFezXMxeEkZDSrXUOQ==} + dependencies: + fs-extra: 11.1.1 + ramda: 0.29.0 + dev: true + + /file-system-cache@2.4.1: + resolution: {integrity: sha512-mzEiUdjzqhxwppIJVSBq8C9evWM1j0v/lCg7gFMDiQDQPlQSm8kRfXSPFScT1p/Fxy0N2LSIps6g28e8itumlg==} + dependencies: + fs-extra: 11.1.1 + ramda: 0.29.0 + dev: true + + /file-uri-to-path@1.0.0: + resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} + dev: false + + /filelist@1.0.4: + resolution: {integrity: sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==} + dependencies: + minimatch: 5.1.6 + dev: true + + /fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + dependencies: + to-regex-range: 5.0.1 + + /finalhandler@1.2.0: + resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} + engines: {node: '>= 0.8'} + dependencies: + debug: 2.6.9 + encodeurl: 1.0.2 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 + statuses: 2.0.1 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + 
/find-cache-dir@2.1.0: + resolution: {integrity: sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==} + engines: {node: '>=6'} + dependencies: + commondir: 1.0.1 + make-dir: 2.1.0 + pkg-dir: 3.0.0 + dev: true + + /find-cache-dir@3.3.2: + resolution: {integrity: sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==} + engines: {node: '>=8'} + dependencies: + commondir: 1.0.1 + make-dir: 3.1.0 + pkg-dir: 4.2.0 + dev: true + + /find-up@3.0.0: + resolution: {integrity: sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==} + engines: {node: '>=6'} + dependencies: + locate-path: 3.0.0 + dev: true + + /find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + + /find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + /find-yarn-workspace-root2@1.2.16: + resolution: {integrity: sha512-hr6hb1w8ePMpPVUK39S4RlwJzi+xPLuVuG8XlwXU3KD5Yn3qgBWVfy3AzNlDhWvE1EORCE65/Qm26rFQt3VLVA==} + dependencies: + micromatch: 4.0.4 + pkg-dir: 4.2.0 + dev: false + + /flat-cache@3.0.4: + resolution: {integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flatted: 3.2.7 + rimraf: 3.0.2 + + /flatted@3.2.7: + resolution: {integrity: sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==} + + /flow-parser@0.210.2: + resolution: {integrity: sha512-kQiVau1WnXMCxJziuOF9wk4EoE/sPTU5H7dWOJN+7lsh+tmUh6LXz1dcLE44D+ouVIg8RRnfRZQymZqzKfh5fA==} + engines: {node: '>=0.4.0'} + dev: true + + /for-each@0.3.3: + resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==} + dependencies: + is-callable: 1.2.7 + + /form-data@3.0.1: + resolution: {integrity: sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==} + engines: {node: '>= 6'} + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.34 + dev: true + + /form-data@4.0.0: + resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} + engines: {node: '>= 6'} + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.34 + dev: false + + /forwarded@0.2.0: + resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} + engines: {node: '>= 0.6'} + dev: true + + /fraction.js@4.2.0: + resolution: {integrity: sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==} + dev: false + + /fresh@0.5.2: + resolution: {integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==} + engines: {node: '>= 0.6'} + dev: true + + /fs-constants@1.0.0: + resolution: {integrity: sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==} + dev: true + + /fs-extra@11.1.1: + resolution: {integrity: sha512-MGIE4HOvQCeUCzmlHs0vXpih4ysz4wg9qiSAu6cd42lVwPbTM1TjV7RusoyQqMmk/95gdQZX72u+YW+c3eEpFQ==} + engines: {node: '>=14.14'} + 
dependencies: + graceful-fs: 4.2.9 + jsonfile: 6.1.0 + universalify: 2.0.0 + dev: true + + /fs-extra@7.0.1: + resolution: {integrity: sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==} + engines: {node: '>=6 <7 || >=8'} + dependencies: + graceful-fs: 4.2.9 + jsonfile: 4.0.0 + universalify: 0.1.2 + dev: false + + /fs-extra@8.1.0: + resolution: {integrity: sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==} + engines: {node: '>=6 <7 || >=8'} + dependencies: + graceful-fs: 4.2.9 + jsonfile: 4.0.0 + universalify: 0.1.2 + dev: false + + /fs-minipass@2.1.0: + resolution: {integrity: sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==} + engines: {node: '>= 8'} + dependencies: + minipass: 3.3.6 + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + /fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + requiresBuild: true + optional: true + + /function-bind@1.1.1: + resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} + + /function.prototype.name@1.1.5: + resolution: {integrity: sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + es-abstract: 1.21.2 + functions-have-names: 1.2.3 + dev: false + + /functional-red-black-tree@1.0.1: + resolution: {integrity: sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==} + dev: true + + /functions-have-names@1.2.3: + resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + dev: false + + /gauge@3.0.2: + resolution: {integrity: sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==} + engines: {node: '>=10'} + dependencies: + aproba: 2.0.0 + color-support: 1.1.3 + console-control-strings: 1.1.0 + has-unicode: 2.0.1 + object-assign: 4.1.1 + signal-exit: 3.0.7 + string-width: 4.2.3 + strip-ansi: 6.0.1 + wide-align: 1.1.5 + + /gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + /get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + dev: false + + /get-css-data@2.1.0: + resolution: {integrity: sha512-HtPrzGk8aBF9rLeQNuImcXci7YVqsMEKzVflEWaCJu25ehxyDNiZRWoSxqSFUBfma8LERqKo70t/TcaGjIsM9g==} + dev: false + + /get-func-name@2.0.0: + resolution: {integrity: sha512-Hm0ixYtaSZ/V7C8FJrtZIuBBI+iSgL+1Aq82zSu8VQNB4S3Gk8e7Qs3VwBDJAhmRZcFqkl3tQu36g/Foh5I5ig==} + dev: false + + /get-intrinsic@1.2.0: + resolution: {integrity: sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==} + dependencies: + function-bind: 1.1.1 + has: 1.0.3 + has-symbols: 1.0.3 + + /get-npm-tarball-url@2.0.3: + resolution: {integrity: sha512-R/PW6RqyaBQNWYaSyfrh54/qtcnOp22FHCCiRhSSZj0FP3KQWCsxxt0DzIdVTbwTqe9CtQfvl/FPD4UIPt4pqw==} + engines: {node: '>=12.17'} + dev: true + + 
/get-package-type@0.1.0: + resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} + engines: {node: '>=8.0.0'} + dev: true + + /get-port@3.2.0: + resolution: {integrity: sha1-3Xzn3hh8Bsi/NTeWrHHgmfCYDrw=} + engines: {node: '>=4'} + dev: false + + /get-port@5.1.1: + resolution: {integrity: sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ==} + engines: {node: '>=8'} + dev: true + + /get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + dev: true + + /get-symbol-description@1.0.0: + resolution: {integrity: sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.2.0 + dev: false + + /giget@1.1.2: + resolution: {integrity: sha512-HsLoS07HiQ5oqvObOI+Qb2tyZH4Gj5nYGfF9qQcZNrPw+uEFhdXtgJr01aO2pWadGHucajYDLxxbtQkm97ON2A==} + hasBin: true + dependencies: + colorette: 2.0.20 + defu: 6.1.2 + https-proxy-agent: 5.0.1 + mri: 1.2.0 + node-fetch-native: 1.2.0 + pathe: 1.1.1 + tar: 6.1.15 + transitivePeerDependencies: + - supports-color + dev: true + + /github-slugger@1.5.0: + resolution: {integrity: sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==} + dev: true + + /glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + dependencies: + is-glob: 4.0.3 + + /glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + dependencies: + is-glob: 4.0.3 + + /glob-promise@6.0.3(glob@8.1.0): + resolution: {integrity: sha512-m+kxywR5j/2Z2V9zvHKfwwL5Gp7gIFEBX+deTB9w2lJB+wSuw9kcS43VfvTAMk8TXL5JCl/cCjsR+tgNVspGyA==} + engines: {node: '>=16'} + peerDependencies: + glob: ^8.0.3 + dependencies: + '@types/glob': 8.1.0 + glob: 8.1.0 + dev: true + + /glob-to-regexp@0.4.1: + resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==} + dev: true + + /glob@7.2.0: + resolution: {integrity: sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + /glob@8.1.0: + resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==} + engines: {node: '>=12'} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 5.1.6 + once: 1.4.0 + dev: true + + /globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + /globals@13.20.0: + resolution: {integrity: sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.20.2 + + /globalthis@1.0.3: + resolution: {integrity: sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==} + engines: {node: '>= 0.4'} + dependencies: + define-properties: 1.2.0 + dev: false + + /globalyzer@0.1.0: + resolution: {integrity: 
sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==} + + /globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.2.11 + ignore: 5.2.4 + merge2: 1.4.1 + slash: 3.0.0 + + /globrex@0.1.2: + resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} + + /gopd@1.0.1: + resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} + dependencies: + get-intrinsic: 1.2.0 + + /graceful-fs@4.2.9: + resolution: {integrity: sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==} + + /grapheme-splitter@1.0.4: + resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} + dev: false + + /graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + dev: false + + /graphql@16.6.0: + resolution: {integrity: sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw==} + engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + dev: false + + /gunzip-maybe@1.4.2: + resolution: {integrity: sha512-4haO1M4mLO91PW57BMsDFf75UmwoRX0GkdD+Faw+Lr+r/OZrOCS0pIBwOL1xCKQqnQzbNFGgK2V2CpBUPeFNTw==} + hasBin: true + dependencies: + browserify-zlib: 0.1.4 + is-deflate: 1.0.0 + is-gzip: 1.0.0 + peek-stream: 1.1.3 + pumpify: 1.5.1 + through2: 2.0.5 + dev: true + + /handlebars@4.7.7: + resolution: {integrity: sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==} + engines: {node: '>=0.4.7'} + hasBin: true + dependencies: + minimist: 1.2.6 + neo-async: 2.6.2 + source-map: 0.6.1 + wordwrap: 1.0.0 + optionalDependencies: + uglify-js: 3.17.4 + dev: true + + /hard-rejection@2.1.0: + resolution: {integrity: sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==} + engines: {node: '>=6'} + dev: false + + /has-bigints@1.0.2: + resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==} + dev: false + + /has-flag@3.0.0: + resolution: {integrity: sha1-tdRU3CGZriJWmfNGfloH87lVuv0=} + engines: {node: '>=4'} + + /has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + /has-property-descriptors@1.0.0: + resolution: {integrity: sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==} + dependencies: + get-intrinsic: 1.2.0 + + /has-proto@1.0.1: + resolution: {integrity: sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==} + engines: {node: '>= 0.4'} + dev: false + + /has-symbols@1.0.3: + resolution: {integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} + engines: {node: '>= 0.4'} + + /has-tostringtag@1.0.0: + resolution: {integrity: sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==} + engines: {node: '>= 0.4'} + dependencies: + has-symbols: 1.0.3 + + /has-unicode@2.0.1: + resolution: {integrity: 
sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==} + + /has@1.0.3: + resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} + engines: {node: '>= 0.4.0'} + dependencies: + function-bind: 1.1.1 + + /hast-util-to-string@3.0.0: + resolution: {integrity: sha512-OGkAxX1Ua3cbcW6EJ5pT/tslVb90uViVkcJ4ZZIMW/R33DX/AkcJcRrPebPwJkHYwlDHXz4aIwvAAaAdtrACFA==} + dependencies: + '@types/hast': 3.0.0 + dev: false + + /he@1.2.0: + resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} + hasBin: true + dev: false + + /headers-polyfill@3.1.2: + resolution: {integrity: sha512-tWCK4biJ6hcLqTviLXVR9DTRfYGQMXEIUj3gwJ2rZ5wO/at3XtkI4g8mCvFdUF9l1KMBNCfmNAdnahm1cgavQA==} + dev: false + + /hosted-git-info@2.8.9: + resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} + + /html-encoding-sniffer@3.0.0: + resolution: {integrity: sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==} + engines: {node: '>=12'} + dependencies: + whatwg-encoding: 2.0.0 + dev: false + + /htmlparser2-svelte@4.1.0: + resolution: {integrity: sha512-+4f4RBFz7Rj2Hp0ZbFbXC+Kzbd6S9PgjiuFtdT76VMNgKogrEZy0pG2UrPycPbrZzVEIM5lAT3lAdkSTCHLPjg==} + dependencies: + domelementtype: 2.3.0 + domhandler: 3.3.0 + domutils: 2.8.0 + entities: 2.2.0 + dev: true + + /http-errors@2.0.0: + resolution: {integrity: sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} + engines: {node: '>= 0.8'} + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.1 + toidentifier: 1.0.1 + dev: true + + /http-proxy-agent@5.0.0: + resolution: {integrity: sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==} + engines: {node: '>= 6'} + dependencies: + '@tootallnate/once': 2.0.0 + agent-base: 6.0.2 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: false + + /https-proxy-agent@4.0.0: + resolution: {integrity: sha512-zoDhWrkR3of1l9QAL8/scJZyLu8j/gBkcwcaQOZh7Gyh/+uJQzGVETdgT30akuwkpL8HTRfssqI3BZuV18teDg==} + engines: {node: '>= 6.0.0'} + dependencies: + agent-base: 5.1.1 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: true + + /https-proxy-agent@5.0.1: + resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} + engines: {node: '>= 6'} + dependencies: + agent-base: 6.0.2 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + + /human-id@1.0.2: + resolution: {integrity: sha512-UNopramDEhHJD+VR+ehk8rOslwSfByxPIZyJRfV739NDhN5LF1fa1MqnzKm2lGTQRjNrjK19Q5fhkgIfjlVUKw==} + dev: false + + /human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + dev: true + + /iconv-lite@0.4.24: + resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} + engines: {node: '>=0.10.0'} + dependencies: + safer-buffer: 2.1.2 + + /iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + dependencies: + safer-buffer: 2.1.2 + + /icss-utils@5.1.0(postcss@8.4.27): + resolution: {integrity: 
sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + dependencies: + postcss: 8.4.27 + dev: true + + /ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + + /ignore@4.0.6: + resolution: {integrity: sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==} + engines: {node: '>= 4'} + dev: true + + /ignore@5.2.4: + resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==} + engines: {node: '>= 4'} + + /image-size@0.5.5: + resolution: {integrity: sha512-6TDAlDPZxUFCv+fuOkIoXT/V/f3Qbq8e37p+YOiYrUv3v9cc3/6x78VdfPgFVaB9dZYeLUfKgHRebpkm/oP2VQ==} + engines: {node: '>=0.10.0'} + hasBin: true + requiresBuild: true + optional: true + + /import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + /import-meta-resolve@3.0.0: + resolution: {integrity: sha512-4IwhLhNNA8yy445rPjD/lWh++7hMDOml2eHtd58eG7h+qK3EryMuuRbsHGPikCoAgIkkDnckKfWSk2iDla/ejg==} + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + /indefinite-article@0.0.2: + resolution: {integrity: sha512-Au/2XzRkvxq2J6w5uvSSbBKPZ5kzINx5F2wb0SF8xpRL8BP9Lav81TnRbfPp6p+SYjYxwaaLn4EUwI3/MmYKSw==} + dev: false + + /indent-string@4.0.0: + resolution: {integrity: sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==} + engines: {node: '>=8'} + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + /inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + /inquirer@8.2.5: + resolution: {integrity: sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==} + engines: {node: '>=12.0.0'} + dependencies: + ansi-escapes: 4.3.2 + chalk: 4.1.2 + cli-cursor: 3.1.0 + cli-width: 3.0.0 + external-editor: 3.1.0 + figures: 3.2.0 + lodash: 4.17.21 + mute-stream: 0.0.8 + ora: 5.4.1 + run-async: 2.4.1 + rxjs: 7.8.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + through: 2.3.8 + wrap-ansi: 7.0.0 + dev: false + + /internal-slot@1.0.5: + resolution: {integrity: sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==} + engines: {node: '>= 0.4'} + dependencies: + get-intrinsic: 1.2.0 + has: 1.0.3 + side-channel: 1.0.4 + dev: false + + /internmap@2.0.3: + resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} + engines: {node: '>=12'} + dev: false + + /interpret@1.4.0: + resolution: {integrity: sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==} + engines: {node: '>= 0.10'} + dev: true + + /intl-messageformat@9.13.0: + resolution: {integrity: sha512-7sGC7QnSQGa5LZP7bXLDhVDtQOeKGeBFGHF2Y8LVBwYZoQZCgWeKoPGTa5GMG8g/TzDgeXuYJQis7Ggiw2xTOw==} + dependencies: + '@formatjs/ecma402-abstract': 1.11.4 + '@formatjs/fast-memoize': 
1.2.1 + '@formatjs/icu-messageformat-parser': 2.1.0 + tslib: 2.6.1 + dev: false + + /ip@2.0.0: + resolution: {integrity: sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==} + dev: true + + /ipaddr.js@1.9.1: + resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} + engines: {node: '>= 0.10'} + dev: true + + /is-absolute-url@3.0.3: + resolution: {integrity: sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==} + engines: {node: '>=8'} + dev: true + + /is-arguments@1.1.1: + resolution: {integrity: sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + has-tostringtag: 1.0.0 + + /is-array-buffer@3.0.2: + resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.2.0 + is-typed-array: 1.1.10 + dev: false + + /is-arrayish@0.2.1: + resolution: {integrity: sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=} + + /is-bigint@1.0.4: + resolution: {integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==} + dependencies: + has-bigints: 1.0.2 + dev: false + + /is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + dependencies: + binary-extensions: 2.2.0 + + /is-boolean-object@1.1.2: + resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + has-tostringtag: 1.0.0 + dev: false + + /is-callable@1.2.7: + resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} + engines: {node: '>= 0.4'} + + /is-ci@3.0.1: + resolution: {integrity: sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==} + hasBin: true + dependencies: + ci-info: 3.8.0 + dev: false + + /is-core-module@2.9.0: + resolution: {integrity: sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A==} + dependencies: + has: 1.0.3 + + /is-date-object@1.0.5: + resolution: {integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==} + engines: {node: '>= 0.4'} + dependencies: + has-tostringtag: 1.0.0 + dev: false + + /is-deflate@1.0.0: + resolution: {integrity: sha512-YDoFpuZWu1VRXlsnlYMzKyVRITXj7Ej/V9gXQ2/pAe7X1J7M/RNOqaIYi6qUn+B7nGyB9pDXrv02dsB58d2ZAQ==} + dev: true + + /is-docker@2.2.1: + resolution: {integrity: sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==} + engines: {node: '>=8'} + hasBin: true + dev: true + + /is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + /is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + /is-generator-function@1.0.10: + resolution: {integrity: sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==} + engines: {node: '>= 0.4'} + dependencies: + 
has-tostringtag: 1.0.0 + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + dependencies: + is-extglob: 2.1.1 + + /is-gzip@1.0.0: + resolution: {integrity: sha512-rcfALRIb1YewtnksfRIHGcIY93QnK8BIQ/2c9yDYcG/Y6+vRoJuTWBmmSEbyLLYtXm7q35pHOHbZFQBaLrhlWQ==} + engines: {node: '>=0.10.0'} + dev: true + + /is-interactive@1.0.0: + resolution: {integrity: sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==} + engines: {node: '>=8'} + + /is-nan@1.3.2: + resolution: {integrity: sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + dev: true + + /is-negative-zero@2.0.2: + resolution: {integrity: sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==} + engines: {node: '>= 0.4'} + dev: false + + /is-node-process@1.2.0: + resolution: {integrity: sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==} + dev: false + + /is-number-object@1.0.6: + resolution: {integrity: sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g==} + engines: {node: '>= 0.4'} + dependencies: + has-tostringtag: 1.0.0 + dev: false + + /is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + /is-path-cwd@2.2.0: + resolution: {integrity: sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==} + engines: {node: '>=6'} + dev: true + + /is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + /is-plain-obj@1.1.0: + resolution: {integrity: sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==} + engines: {node: '>=0.10.0'} + dev: false + + /is-plain-object@2.0.4: + resolution: {integrity: sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==} + engines: {node: '>=0.10.0'} + dependencies: + isobject: 3.0.1 + dev: true + + /is-potential-custom-element-name@1.0.1: + resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} + dev: false + + /is-promise@2.2.2: + resolution: {integrity: sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==} + dev: false + + /is-reference@3.0.1: + resolution: {integrity: sha512-baJJdQLiYaJdvFbJqXrcGv3WU3QCzBlUcI5QhbesIm6/xPsvmO+2CDoi/GMOFBQEQm+PXkwOPrp9KK5ozZsp2w==} + dependencies: + '@types/estree': 1.0.0 + + /is-regex@1.1.4: + resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + has-tostringtag: 1.0.0 + dev: false + + /is-shared-array-buffer@1.0.2: + resolution: {integrity: sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==} + dependencies: + call-bind: 1.0.2 + dev: false + + /is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + dev: true + + 
/is-string@1.0.7: + resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} + engines: {node: '>= 0.4'} + dependencies: + has-tostringtag: 1.0.0 + dev: false + + /is-subdir@1.2.0: + resolution: {integrity: sha512-2AT6j+gXe/1ueqbW6fLZJiIw3F8iXGJtt0yDrZaBhAZEG1raiTxKWU+IPqMCzQAXOUCKdA4UDMgacKH25XG2Cw==} + engines: {node: '>=4'} + dependencies: + better-path-resolve: 1.0.0 + dev: false + + /is-symbol@1.0.4: + resolution: {integrity: sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==} + engines: {node: '>= 0.4'} + dependencies: + has-symbols: 1.0.3 + dev: false + + /is-typed-array@1.1.10: + resolution: {integrity: sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==} + engines: {node: '>= 0.4'} + dependencies: + available-typed-arrays: 1.0.5 + call-bind: 1.0.2 + for-each: 0.3.3 + gopd: 1.0.1 + has-tostringtag: 1.0.0 + + /is-unicode-supported@0.1.0: + resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==} + engines: {node: '>=10'} + + /is-weakref@1.0.2: + resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} + dependencies: + call-bind: 1.0.2 + dev: false + + /is-what@3.14.1: + resolution: {integrity: sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==} + + /is-windows@1.0.2: + resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} + engines: {node: '>=0.10.0'} + dev: false + + /is-wsl@2.2.0: + resolution: {integrity: sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==} + engines: {node: '>=8'} + dependencies: + is-docker: 2.2.1 + dev: true + + /isarray@1.0.0: + resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} + dev: true + + /isexe@2.0.0: + resolution: {integrity: sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=} + + /isobject@3.0.1: + resolution: {integrity: sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} + engines: {node: '>=0.10.0'} + dev: true + + /isomorphic-unfetch@3.1.0: + resolution: {integrity: sha512-geDJjpoZ8N0kWexiwkX8F9NkTsXhetLPVbZFQ+JTW239QNOwvB0gniuR1Wc6f0AMTn7/mFGyXvHTifrCp/GH8Q==} + dependencies: + node-fetch: 2.6.7 + unfetch: 4.2.0 + transitivePeerDependencies: + - encoding + dev: true + + /istanbul-lib-coverage@3.2.0: + resolution: {integrity: sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==} + engines: {node: '>=8'} + dev: true + + /istanbul-lib-instrument@5.2.1: + resolution: {integrity: sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==} + engines: {node: '>=8'} + dependencies: + '@babel/core': 7.22.5 + '@babel/parser': 7.22.5 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.0 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /jake@10.8.7: + resolution: {integrity: sha512-ZDi3aP+fG/LchyBzUM804VjddnwfSfsdeYkwt8NcbKRvo4rFkjhs456iLFn3k2ZUWvNe4i48WACDbza8fhq2+w==} + engines: {node: '>=10'} + hasBin: true + dependencies: + async: 3.2.4 + chalk: 4.1.2 + filelist: 1.0.4 + minimatch: 3.1.2 + dev: true + + /javascript-stringify@2.1.0: + resolution: {integrity: 
sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg==} + dev: false + + /jest-diff@29.5.0: + resolution: {integrity: sha512-LtxijLLZBduXnHSniy0WMdaHjmQnt3g5sa16W4p0HqukYTTsyTW3GD1q41TyGl5YFXj/5B2U6dlh5FM1LIMgxw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + chalk: 4.1.2 + diff-sequences: 29.4.3 + jest-get-type: 29.4.3 + pretty-format: 29.5.0 + dev: false + + /jest-get-type@29.4.3: + resolution: {integrity: sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dev: false + + /jest-haste-map@29.5.0: + resolution: {integrity: sha512-IspOPnnBro8YfVYSw6yDRKh/TiCdRngjxeacCps1cQ9cgVN6+10JUcuJ1EabrgYLOATsIAigxA0rLR9x/YlrSA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.5.0 + '@types/graceful-fs': 4.1.6 + '@types/node': 20.3.2 + anymatch: 3.1.2 + fb-watchman: 2.0.2 + graceful-fs: 4.2.9 + jest-regex-util: 29.4.3 + jest-util: 29.5.0 + jest-worker: 29.5.0 + micromatch: 4.0.4 + walker: 1.0.8 + optionalDependencies: + fsevents: 2.3.2 + dev: true + + /jest-matcher-utils@29.5.0: + resolution: {integrity: sha512-lecRtgm/rjIK0CQ7LPQwzCs2VwW6WAahA55YBuI+xqmhm7LAaxokSB8C97yJeYyT+HvQkH741StzpU41wohhWw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + chalk: 4.1.2 + jest-diff: 29.5.0 + jest-get-type: 29.4.3 + pretty-format: 29.5.0 + dev: false + + /jest-message-util@29.5.0: + resolution: {integrity: sha512-Kijeg9Dag6CKtIDA7O21zNTACqD5MD/8HfIV8pdD94vFyFuer52SigdC3IQMhab3vACxXMiFk+yMHNdbqtyTGA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@babel/code-frame': 7.16.7 + '@jest/types': 29.5.0 + '@types/stack-utils': 2.0.1 + chalk: 4.1.2 + graceful-fs: 4.2.9 + micromatch: 4.0.4 + pretty-format: 29.5.0 + slash: 3.0.0 + stack-utils: 2.0.6 + dev: false + + /jest-mock@27.5.1: + resolution: {integrity: sha512-K4jKbY1d4ENhbrG2zuPWaQBvDly+iZ2yAW+T1fATN78hc0sInwn7wZB8XtlNnvHug5RMwV897Xm4LqmPM4e2Og==} + engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} + dependencies: + '@jest/types': 27.5.1 + '@types/node': 20.3.2 + dev: true + + /jest-regex-util@29.4.3: + resolution: {integrity: sha512-O4FglZaMmWXbGHSQInfXewIsd1LMn9p3ZXB/6r4FOkyhX2/iP/soMG98jGvk/A3HAN78+5VWcBGO0BJAPRh4kg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dev: true + + /jest-util@29.5.0: + resolution: {integrity: sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.5.0 + '@types/node': 20.3.2 + chalk: 4.1.2 + ci-info: 3.8.0 + graceful-fs: 4.2.9 + picomatch: 2.3.1 + + /jest-worker@27.5.1: + resolution: {integrity: sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==} + engines: {node: '>= 10.13.0'} + dependencies: + '@types/node': 20.3.2 + merge-stream: 2.0.0 + supports-color: 8.1.1 + dev: true + + /jest-worker@29.5.0: + resolution: {integrity: sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@types/node': 20.3.2 + jest-util: 29.5.0 + merge-stream: 2.0.0 + supports-color: 8.1.1 + dev: true + + /jiti@1.18.2: + resolution: {integrity: sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg==} + hasBin: true + dev: true + + /jju@1.4.0: + resolution: {integrity: 
sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==} + dev: false + + /js-levenshtein@1.1.6: + resolution: {integrity: sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==} + engines: {node: '>=0.10.0'} + dev: false + + /js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + /js-yaml@3.14.1: + resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} + hasBin: true + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + dependencies: + argparse: 2.0.1 + + /jscodeshift@0.14.0(@babel/preset-env@7.21.5): + resolution: {integrity: sha512-7eCC1knD7bLUPuSCwXsMZUH51O8jIcoVyKtI6P0XM0IVzlGjckPy3FIwQlorzbN0Sg79oK+RlohN32Mqf/lrYA==} + hasBin: true + peerDependencies: + '@babel/preset-env': ^7.1.6 + dependencies: + '@babel/core': 7.22.5 + '@babel/parser': 7.22.5 + '@babel/plugin-proposal-class-properties': 7.18.6(@babel/core@7.22.5) + '@babel/plugin-proposal-nullish-coalescing-operator': 7.18.6(@babel/core@7.22.5) + '@babel/plugin-proposal-optional-chaining': 7.21.0(@babel/core@7.22.5) + '@babel/plugin-transform-modules-commonjs': 7.22.5(@babel/core@7.22.5) + '@babel/preset-env': 7.21.5(@babel/core@7.21.8) + '@babel/preset-flow': 7.22.5(@babel/core@7.22.5) + '@babel/preset-typescript': 7.22.5(@babel/core@7.22.5) + '@babel/register': 7.22.5(@babel/core@7.22.5) + babel-core: 7.0.0-bridge.0(@babel/core@7.22.5) + chalk: 4.1.2 + flow-parser: 0.210.2 + graceful-fs: 4.2.9 + micromatch: 4.0.4 + neo-async: 2.6.2 + node-dir: 0.1.17 + recast: 0.21.5 + temp: 0.8.4 + write-file-atomic: 2.4.3 + transitivePeerDependencies: + - supports-color + dev: true + + /jscodeshift@0.14.0(@babel/preset-env@7.22.5): + resolution: {integrity: sha512-7eCC1knD7bLUPuSCwXsMZUH51O8jIcoVyKtI6P0XM0IVzlGjckPy3FIwQlorzbN0Sg79oK+RlohN32Mqf/lrYA==} + hasBin: true + peerDependencies: + '@babel/preset-env': ^7.1.6 + dependencies: + '@babel/core': 7.22.5 + '@babel/parser': 7.22.5 + '@babel/plugin-proposal-class-properties': 7.18.6(@babel/core@7.22.5) + '@babel/plugin-proposal-nullish-coalescing-operator': 7.18.6(@babel/core@7.22.5) + '@babel/plugin-proposal-optional-chaining': 7.21.0(@babel/core@7.22.5) + '@babel/plugin-transform-modules-commonjs': 7.22.5(@babel/core@7.22.5) + '@babel/preset-env': 7.22.5(@babel/core@7.22.5) + '@babel/preset-flow': 7.22.5(@babel/core@7.22.5) + '@babel/preset-typescript': 7.22.5(@babel/core@7.22.5) + '@babel/register': 7.22.5(@babel/core@7.22.5) + babel-core: 7.0.0-bridge.0(@babel/core@7.22.5) + chalk: 4.1.2 + flow-parser: 0.210.2 + graceful-fs: 4.2.9 + micromatch: 4.0.4 + neo-async: 2.6.2 + node-dir: 0.1.17 + recast: 0.21.5 + temp: 0.8.4 + write-file-atomic: 2.4.3 + transitivePeerDependencies: + - supports-color + dev: true + + /jsdom@22.1.0: + resolution: {integrity: sha512-/9AVW7xNbsBv6GfWho4TTNjEo9fe6Zhf9O7s0Fhhr3u+awPwAJMKwAMXnkk5vBxflqLW9hTHX/0cs+P3gW+cQw==} + engines: {node: '>=16'} + peerDependencies: + canvas: ^2.5.0 + peerDependenciesMeta: + canvas: + optional: true + dependencies: + abab: 2.0.6 + cssstyle: 3.0.0 + data-urls: 4.0.0 + decimal.js: 10.4.3 + domexception: 4.0.0 + form-data: 4.0.0 + html-encoding-sniffer: 3.0.0 + http-proxy-agent: 5.0.0 + https-proxy-agent: 5.0.1 + is-potential-custom-element-name: 
1.0.1 + nwsapi: 2.2.5 + parse5: 7.1.2 + rrweb-cssom: 0.6.0 + saxes: 6.0.0 + symbol-tree: 3.2.4 + tough-cookie: 4.1.3 + w3c-xmlserializer: 4.0.0 + webidl-conversions: 7.0.0 + whatwg-encoding: 2.0.0 + whatwg-mimetype: 3.0.0 + whatwg-url: 12.0.1 + ws: 8.13.0(bufferutil@4.0.7) + xml-name-validator: 4.0.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: false + + /jsesc@0.5.0: + resolution: {integrity: sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==} + hasBin: true + dev: true + + /jsesc@2.5.2: + resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==} + engines: {node: '>=4'} + hasBin: true + + /json-parse-better-errors@1.0.2: + resolution: {integrity: sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==} + dev: false + + /json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + /json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + /json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + /json-stringify-pretty-compact@3.0.0: + resolution: {integrity: sha512-Rc2suX5meI0S3bfdZuA7JMFBGkJ875ApfVyq2WHELjBiiG22My/l7/8zPpH/CfFVQHuVLd8NLR0nv6vi0BYYKA==} + dev: false + + /json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + /jsonc-parser@3.2.0: + resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==} + dev: false + + /jsonfile@4.0.0: + resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} + optionalDependencies: + graceful-fs: 4.2.9 + dev: false + + /jsonfile@6.1.0: + resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + dependencies: + universalify: 2.0.0 + optionalDependencies: + graceful-fs: 4.2.9 + dev: true + + /katex@0.16.7: + resolution: {integrity: sha512-Xk9C6oGKRwJTfqfIbtr0Kes9OSv6IFsuhFGc7tW4urlpMJtuh+7YhzU6YEG9n8gmWKcMAFzkp7nr+r69kV0zrA==} + hasBin: true + dependencies: + commander: 8.3.0 + dev: false + + /kind-of@6.0.3: + resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} + engines: {node: '>=0.10.0'} + + /kleur@3.0.3: + resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} + engines: {node: '>=6'} + dev: true + + /kleur@4.1.5: + resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} + engines: {node: '>=6'} + + /known-css-properties@0.28.0: + resolution: {integrity: sha512-9pSL5XB4J+ifHP0e0jmmC98OGC1nL8/JjS+fi6mnTlIf//yt/MfVLtKg7S6nCtj/8KTcWX7nRlY0XywoYY1ISQ==} + dev: false + + /lazy-brush@1.0.1: + resolution: {integrity: sha512-xT/iSClTVi7vLoF8dCWTBhCuOWqsLXCMPa6ucVmVAk6hyNCM5JeS1NLhXqIrJktUg+caEYKlqSOUU4u3cpXzKg==} + dev: false + + /lazy-universal-dotenv@4.0.0: + resolution: {integrity: 
sha512-aXpZJRnTkpK6gQ/z4nk+ZBLd/Qdp118cvPruLSIQzQNRhKwEcdXCOzXuF55VDqIiuAaY3UGZ10DJtvZzDcvsxg==} + engines: {node: '>=14.0.0'} + dependencies: + app-root-dir: 1.0.2 + dotenv: 16.3.1 + dotenv-expand: 10.0.0 + dev: true + + /less-loader@11.1.3(less@4.1.3)(webpack@5.88.1): + resolution: {integrity: sha512-A5b7O8dH9xpxvkosNrP0dFp2i/dISOJa9WwGF3WJflfqIERE2ybxh1BFDj5CovC2+jCE4M354mk90hN6ziXlVw==} + engines: {node: '>= 14.15.0'} + peerDependencies: + less: ^3.5.0 || ^4.0.0 + webpack: ^5.0.0 + dependencies: + less: 4.1.3 + webpack: 5.88.1(esbuild@0.17.14) + dev: true + + /less@4.1.3: + resolution: {integrity: sha512-w16Xk/Ta9Hhyei0Gpz9m7VS8F28nieJaL/VyShID7cYvP6IL5oHeL6p4TXSDJqZE/lNv0oJ2pGVjJsRkfwm5FA==} + engines: {node: '>=6'} + hasBin: true + dependencies: + copy-anything: 2.0.6 + parse-node-version: 1.0.1 + tslib: 2.6.1 + optionalDependencies: + errno: 0.1.8 + graceful-fs: 4.2.9 + image-size: 0.5.5 + make-dir: 2.1.0 + mime: 1.6.0 + needle: 3.2.0 + source-map: 0.6.1 + transitivePeerDependencies: + - supports-color + + /leven@3.1.0: + resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} + engines: {node: '>=6'} + dev: true + + /levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + /lilconfig@2.0.6: + resolution: {integrity: sha512-9JROoBW7pobfsx+Sq2JsASvCo6Pfo6WWoUW79HuB1BCoBXD4PLWJPqDF6fNj67pqBYTbAHkE57M1kS/+L1neOg==} + engines: {node: '>=10'} + + /lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + /load-json-file@4.0.0: + resolution: {integrity: sha1-L19Fq5HjMhYjT9U62rZo607AmTs=} + engines: {node: '>=4'} + dependencies: + graceful-fs: 4.2.9 + parse-json: 4.0.0 + pify: 3.0.0 + strip-bom: 3.0.0 + dev: false + + /load-yaml-file@0.2.0: + resolution: {integrity: sha512-OfCBkGEw4nN6JLtgRidPX6QxjBQGQf72q3si2uvqyFEMbycSFFHwAZeXx6cJgFM9wmLrf9zBwCP3Ivqa+LLZPw==} + engines: {node: '>=6'} + dependencies: + graceful-fs: 4.2.9 + js-yaml: 3.14.1 + pify: 4.0.1 + strip-bom: 3.0.0 + dev: false + + /loader-runner@4.3.0: + resolution: {integrity: sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==} + engines: {node: '>=6.11.5'} + dev: true + + /loader-utils@2.0.4: + resolution: {integrity: sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==} + engines: {node: '>=8.9.0'} + dependencies: + big.js: 5.2.2 + emojis-list: 3.0.0 + json5: 2.2.3 + dev: true + + /local-access@1.1.0: + resolution: {integrity: sha512-XfegD5pyTAfb+GY6chk283Ox5z8WexG56OvM06RWLpAc/UHozO8X6xAxEkIitZOtsSMM1Yr3DkHgW5W+onLhCw==} + engines: {node: '>=6'} + dev: false + + /local-pkg@0.4.3: + resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==} + engines: {node: '>=14'} + dev: false + + /locate-character@3.0.0: + resolution: {integrity: sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==} + + /locate-path@3.0.0: + resolution: {integrity: sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==} + engines: {node: '>=6'} + dependencies: + p-locate: 3.0.0 + path-exists: 3.0.0 + dev: true + + /locate-path@5.0.0: + resolution: {integrity: 
sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + dependencies: + p-locate: 4.1.0 + + /locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + dependencies: + p-locate: 5.0.0 + + /lodash.castarray@4.4.0: + resolution: {integrity: sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q==} + dev: true + + /lodash.debounce@4.0.8: + resolution: {integrity: sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==} + dev: true + + /lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + dev: true + + /lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + /lodash.startcase@4.4.0: + resolution: {integrity: sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==} + dev: false + + /lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + /log-symbols@4.1.0: + resolution: {integrity: sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==} + engines: {node: '>=10'} + dependencies: + chalk: 4.1.2 + is-unicode-supported: 0.1.0 + + /loose-envify@1.4.0: + resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} + hasBin: true + dependencies: + js-tokens: 4.0.0 + dev: true + + /loupe@2.3.6: + resolution: {integrity: sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==} + dependencies: + get-func-name: 2.0.0 + dev: false + + /lru-cache@4.1.5: + resolution: {integrity: sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==} + dependencies: + pseudomap: 1.0.2 + yallist: 2.1.2 + dev: false + + /lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + dependencies: + yallist: 3.1.1 + + /lru-cache@6.0.0: + resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} + engines: {node: '>=10'} + dependencies: + yallist: 4.0.0 + + /lru-queue@0.1.0: + resolution: {integrity: sha512-BpdYkt9EvGl8OfWHDQPISVpcl5xZthb+XPsbELj5AQXxIC8IriDZIQYjBJPEm5rS420sjZ0TLEzRcq5KdBhYrQ==} + dependencies: + es5-ext: 0.10.62 + dev: false + + /lz-string@1.4.4: + resolution: {integrity: sha512-0ckx7ZHRPqb0oUm8zNr+90mtf9DQB60H1wMCjBtfi62Kl3a7JbHob6gA2bC+xRvZoOL+1hzUK8jeuEIQE8svEQ==} + hasBin: true + + /magic-string@0.27.0: + resolution: {integrity: sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + + /magic-string@0.30.0: + resolution: {integrity: sha512-LA+31JYDJLs82r2ScLrlz1GjSgu66ZV518eyWT+S8VhyQn/JL0u9MeBOvQMGYiPk1DBiSN9DDMOcXvigJZaViQ==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + + /magic-string@0.30.1: + resolution: {integrity: sha512-mbVKXPmS0z0G4XqFDCTllmDQ6coZzn94aMlb0o/A4HEHJCKcanlDZwYJgwnkmgD3jyWhUgj9VsPrfd972yPffA==} + engines: {node: '>=12'} + dependencies: + 
'@jridgewell/sourcemap-codec': 1.4.15 + + /make-dir@2.1.0: + resolution: {integrity: sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==} + engines: {node: '>=6'} + dependencies: + pify: 4.0.1 + semver: 5.7.1 + + /make-dir@3.1.0: + resolution: {integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==} + engines: {node: '>=8'} + dependencies: + semver: 6.3.0 + + /makeerror@1.0.12: + resolution: {integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==} + dependencies: + tmpl: 1.0.5 + dev: true + + /map-obj@1.0.1: + resolution: {integrity: sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==} + engines: {node: '>=0.10.0'} + dev: false + + /map-obj@4.3.0: + resolution: {integrity: sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==} + engines: {node: '>=8'} + dev: false + + /map-obj@5.0.2: + resolution: {integrity: sha512-K6K2NgKnTXimT3779/4KxSvobxOtMmx1LBZ3NwRxT/MDIR3Br/fQ4Q+WCX5QxjyUR8zg5+RV9Tbf2c5pAWTD2A==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: false + + /map-or-similar@1.5.0: + resolution: {integrity: sha512-0aF7ZmVon1igznGI4VS30yugpduQW3y3GkcgGJOp7d8x8QrizhigUxjI/m2UojsXXto+jLAH3KSz+xOJTiORjg==} + dev: true + + /markdown-to-jsx@7.2.1(react@18.2.0): + resolution: {integrity: sha512-9HrdzBAo0+sFz9ZYAGT5fB8ilzTW+q6lPocRxrIesMO+aB40V9MgFfbfMXxlGjf22OpRy+IXlvVaQenicdpgbg==} + engines: {node: '>= 10'} + peerDependencies: + react: '>= 0.14.0' + dependencies: + react: 18.2.0 + dev: true + + /marked-highlight@2.0.1(marked@7.0.0): + resolution: {integrity: sha512-LDUfR/zDvD+dJ+lQOWHkxvBLNxiXcaN8pBtwJ/i4pI0bkDC/Ef6Mz1qUrAuHXfnpdr2rabdMpVFhqFuU+5Mskg==} + peerDependencies: + marked: ^4 || ^5 + dependencies: + marked: 7.0.0 + dev: false + + /marked@7.0.0: + resolution: {integrity: sha512-7Gv1Ry8tqR352ElQOQfxdGpIh8kNZh/yYjNCxAQCN1DDbY4bCTG3qDCSkZWlRElSseeEILDxkY/G9w7cgziBNw==} + engines: {node: '>= 16'} + hasBin: true + dev: false + + /mdast-util-definitions@4.0.0: + resolution: {integrity: sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==} + dependencies: + unist-util-visit: 2.0.3 + dev: true + + /mdast-util-to-string@1.1.0: + resolution: {integrity: sha512-jVU0Nr2B9X3MU4tSK7JP1CMkSvOj7X5l/GboG1tKRw52lLF1x2Ju92Ms9tNetCcbfX3hzlM73zYo2NKkWSfF/A==} + dev: true + + /mdn-data@2.0.30: + resolution: {integrity: sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==} + + /mdsvex@0.11.0(svelte@3.59.2): + resolution: {integrity: sha512-gJF1s0N2nCmdxcKn8HDn0LKrN8poStqAicp6bBcsKFd/zkUBGLP5e7vnxu+g0pjBbDFOscUyI1mtHz+YK2TCDw==} + peerDependencies: + svelte: '>=3 <5' + dependencies: + '@types/unist': 2.0.6 + prism-svelte: 0.4.7 + prismjs: 1.29.0 + svelte: 3.59.2 + vfile-message: 2.0.4 + dev: false + + /media-encoder-host-broker@7.0.89: + resolution: {integrity: sha512-2J57Dj1jBvo3EfmON8LK1Xuwe/3gJ+7N5c22nTxvMrxDo79xPYbSbVRGXFvY/rpO6rlhMlNcLoGecV6Tq1bu0g==} + dependencies: + '@babel/runtime': 7.22.6 + broker-factory: 3.0.84 + fast-unique-numbers: 8.0.7 + media-encoder-host-worker: 9.1.11 + tslib: 2.6.1 + dev: false + + /media-encoder-host-worker@9.1.11: + resolution: {integrity: sha512-7eBfNSFtJ366f3iLtOchuMY8a4+n8wAyVlVYeyh8d9EdxAytieYU+zjjluXYGSczqKKKFrP4tE/8Y3TJtQBhCg==} + dependencies: + '@babel/runtime': 7.22.6 + extendable-media-recorder-wav-encoder-broker: 7.0.88 + tslib: 
2.6.1 + worker-factory: 7.0.9 + dev: false + + /media-encoder-host@8.0.99: + resolution: {integrity: sha512-F+4v70AsYbDO4d9TusWDDeSbdJuRK5XlcAsv29o2vbv4Cn212WwR78w9MYEq0iBAkkHg9/aLrJpog3Yhz5BzFQ==} + dependencies: + '@babel/runtime': 7.22.6 + media-encoder-host-broker: 7.0.89 + media-encoder-host-worker: 9.1.11 + tslib: 2.6.1 + dev: false + + /media-typer@0.3.0: + resolution: {integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==} + engines: {node: '>= 0.6'} + dev: true + + /memoizee@0.4.15: + resolution: {integrity: sha512-UBWmJpLZd5STPm7PMUlOw/TSy972M+z8gcyQ5veOnSDRREz/0bmpyTfKt3/51DhEBqCZQn1udM/5flcSPYhkdQ==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.62 + es6-weak-map: 2.0.3 + event-emitter: 0.3.5 + is-promise: 2.2.2 + lru-queue: 0.1.0 + next-tick: 1.1.0 + timers-ext: 0.1.7 + dev: false + + /memoizerific@1.11.3: + resolution: {integrity: sha512-/EuHYwAPdLtXwAwSZkh/Gutery6pD2KYd44oQLhAvQp/50mpyduZh8Q7PYHXTCJ+wuXxt7oij2LXyIJOOYFPog==} + dependencies: + map-or-similar: 1.5.0 + dev: true + + /memorystream@0.3.1: + resolution: {integrity: sha1-htcJCzDORV1j+64S3aUaR93K+bI=} + engines: {node: '>= 0.10.0'} + dev: false + + /meow@6.1.1: + resolution: {integrity: sha512-3YffViIt2QWgTy6Pale5QpopX/IvU3LPL03jOTqp6pGj3VjesdO/U8CuHMKpnQr4shCNCM5fd5XFFvIIl6JBHg==} + engines: {node: '>=8'} + dependencies: + '@types/minimist': 1.2.2 + camelcase-keys: 6.2.2 + decamelize-keys: 1.1.1 + hard-rejection: 2.1.0 + minimist-options: 4.1.0 + normalize-package-data: 2.5.0 + read-pkg-up: 7.0.1 + redent: 3.0.0 + trim-newlines: 3.0.1 + type-fest: 0.13.1 + yargs-parser: 18.1.3 + dev: false + + /merge-descriptors@1.0.1: + resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==} + dev: true + + /merge-stream@2.0.0: + resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + dev: true + + /merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + /methods@1.1.2: + resolution: {integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==} + engines: {node: '>= 0.6'} + dev: true + + /micromatch@4.0.4: + resolution: {integrity: sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==} + engines: {node: '>=8.6'} + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + + /mime-db@1.51.0: + resolution: {integrity: sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g==} + engines: {node: '>= 0.6'} + + /mime-types@2.1.34: + resolution: {integrity: sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A==} + engines: {node: '>= 0.6'} + dependencies: + mime-db: 1.51.0 + + /mime@1.6.0: + resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} + engines: {node: '>=4'} + hasBin: true + + /mime@2.6.0: + resolution: {integrity: sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==} + engines: {node: '>=4.0.0'} + hasBin: true + dev: true + + /mime@3.0.0: + resolution: {integrity: sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==} + engines: {node: '>=10.0.0'} + hasBin: true + + /mimic-fn@2.1.0: + resolution: {integrity: 
sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + + /min-indent@1.0.1: + resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==} + engines: {node: '>=4'} + + /mini-svg-data-uri@1.4.4: + resolution: {integrity: sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg==} + hasBin: true + + /minimatch@3.0.4: + resolution: {integrity: sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==} + dependencies: + brace-expansion: 1.1.11 + dev: false + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + + /minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimist-options@4.1.0: + resolution: {integrity: sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==} + engines: {node: '>= 6'} + dependencies: + arrify: 1.0.1 + is-plain-obj: 1.1.0 + kind-of: 6.0.3 + dev: false + + /minimist@1.2.6: + resolution: {integrity: sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==} + + /minipass@3.3.6: + resolution: {integrity: sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==} + engines: {node: '>=8'} + dependencies: + yallist: 4.0.0 + + /minipass@5.0.0: + resolution: {integrity: sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==} + engines: {node: '>=8'} + + /minizlib@2.1.2: + resolution: {integrity: sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==} + engines: {node: '>= 8'} + dependencies: + minipass: 3.3.6 + yallist: 4.0.0 + + /mixme@0.5.9: + resolution: {integrity: sha512-VC5fg6ySUscaWUpI4gxCBTQMH2RdUpNrk+MsbpCYtIvf9SBJdiUey4qE7BXviJsJR4nDQxCZ+3yaYNW3guz/Pw==} + engines: {node: '>= 8.0.0'} + dev: false + + /mkdirp-classic@0.5.3: + resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==} + dev: true + + /mkdirp@0.5.5: + resolution: {integrity: sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==} + hasBin: true + dependencies: + minimist: 1.2.6 + + /mkdirp@1.0.4: + resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==} + engines: {node: '>=10'} + hasBin: true + + /mlly@1.4.0: + resolution: {integrity: sha512-ua8PAThnTwpprIaU47EPeZ/bPUVp2QYBbWMphUQpVdBI3Lgqzm5KZQ45Agm3YJedHXaIHl6pBGabaLSUPPSptg==} + dependencies: + acorn: 8.10.0 + pathe: 1.1.1 + pkg-types: 1.0.3 + ufo: 1.1.2 + dev: false + + /mri@1.2.0: + resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} + engines: {node: '>=4'} + + /mrmime@1.0.0: + resolution: {integrity: sha512-a70zx7zFfVO7XpnQ2IX1Myh9yY4UYvfld/dikWRnsXxbyvMcfz+u6UfgNAtH+k2QqtJuzVpv6eLTx1G2+WKZbQ==} + engines: {node: '>=10'} + + /ms@2.0.0: + resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} + dev: true + + /ms@2.1.1: + resolution: {integrity: 
sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==} + dev: true + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + + /ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + /msw@1.2.2(typescript@5.1.3): + resolution: {integrity: sha512-GsW3PE/Es/a1tYThXcM8YHOZ1S1MtivcS3He/LQbbTCx3rbWJYCtWD5XXyJ53KlNPT7O1VI9sCW3xMtgFe8XpQ==} + engines: {node: '>=14'} + hasBin: true + requiresBuild: true + peerDependencies: + typescript: '>= 4.4.x <= 5.1.x' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@mswjs/cookies': 0.2.2 + '@mswjs/interceptors': 0.17.7 + '@open-draft/until': 1.0.3 + '@types/cookie': 0.4.1 + '@types/js-levenshtein': 1.1.1 + chalk: 4.1.1 + chokidar: 3.5.3 + cookie: 0.4.2 + graphql: 16.6.0 + headers-polyfill: 3.1.2 + inquirer: 8.2.5 + is-node-process: 1.2.0 + js-levenshtein: 1.1.6 + node-fetch: 2.6.7 + outvariant: 1.4.0 + path-to-regexp: 6.2.1 + strict-event-emitter: 0.4.6 + type-fest: 2.19.0 + typescript: 5.1.3 + yargs: 17.7.1 + transitivePeerDependencies: + - encoding + - supports-color + dev: false + + /multi-buffer-data-view@5.0.8: + resolution: {integrity: sha512-12yXpvoXK35COAqzXKzAReXxl87aza1TQTJbhqHrT7p60xvu1D7l5wayGZ0UwsVek2FsaSIO03rJFzjW79EW9A==} + engines: {node: '>=16.1.0'} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.6.1 + dev: false + + /mute-stream@0.0.8: + resolution: {integrity: sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==} + dev: false + + /nanoid@3.3.6: + resolution: {integrity: sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + /natural-compare-lite@1.4.0: + resolution: {integrity: sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==} + dev: false + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + /needle@3.2.0: + resolution: {integrity: sha512-oUvzXnyLiVyVGoianLijF9O/RecZUf7TkBfimjGrLM4eQhXyeJwM6GeAWccwfQ9aa4gMCZKqhAOuLaMIcQxajQ==} + engines: {node: '>= 4.4.x'} + hasBin: true + requiresBuild: true + dependencies: + debug: 3.2.7 + iconv-lite: 0.6.3 + sax: 1.2.4 + transitivePeerDependencies: + - supports-color + optional: true + + /negotiator@0.6.3: + resolution: {integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==} + engines: {node: '>= 0.6'} + dev: true + + /neo-async@2.6.2: + resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} + dev: true + + /next-tick@1.1.0: + resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} + dev: false + + /nice-try@1.0.5: + resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} + dev: false + + /node-dir@0.1.17: + resolution: {integrity: sha512-tmPX422rYgofd4epzrNoOXiE8XFZYOcCq1vD7MAXCDO+O+zndlA2ztdKKMa+EeuBG5tHETpr4ml4RGgpqDCCAg==} + engines: {node: '>= 0.10.5'} + dependencies: + minimatch: 3.1.2 + dev: true + + /node-fetch-native@1.2.0: + resolution: {integrity: 
sha512-5IAMBTl9p6PaAjYCnMv5FmqIF6GcZnawAVnzaCG0rX2aYZJ4CxEkZNtVPuTRug7fL7wyM5BQYTlAzcyMPi6oTQ==} + dev: true + + /node-fetch@2.6.7: + resolution: {integrity: sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + dependencies: + whatwg-url: 5.0.0 + + /node-gyp-build@4.6.0: + resolution: {integrity: sha512-NTZVKn9IylLwUzaKjkas1e4u2DLNcV4rdYagA4PWdPwW87Bi7z+BznyKSRwS/761tV/lzCGXplWsiaMjLqP2zQ==} + hasBin: true + + /node-html-parser@6.0.0: + resolution: {integrity: sha512-o4vS5Jm7ZdV5WN4/jHmCEVJOpm4exLCeXOcZnNzXi0BGv0AS8FsGwyQ4k0Ujmui1NMQs6qsTy+amjjpr9rmz4Q==} + dependencies: + css-select: 4.3.0 + he: 1.2.0 + dev: false + + /node-int64@0.4.0: + resolution: {integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} + dev: true + + /node-releases@2.0.12: + resolution: {integrity: sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==} + + /node-releases@2.0.2: + resolution: {integrity: sha512-XxYDdcQ6eKqp/YjI+tb2C5WM2LgjnZrfYg4vgQt49EK268b6gYCHsBLrK2qvJo4FmCtqmKezb0WZFK4fkrZNsg==} + dev: false + + /nopt@5.0.0: + resolution: {integrity: sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==} + engines: {node: '>=6'} + hasBin: true + dependencies: + abbrev: 1.1.1 + dev: false + + /normalize-package-data@2.5.0: + resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} + dependencies: + hosted-git-info: 2.8.9 + resolve: 1.22.1 + semver: 5.7.1 + validate-npm-package-license: 3.0.4 + + /normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + /normalize-range@0.1.2: + resolution: {integrity: sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=} + engines: {node: '>=0.10.0'} + dev: false + + /npm-run-all@4.1.5: + resolution: {integrity: sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==} + engines: {node: '>= 4'} + hasBin: true + dependencies: + ansi-styles: 3.2.1 + chalk: 2.4.2 + cross-spawn: 6.0.5 + memorystream: 0.3.1 + minimatch: 3.0.4 + pidtree: 0.3.1 + read-pkg: 3.0.0 + shell-quote: 1.7.3 + string.prototype.padend: 3.1.3 + dev: false + + /npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + dependencies: + path-key: 3.1.1 + dev: true + + /npmlog@5.0.1: + resolution: {integrity: sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==} + dependencies: + are-we-there-yet: 2.0.0 + console-control-strings: 1.1.0 + gauge: 3.0.2 + set-blocking: 2.0.0 + + /nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + dependencies: + boolbase: 1.0.0 + dev: false + + /nwsapi@2.2.5: + resolution: {integrity: sha512-6xpotnECFy/og7tKSBVmUNft7J3jyXAka4XvG6AUhFWRz+Q/Ljus7znJAA3bxColfQLdS+XsjoodtJfCgeTEFQ==} + dev: false + + /object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + /object-hash@3.0.0: + resolution: {integrity: 
sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} + engines: {node: '>= 6'} + + /object-inspect@1.12.3: + resolution: {integrity: sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==} + + /object-is@1.1.5: + resolution: {integrity: sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + dev: true + + /object-keys@1.1.1: + resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} + engines: {node: '>= 0.4'} + + /object.assign@4.1.4: + resolution: {integrity: sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + has-symbols: 1.0.3 + object-keys: 1.1.1 + dev: false + + /on-finished@2.4.1: + resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} + engines: {node: '>= 0.8'} + dependencies: + ee-first: 1.1.1 + dev: true + + /on-headers@1.0.2: + resolution: {integrity: sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==} + engines: {node: '>= 0.8'} + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + + /onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + dependencies: + mimic-fn: 2.1.0 + + /open@7.4.2: + resolution: {integrity: sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==} + engines: {node: '>=8'} + dependencies: + is-docker: 2.2.1 + is-wsl: 2.2.0 + dev: true + + /open@8.4.2: + resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} + engines: {node: '>=12'} + dependencies: + define-lazy-prop: 2.0.0 + is-docker: 2.2.1 + is-wsl: 2.2.0 + dev: true + + /optionator@0.9.1: + resolution: {integrity: sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==} + engines: {node: '>= 0.8.0'} + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.3 + dev: true + + /optionator@0.9.3: + resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==} + engines: {node: '>= 0.8.0'} + dependencies: + '@aashutoshrathi/word-wrap': 1.2.6 + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: false + + /ora@5.4.1: + resolution: {integrity: sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==} + engines: {node: '>=10'} + dependencies: + bl: 4.1.0 + chalk: 4.1.2 + cli-cursor: 3.1.0 + cli-spinners: 2.7.0 + is-interactive: 1.0.0 + is-unicode-supported: 0.1.0 + log-symbols: 4.1.0 + strip-ansi: 6.0.1 + wcwidth: 1.0.1 + + /os-tmpdir@1.0.2: + resolution: {integrity: sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==} + engines: {node: '>=0.10.0'} + dev: false + + /outdent@0.5.0: + resolution: {integrity: 
sha512-/jHxFIzoMXdqPzTaCpFzAAWhpkSjZPF4Vsn6jAfNpmbH/ymsmd7Qc6VE9BGn0L6YMj6uwpQLxCECpus4ukKS9Q==} + dev: false + + /outvariant@1.4.0: + resolution: {integrity: sha512-AlWY719RF02ujitly7Kk/0QlV+pXGFDHrHf9O2OKqyqgBieaPOIeuSkL8sRK6j2WK+/ZAURq2kZsY0d8JapUiw==} + dev: false + + /p-filter@2.1.0: + resolution: {integrity: sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==} + engines: {node: '>=8'} + dependencies: + p-map: 2.1.0 + dev: false + + /p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + dependencies: + p-try: 2.2.0 + + /p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + dependencies: + yocto-queue: 0.1.0 + + /p-limit@4.0.0: + resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + yocto-queue: 1.0.0 + dev: false + + /p-locate@3.0.0: + resolution: {integrity: sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==} + engines: {node: '>=6'} + dependencies: + p-limit: 2.3.0 + dev: true + + /p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + dependencies: + p-limit: 2.3.0 + + /p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + dependencies: + p-limit: 3.1.0 + + /p-map@2.1.0: + resolution: {integrity: sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==} + engines: {node: '>=6'} + dev: false + + /p-map@4.0.0: + resolution: {integrity: sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==} + engines: {node: '>=10'} + dependencies: + aggregate-error: 3.1.0 + dev: true + + /p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + + /pako@0.2.9: + resolution: {integrity: sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==} + dev: true + + /parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + dependencies: + callsites: 3.1.0 + + /parse-json@4.0.0: + resolution: {integrity: sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=} + engines: {node: '>=4'} + dependencies: + error-ex: 1.3.2 + json-parse-better-errors: 1.0.2 + dev: false + + /parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + dependencies: + '@babel/code-frame': 7.16.7 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + /parse-node-version@1.0.1: + resolution: {integrity: sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==} + engines: {node: '>= 0.10'} + + /parse5@7.1.2: + resolution: {integrity: sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==} + dependencies: + entities: 4.5.0 + dev: false + + 
/parseurl@1.3.3: + resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + dev: true + + /path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + dev: false + + /path-exists@3.0.0: + resolution: {integrity: sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==} + engines: {node: '>=4'} + dev: true + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + /path-key@2.0.1: + resolution: {integrity: sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=} + engines: {node: '>=4'} + dev: false + + /path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + /path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + /path-to-regexp@0.1.7: + resolution: {integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==} + dev: true + + /path-to-regexp@6.2.1: + resolution: {integrity: sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==} + dev: false + + /path-type@3.0.0: + resolution: {integrity: sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==} + engines: {node: '>=4'} + dependencies: + pify: 3.0.0 + dev: false + + /path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + /pathe@1.1.1: + resolution: {integrity: sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==} + + /pathval@1.1.1: + resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} + dev: false + + /peek-stream@1.1.3: + resolution: {integrity: sha512-FhJ+YbOSBb9/rIl2ZeE/QHEsWn7PqNYt8ARAY3kIgNGOk13g9FGyIY6JIl/xB/3TFRVoTv5as0l11weORrTekA==} + dependencies: + buffer-from: 1.1.2 + duplexify: 3.7.1 + through2: 2.0.5 + dev: true + + /pend@1.2.0: + resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==} + dev: true + + /periscopic@3.1.0: + resolution: {integrity: sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==} + dependencies: + '@types/estree': 1.0.0 + estree-walker: 3.0.3 + is-reference: 3.0.1 + + /picocolors@1.0.0: + resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} + + /picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + /pidtree@0.3.1: + resolution: {integrity: sha512-qQbW94hLHEqCg7nhby4yRC7G2+jYHY4Rguc2bjw7Uug4GIJuu1tvf2uHaZv5Q8zdt+WKJ6qK1FOI6amaWUo5FA==} + engines: {node: '>=0.10'} + hasBin: true + dev: false + + /pify@2.3.0: + resolution: {integrity: 
sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} + engines: {node: '>=0.10.0'} + + /pify@3.0.0: + resolution: {integrity: sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==} + engines: {node: '>=4'} + dev: false + + /pify@4.0.1: + resolution: {integrity: sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==} + engines: {node: '>=6'} + + /pirates@4.0.6: + resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==} + engines: {node: '>= 6'} + dev: true + + /pkg-dir@3.0.0: + resolution: {integrity: sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==} + engines: {node: '>=6'} + dependencies: + find-up: 3.0.0 + dev: true + + /pkg-dir@4.2.0: + resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==} + engines: {node: '>=8'} + dependencies: + find-up: 4.1.0 + + /pkg-dir@5.0.0: + resolution: {integrity: sha512-NPE8TDbzl/3YQYY7CSS228s3g2ollTFnc+Qi3tqmqJp9Vg2ovUpixcJEo2HJScN2Ez+kEaal6y70c0ehqJBJeA==} + engines: {node: '>=10'} + dependencies: + find-up: 5.0.0 + dev: true + + /pkg-types@1.0.3: + resolution: {integrity: sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==} + dependencies: + jsonc-parser: 3.2.0 + mlly: 1.4.0 + pathe: 1.1.1 + dev: false + + /playwright-core@1.37.1: + resolution: {integrity: sha512-17EuQxlSIYCmEMwzMqusJ2ztDgJePjrbttaefgdsiqeLWidjYz9BxXaTaZWxH1J95SHGk6tjE+dwgWILJoUZfA==} + engines: {node: '>=16'} + hasBin: true + dev: false + + /plotly.js-dist-min@2.11.1: + resolution: {integrity: sha512-F9WWNht0D3yBLZGHbLoJNfvplXvy+GUPSsA/lCbMuYd/UwzSu6Vmyprxlps9Einw1LDS1hYBrJeioK0lE3ieXA==} + dev: false + + /polished@4.2.2: + resolution: {integrity: sha512-Sz2Lkdxz6F2Pgnpi9U5Ng/WdWAUZxmHrNPoVlm3aAemxoy2Qy7LGjQg4uf8qKelDAUW94F4np3iH2YPf2qefcQ==} + engines: {node: '>=10'} + dependencies: + '@babel/runtime': 7.22.6 + dev: true + + /polka@1.0.0-next.22: + resolution: {integrity: sha512-a7tsZy5gFbJr0aUltZS97xCkbPglXuD67AMvTyZX7BTDBH384FWf0ZQF6rPvdutSxnO1vUlXM2zSLf5tCKk5RA==} + engines: {node: '>=8'} + dependencies: + '@polka/url': 1.0.0-next.21 + trouter: 3.2.0 + dev: false + + /pollen-css@4.6.1: + resolution: {integrity: sha512-w4doC2Nu2l16VyVhShcLqPOuIEuB/thGKxCwOZv9ef/kmoU4iwFMbdWJyjWMIp4A2+fRJ5E79nalpxMNvkEJvQ==} + hasBin: true + dependencies: + case: 1.6.3 + commander: 9.4.1 + css-vars-ponyfill: 2.4.8 + deepmerge: 4.2.2 + javascript-stringify: 2.1.0 + lilconfig: 2.0.6 + map-obj: 5.0.2 + prettier: 2.8.1 + dev: false + + /postcss-custom-media@10.0.0(postcss@8.4.27): + resolution: {integrity: sha512-NxDn7C6GJ7X8TsWOa8MbCdq9rLERRLcPfQSp856k1jzMreL8X9M6iWk35JjPRIb9IfRnVohmxAylDRx7n4Rv4g==} + engines: {node: ^14 || ^16 || >=18} + peerDependencies: + postcss: ^8.4 + dependencies: + '@csstools/cascade-layer-name-parser': 1.0.3(@csstools/css-parser-algorithms@2.3.0)(@csstools/css-tokenizer@2.1.1) + '@csstools/css-parser-algorithms': 2.3.0(@csstools/css-tokenizer@2.1.1) + '@csstools/css-tokenizer': 2.1.1 + '@csstools/media-query-list-parser': 2.1.2(@csstools/css-parser-algorithms@2.3.0)(@csstools/css-tokenizer@2.1.1) + postcss: 8.4.27 + dev: false + + /postcss-import@14.1.0(postcss@8.4.21): + resolution: {integrity: sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==} + engines: {node: '>=10.0.0'} + 
peerDependencies: + postcss: ^8.0.0 + dependencies: + postcss: 8.4.21 + postcss-value-parser: 4.2.0 + read-cache: 1.0.0 + resolve: 1.22.1 + dev: true + + /postcss-import@14.1.0(postcss@8.4.27): + resolution: {integrity: sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==} + engines: {node: '>=10.0.0'} + peerDependencies: + postcss: ^8.0.0 + dependencies: + postcss: 8.4.27 + postcss-value-parser: 4.2.0 + read-cache: 1.0.0 + resolve: 1.22.1 + + /postcss-js@4.0.0(postcss@8.4.21): + resolution: {integrity: sha512-77QESFBwgX4irogGVPgQ5s07vLvFqWr228qZY+w6lW599cRlK/HmnlivnnVUxkjHnCu4J16PDMHcH+e+2HbvTQ==} + engines: {node: ^12 || ^14 || >= 16} + peerDependencies: + postcss: ^8.3.3 + dependencies: + camelcase-css: 2.0.1 + postcss: 8.4.21 + dev: true + + /postcss-js@4.0.0(postcss@8.4.27): + resolution: {integrity: sha512-77QESFBwgX4irogGVPgQ5s07vLvFqWr228qZY+w6lW599cRlK/HmnlivnnVUxkjHnCu4J16PDMHcH+e+2HbvTQ==} + engines: {node: ^12 || ^14 || >= 16} + peerDependencies: + postcss: ^8.3.3 + dependencies: + camelcase-css: 2.0.1 + postcss: 8.4.27 + + /postcss-less@6.0.0(postcss@8.4.27): + resolution: {integrity: sha512-FPX16mQLyEjLzEuuJtxA8X3ejDLNGGEG503d2YGZR5Ask1SpDN8KmZUMpzCvyalWRywAn1n1VOA5dcqfCLo5rg==} + engines: {node: '>=12'} + peerDependencies: + postcss: ^8.3.5 + dependencies: + postcss: 8.4.27 + dev: false + + /postcss-load-config@3.1.4(postcss@8.4.21): + resolution: {integrity: sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==} + engines: {node: '>= 10'} + peerDependencies: + postcss: '>=8.0.9' + ts-node: '>=9.0.0' + peerDependenciesMeta: + postcss: + optional: true + ts-node: + optional: true + dependencies: + lilconfig: 2.0.6 + postcss: 8.4.21 + yaml: 1.10.2 + dev: true + + /postcss-load-config@3.1.4(postcss@8.4.27): + resolution: {integrity: sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==} + engines: {node: '>= 10'} + peerDependencies: + postcss: '>=8.0.9' + ts-node: '>=9.0.0' + peerDependenciesMeta: + postcss: + optional: true + ts-node: + optional: true + dependencies: + lilconfig: 2.0.6 + postcss: 8.4.27 + yaml: 1.10.2 + + /postcss-loader@7.3.3(postcss@8.4.27)(webpack@5.88.1): + resolution: {integrity: sha512-YgO/yhtevGO/vJePCQmTxiaEwER94LABZN0ZMT4A0vsak9TpO+RvKRs7EmJ8peIlB9xfXCsS7M8LjqncsUZ5HA==} + engines: {node: '>= 14.15.0'} + peerDependencies: + postcss: ^7.0.0 || ^8.0.1 + webpack: ^5.0.0 + dependencies: + cosmiconfig: 8.2.0 + jiti: 1.18.2 + postcss: 8.4.27 + semver: 7.4.0 + webpack: 5.88.1(esbuild@0.17.14) + dev: true + + /postcss-modules-extract-imports@3.0.0(postcss@8.4.27): + resolution: {integrity: sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + dependencies: + postcss: 8.4.27 + dev: true + + /postcss-modules-local-by-default@4.0.3(postcss@8.4.27): + resolution: {integrity: sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + dependencies: + icss-utils: 5.1.0(postcss@8.4.27) + postcss: 8.4.27 + postcss-selector-parser: 6.0.13 + postcss-value-parser: 4.2.0 + dev: true + + /postcss-modules-scope@3.0.0(postcss@8.4.27): + resolution: {integrity: sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + 
postcss: ^8.1.0 + dependencies: + postcss: 8.4.27 + postcss-selector-parser: 6.0.13 + dev: true + + /postcss-modules-values@4.0.0(postcss@8.4.27): + resolution: {integrity: sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + dependencies: + icss-utils: 5.1.0(postcss@8.4.27) + postcss: 8.4.27 + dev: true + + /postcss-nested@5.0.6(postcss@8.4.21): + resolution: {integrity: sha512-rKqm2Fk0KbA8Vt3AdGN0FB9OBOMDVajMG6ZCf/GoHgdxUJ4sBFp0A/uMIRm+MJUdo33YXEtjqIz8u7DAp8B7DA==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + dependencies: + postcss: 8.4.21 + postcss-selector-parser: 6.0.9 + dev: true + + /postcss-nested@5.0.6(postcss@8.4.27): + resolution: {integrity: sha512-rKqm2Fk0KbA8Vt3AdGN0FB9OBOMDVajMG6ZCf/GoHgdxUJ4sBFp0A/uMIRm+MJUdo33YXEtjqIz8u7DAp8B7DA==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + dependencies: + postcss: 8.4.27 + postcss-selector-parser: 6.0.9 + + /postcss-prefix-selector@1.16.0(postcss@8.4.21): + resolution: {integrity: sha512-rdVMIi7Q4B0XbXqNUEI+Z4E+pueiu/CS5E6vRCQommzdQ/sgsS4dK42U7GX8oJR+TJOtT+Qv3GkNo6iijUMp3Q==} + peerDependencies: + postcss: '>4 <9' + dependencies: + postcss: 8.4.21 + dev: false + + /postcss-prefix-selector@1.16.0(postcss@8.4.27): + resolution: {integrity: sha512-rdVMIi7Q4B0XbXqNUEI+Z4E+pueiu/CS5E6vRCQommzdQ/sgsS4dK42U7GX8oJR+TJOtT+Qv3GkNo6iijUMp3Q==} + peerDependencies: + postcss: '>4 <9' + dependencies: + postcss: 8.4.27 + dev: false + + /postcss-safe-parser@6.0.0(postcss@8.4.27): + resolution: {integrity: sha512-FARHN8pwH+WiS2OPCxJI8FuRJpTVnn6ZNFiqAM2aeW2LwTHWWmWgIyKC6cUo0L8aeKiF/14MNvnpls6R2PBeMQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.3.3 + dependencies: + postcss: 8.4.27 + dev: false + + /postcss-scss@4.0.6(postcss@8.4.27): + resolution: {integrity: sha512-rLDPhJY4z/i4nVFZ27j9GqLxj1pwxE80eAzUNRMXtcpipFYIeowerzBgG3yJhMtObGEXidtIgbUpQ3eLDsf5OQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.4.19 + dependencies: + postcss: 8.4.27 + dev: false + + /postcss-selector-parser@6.0.13: + resolution: {integrity: sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==} + engines: {node: '>=4'} + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + /postcss-selector-parser@6.0.9: + resolution: {integrity: sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ==} + engines: {node: '>=4'} + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + /postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + /postcss@8.4.21: + resolution: {integrity: sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==} + engines: {node: ^10 || ^12 || >=14} + dependencies: + nanoid: 3.3.6 + picocolors: 1.0.0 + source-map-js: 1.0.2 + + /postcss@8.4.23: + resolution: {integrity: sha512-bQ3qMcpF6A/YjR55xtoTr0jGOlnPOKAIMdOWiv0EIT6HVPEaJiJB4NLljSbiHoC2RX7DN5Uvjtpbg1NPdwv1oA==} + engines: {node: ^10 || ^12 || >=14} + dependencies: + nanoid: 3.3.6 + picocolors: 1.0.0 + source-map-js: 1.0.2 + + /postcss@8.4.27: + resolution: {integrity: sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==} + engines: {node: ^10 || ^12 || >=14} + dependencies: + nanoid: 3.3.6 + picocolors: 1.0.0 + source-map-js: 1.0.2 + + 
/preferred-pm@3.0.3: + resolution: {integrity: sha512-+wZgbxNES/KlJs9q40F/1sfOd/j7f1O9JaHcW5Dsn3aUUOZg3L2bjpVUcKV2jvtElYfoTuQiNeMfQJ4kwUAhCQ==} + engines: {node: '>=10'} + dependencies: + find-up: 5.0.0 + find-yarn-workspace-root2: 1.2.16 + path-exists: 4.0.0 + which-pm: 2.0.0 + dev: false + + /prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + /prettier-plugin-css-order@2.0.0(postcss@8.4.27)(prettier@3.0.0): + resolution: {integrity: sha512-BCstZZ78G6FH/Ms1hl5ZLnSMso3ModM+SEZY8QrJzgyxjuCjSrS2bveeLq9wJMn20h9tAb6+C2FZPh5qdZ2qsw==} + engines: {node: '>=16'} + peerDependencies: + prettier: 3.x + dependencies: + css-declaration-sorter: 7.0.0(postcss@8.4.27) + postcss-less: 6.0.0(postcss@8.4.27) + postcss-scss: 4.0.6(postcss@8.4.27) + prettier: 3.0.0 + transitivePeerDependencies: + - postcss + dev: false + + /prettier-plugin-svelte@3.0.0(prettier@3.0.0)(svelte@3.57.0): + resolution: {integrity: sha512-l3RQcPty2UBCoRh3yb9c5XCAmxkrc4BptAnbd5acO1gmSJtChOWkiEjnOvh7hvmtT4V80S8gXCOKAq8RNeIzSw==} + peerDependencies: + prettier: ^3.0.0 + svelte: ^3.2.0 || ^4.0.0-next.0 + dependencies: + prettier: 3.0.0 + svelte: 3.57.0 + dev: true + + /prettier-plugin-svelte@3.0.0(prettier@3.0.0)(svelte@4.0.0): + resolution: {integrity: sha512-l3RQcPty2UBCoRh3yb9c5XCAmxkrc4BptAnbd5acO1gmSJtChOWkiEjnOvh7hvmtT4V80S8gXCOKAq8RNeIzSw==} + peerDependencies: + prettier: ^3.0.0 + svelte: ^3.2.0 || ^4.0.0-next.0 + dependencies: + prettier: 3.0.0 + svelte: 4.0.0 + dev: false + + /prettier@2.8.1: + resolution: {integrity: sha512-lqGoSJBQNJidqCHE80vqZJHWHRFoNYsSpP9AjFhlhi9ODCJA541svILes/+/1GM3VaL/abZi7cpFzOpdR9UPKg==} + engines: {node: '>=10.13.0'} + hasBin: true + + /prettier@3.0.0: + resolution: {integrity: sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==} + engines: {node: '>=14'} + hasBin: true + + /pretty-format@27.5.1: + resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==} + engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} + dependencies: + ansi-regex: 5.0.1 + ansi-styles: 5.2.0 + react-is: 17.0.2 + + /pretty-format@29.5.0: + resolution: {integrity: sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/schemas': 29.4.3 + ansi-styles: 5.2.0 + react-is: 18.2.0 + dev: false + + /pretty-hrtime@1.0.3: + resolution: {integrity: sha512-66hKPCr+72mlfiSjlEB1+45IjXSqvVAIy6mocupoww4tBFE9R9IhwwUGoI4G++Tc9Aq+2rxOt0RFU6gPcrte0A==} + engines: {node: '>= 0.8'} + dev: true + + /prism-svelte@0.4.7: + resolution: {integrity: sha512-yABh19CYbM24V7aS7TuPYRNMqthxwbvx6FF/Rw920YbyBWO3tnyPIqRMgHuSVsLmuHkkBS1Akyof463FVdkeDQ==} + dev: false + + /prismjs@1.29.0: + resolution: {integrity: sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==} + engines: {node: '>=6'} + + /process-nextick-args@2.0.1: + resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} + dev: true + + /process@0.11.10: + resolution: {integrity: sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==} + engines: {node: '>= 0.6.0'} + dev: true + + /progress@2.0.3: + resolution: {integrity: 
sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==} + engines: {node: '>=0.4.0'} + dev: true + + /prompts@2.4.2: + resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} + engines: {node: '>= 6'} + dependencies: + kleur: 3.0.3 + sisteransi: 1.0.5 + dev: true + + /prop-types@15.8.1: + resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==} + dependencies: + loose-envify: 1.4.0 + object-assign: 4.1.1 + react-is: 16.13.1 + dev: true + + /proxy-addr@2.0.7: + resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} + engines: {node: '>= 0.10'} + dependencies: + forwarded: 0.2.0 + ipaddr.js: 1.9.1 + dev: true + + /proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: true + + /prr@1.0.1: + resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} + requiresBuild: true + optional: true + + /pseudomap@1.0.2: + resolution: {integrity: sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==} + dev: false + + /psl@1.9.0: + resolution: {integrity: sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==} + dev: false + + /pump@2.0.1: + resolution: {integrity: sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==} + dependencies: + end-of-stream: 1.4.4 + once: 1.4.0 + dev: true + + /pump@3.0.0: + resolution: {integrity: sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==} + dependencies: + end-of-stream: 1.4.4 + once: 1.4.0 + dev: true + + /pumpify@1.5.1: + resolution: {integrity: sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==} + dependencies: + duplexify: 3.7.1 + inherits: 2.0.4 + pump: 2.0.1 + dev: true + + /punycode@2.3.0: + resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} + engines: {node: '>=6'} + + /puppeteer-core@2.1.1: + resolution: {integrity: sha512-n13AWriBMPYxnpbb6bnaY5YoY6rGj8vPLrz6CZF3o0qJNEwlcfJVxBzYZ0NJsQ21UbdJoijPCDrM++SUVEz7+w==} + engines: {node: '>=8.16.0'} + dependencies: + '@types/mime-types': 2.1.1 + debug: 4.3.4 + extract-zip: 1.7.0 + https-proxy-agent: 4.0.0 + mime: 2.6.0 + mime-types: 2.1.34 + progress: 2.0.3 + proxy-from-env: 1.1.0 + rimraf: 2.7.1 + ws: 6.2.2 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /pyodide@0.24.0: + resolution: {integrity: sha512-k2TzIbAYQ/ucme0LYv4KmxKDR15m68/3pvPKmUVtdpzn6K9Qt1NLHeZI1RErWJE6PtlQI8UA0Q21wrtu3XPUMg==} + dependencies: + base-64: 1.0.0 + ws: 8.13.0(bufferutil@4.0.7) + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /qs@6.11.0: + resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} + engines: {node: '>=0.6'} + dependencies: + side-channel: 1.0.4 + dev: true + + /qs@6.11.2: + resolution: {integrity: sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==} + engines: {node: '>=0.6'} + dependencies: + side-channel: 1.0.4 + dev: true + + /querystringify@2.2.0: + resolution: 
{integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} + dev: false + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + /quick-lru@4.0.1: + resolution: {integrity: sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==} + engines: {node: '>=8'} + dev: false + + /quick-lru@5.1.1: + resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==} + engines: {node: '>=10'} + + /ramda@0.29.0: + resolution: {integrity: sha512-BBea6L67bYLtdbOqfp8f58fPMqEwx0doL+pAi8TZyp2YWz8R9G8z9x75CZI8W+ftqhFHCpEX2cRnUUXK130iKA==} + dev: true + + /randombytes@2.1.0: + resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + dev: true + + /raw-body@2.5.1: + resolution: {integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==} + engines: {node: '>= 0.8'} + dependencies: + bytes: 3.1.2 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + unpipe: 1.0.0 + dev: true + + /react-colorful@5.6.1(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-1exovf0uGTGyq5mXQT0zgQ80uvj2PCwvF8zY1RN9/vbJVSjSo3fsB/4L3ObbF7u70NduSiK4xu4Y6q1MHoUGEw==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + dependencies: + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /react-dom@18.2.0(react@18.2.0): + resolution: {integrity: sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==} + peerDependencies: + react: ^18.2.0 + dependencies: + loose-envify: 1.4.0 + react: 18.2.0 + scheduler: 0.23.0 + dev: true + + /react-inspector@6.0.2(react@18.2.0): + resolution: {integrity: sha512-x+b7LxhmHXjHoU/VrFAzw5iutsILRoYyDq97EDYdFpPLcvqtEzk4ZSZSQjnFPbr5T57tLXnHcqFYoN1pI6u8uQ==} + peerDependencies: + react: ^16.8.4 || ^17.0.0 || ^18.0.0 + dependencies: + react: 18.2.0 + dev: true + + /react-is@16.13.1: + resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} + dev: true + + /react-is@17.0.2: + resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==} + + /react-is@18.2.0: + resolution: {integrity: sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==} + dev: false + + /react-resize-detector@7.1.2(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-zXnPJ2m8+6oq9Nn8zsep/orts9vQv3elrpA+R8XTcW7DVVUJ9vwDwMXaBtykAYjMnkCIaOoK9vObyR7ZgFNlOw==} + peerDependencies: + react: ^16.0.0 || ^17.0.0 || ^18.0.0 + react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0 + dependencies: + lodash: 4.17.21 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /react@18.2.0: + resolution: {integrity: sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==} + engines: {node: '>=0.10.0'} + dependencies: + loose-envify: 1.4.0 + dev: true + + /read-cache@1.0.0: + resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} + dependencies: + pify: 
2.3.0 + + /read-pkg-up@7.0.1: + resolution: {integrity: sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==} + engines: {node: '>=8'} + dependencies: + find-up: 4.1.0 + read-pkg: 5.2.0 + type-fest: 0.8.1 + + /read-pkg@3.0.0: + resolution: {integrity: sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=} + engines: {node: '>=4'} + dependencies: + load-json-file: 4.0.0 + normalize-package-data: 2.5.0 + path-type: 3.0.0 + dev: false + + /read-pkg@5.2.0: + resolution: {integrity: sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==} + engines: {node: '>=8'} + dependencies: + '@types/normalize-package-data': 2.4.1 + normalize-package-data: 2.5.0 + parse-json: 5.2.0 + type-fest: 0.6.0 + + /read-yaml-file@1.1.0: + resolution: {integrity: sha512-VIMnQi/Z4HT2Fxuwg5KrY174U1VdUIASQVWXXyqtNRtxSr9IYkn1rsI6Tb6HsrHCmB7gVpNwX6JxPTHcH6IoTA==} + engines: {node: '>=6'} + dependencies: + graceful-fs: 4.2.9 + js-yaml: 3.14.1 + pify: 4.0.1 + strip-bom: 3.0.0 + dev: false + + /readable-stream@2.3.8: + resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==} + dependencies: + core-util-is: 1.0.3 + inherits: 2.0.4 + isarray: 1.0.0 + process-nextick-args: 2.0.1 + safe-buffer: 5.1.2 + string_decoder: 1.1.1 + util-deprecate: 1.0.2 + dev: true + + /readable-stream@3.6.0: + resolution: {integrity: sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==} + engines: {node: '>= 6'} + dependencies: + inherits: 2.0.4 + string_decoder: 1.1.1 + util-deprecate: 1.0.2 + + /readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + dependencies: + picomatch: 2.3.1 + + /recast@0.21.5: + resolution: {integrity: sha512-hjMmLaUXAm1hIuTqOdeYObMslq/q+Xff6QE3Y2P+uoHAg2nmVlLBps2hzh1UJDdMtDTMXOFewK6ky51JQIeECg==} + engines: {node: '>= 4'} + dependencies: + ast-types: 0.15.2 + esprima: 4.0.1 + source-map: 0.6.1 + tslib: 2.6.1 + dev: true + + /recast@0.23.2: + resolution: {integrity: sha512-Qv6cPfVZyMOtPszK6PgW70pUgm7gPlFitAPf0Q69rlOA0zLw2XdDcNmPbVGYicFGT9O8I7TZ/0ryJD+6COvIPw==} + engines: {node: '>= 4'} + dependencies: + assert: 2.0.0 + ast-types: 0.16.1 + esprima: 4.0.1 + source-map: 0.6.1 + tslib: 2.6.1 + dev: true + + /rechoir@0.6.2: + resolution: {integrity: sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==} + engines: {node: '>= 0.10'} + dependencies: + resolve: 1.22.1 + dev: true + + /recorder-audio-worklet-processor@5.0.8: + resolution: {integrity: sha512-TBN5M+ZFG4F573sJKgYsvlBnhQGV0lxOBRTEP+gkezaArZK8EBpN1QgmcldWY4Ay3ZNJnLLDWDWdhoi0A+mzWQ==} + dependencies: + '@babel/runtime': 7.22.6 + tslib: 2.6.1 + dev: false + + /recorder-audio-worklet@6.0.13: + resolution: {integrity: sha512-wT1ZwG1C5BAbGiKOEOAyvPjxb1zMxnxa6B3NaTwY0Jq5yjU8koqVCH7VO0YhYeVGtWFTSS916CuyyoRXTOVZkQ==} + dependencies: + '@babel/runtime': 7.22.6 + broker-factory: 3.0.84 + fast-unique-numbers: 8.0.7 + recorder-audio-worklet-processor: 5.0.8 + standardized-audio-context: 25.3.55 + subscribable-things: 2.1.23 + tslib: 2.6.1 + worker-factory: 7.0.9 + dev: false + + /redent@3.0.0: + resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} + engines: {node: '>=8'} + dependencies: + indent-string: 4.0.0 + strip-indent: 3.0.0 + dev: false + + /regenerate-unicode-properties@10.1.0: + 
resolution: {integrity: sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==} + engines: {node: '>=4'} + dependencies: + regenerate: 1.4.2 + dev: true + + /regenerate@1.4.2: + resolution: {integrity: sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==} + dev: true + + /regenerator-runtime@0.13.11: + resolution: {integrity: sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==} + + /regenerator-transform@0.15.1: + resolution: {integrity: sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==} + dependencies: + '@babel/runtime': 7.22.6 + dev: true + + /regex-parser@2.2.11: + resolution: {integrity: sha512-jbD/FT0+9MBU2XAZluI7w2OBs1RBi6p9M83nkoZayQXXU9e8Robt69FcZc7wU4eJD/YFTjn1JdCk3rbMJajz8Q==} + dev: true + + /regexp.prototype.flags@1.5.0: + resolution: {integrity: sha512-0SutC3pNudRKgquxGoRGIz946MZVHqbNfPjBdxeOhBrdgDKlRoXmYLQN9xRbrR09ZXWeGAdPuif7egofn6v5LA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + functions-have-names: 1.2.3 + dev: false + + /regexparam@1.3.0: + resolution: {integrity: sha512-6IQpFBv6e5vz1QAqI+V4k8P2e/3gRrqfCJ9FI+O1FLQTO+Uz6RXZEZOPmTJ6hlGj7gkERzY5BRCv09whKP96/g==} + engines: {node: '>=6'} + dev: false + + /regexpp@3.2.0: + resolution: {integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==} + engines: {node: '>=8'} + dev: true + + /regexpu-core@5.3.2: + resolution: {integrity: sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==} + engines: {node: '>=4'} + dependencies: + '@babel/regjsgen': 0.8.0 + regenerate: 1.4.2 + regenerate-unicode-properties: 10.1.0 + regjsparser: 0.9.1 + unicode-match-property-ecmascript: 2.0.0 + unicode-match-property-value-ecmascript: 2.1.0 + dev: true + + /regjsparser@0.9.1: + resolution: {integrity: sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==} + hasBin: true + dependencies: + jsesc: 0.5.0 + dev: true + + /remark-external-links@8.0.0: + resolution: {integrity: sha512-5vPSX0kHoSsqtdftSHhIYofVINC8qmp0nctkeU9YoJwV3YfiBRiI6cbFRJ0oI/1F9xS+bopXG0m2KS8VFscuKA==} + dependencies: + extend: 3.0.2 + is-absolute-url: 3.0.3 + mdast-util-definitions: 4.0.0 + space-separated-tokens: 1.1.5 + unist-util-visit: 2.0.3 + dev: true + + /remark-slug@6.1.0: + resolution: {integrity: sha512-oGCxDF9deA8phWvxFuyr3oSJsdyUAxMFbA0mZ7Y1Sas+emILtO+e5WutF9564gDsEN4IXaQXm5pFo6MLH+YmwQ==} + dependencies: + github-slugger: 1.5.0 + mdast-util-to-string: 1.1.0 + unist-util-visit: 2.0.3 + dev: true + + /require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + dev: false + + /require-main-filename@2.0.0: + resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} + dev: false + + /requires-port@1.0.0: + resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} + dev: false + + /resize-observer-polyfill@1.5.1: + resolution: {integrity: sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==} + dev: false + + /resolve-from@4.0.0: + resolution: {integrity: 
sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + /resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: '>=8'} + + /resolve-url-loader@5.0.0: + resolution: {integrity: sha512-uZtduh8/8srhBoMx//5bwqjQ+rfYOUq8zC9NrMUGtjBiGTtFJM42s58/36+hTqeqINcnYe08Nj3LkK9lW4N8Xg==} + engines: {node: '>=12'} + dependencies: + adjust-sourcemap-loader: 4.0.0 + convert-source-map: 1.9.0 + loader-utils: 2.0.4 + postcss: 8.4.27 + source-map: 0.6.1 + dev: true + + /resolve@1.22.1: + resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} + hasBin: true + dependencies: + is-core-module: 2.9.0 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + /restore-cursor@3.1.0: + resolution: {integrity: sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==} + engines: {node: '>=8'} + dependencies: + onetime: 5.1.2 + signal-exit: 3.0.7 + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + /rimraf@2.6.3: + resolution: {integrity: sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==} + hasBin: true + dependencies: + glob: 7.2.0 + dev: true + + /rimraf@2.7.1: + resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==} + hasBin: true + dependencies: + glob: 7.2.0 + + /rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + dependencies: + glob: 7.2.0 + + /robust-predicates@3.0.1: + resolution: {integrity: sha512-ndEIpszUHiG4HtDsQLeIuMvRsDnn8c8rYStabochtUeCvfuvNptb5TUbVD68LRAILPX7p9nqQGh4xJgn3EHS/g==} + dev: false + + /rollup@3.21.6: + resolution: {integrity: sha512-SXIICxvxQxR3D4dp/3LDHZIJPC8a4anKMHd4E3Jiz2/JnY+2bEjqrOokAauc5ShGVNFHlEFjBXAXlaxkJqIqSg==} + engines: {node: '>=14.18.0', npm: '>=8.0.0'} + hasBin: true + optionalDependencies: + fsevents: 2.3.2 + + /rrweb-cssom@0.6.0: + resolution: {integrity: sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==} + dev: false + + /run-async@2.4.1: + resolution: {integrity: sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==} + engines: {node: '>=0.12.0'} + dev: false + + /run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + dependencies: + queue-microtask: 1.2.3 + + /rw@1.3.3: + resolution: {integrity: sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==} + dev: false + + /rxjs-interop@2.0.0: + resolution: {integrity: sha512-ASEq9atUw7lualXB+knvgtvwkCEvGWV2gDD/8qnASzBkzEARZck9JAyxmY8OS6Nc1pCPEgDTKNcx+YqqYfzArw==} + dev: false + + /rxjs@7.8.0: + resolution: {integrity: sha512-F2+gxDshqmIub1KdvZkaEfGDwLNpPvk9Fs6LD/MyQxNgMds/WH9OdDDXOmxUZpME+iSK3rQCctkL0DYyytUqMg==} + dependencies: + tslib: 2.6.1 + dev: false + + /sade@1.8.1: + resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==} + engines: {node: '>=6'} + dependencies: + mri: 1.2.0 + + /safe-buffer@5.1.1: + resolution: 
{integrity: sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==} + dev: true + + /safe-buffer@5.1.2: + resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} + + /safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + dev: true + + /safe-regex-test@1.0.0: + resolution: {integrity: sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.2.0 + is-regex: 1.1.4 + dev: false + + /safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + /sander@0.5.1: + resolution: {integrity: sha512-3lVqBir7WuKDHGrKRDn/1Ye3kwpXaDOMsiRP1wd6wpZW56gJhsbp5RqQpA6JG/P+pkXizygnr1dKR8vzWaVsfA==} + dependencies: + es6-promise: 3.3.1 + graceful-fs: 4.2.9 + mkdirp: 0.5.5 + rimraf: 2.7.1 + + /sass-loader@13.3.2(webpack@5.88.1): + resolution: {integrity: sha512-CQbKl57kdEv+KDLquhC+gE3pXt74LEAzm+tzywcA0/aHZuub8wTErbjAoNI57rPUWRYRNC5WUnNl8eGJNbDdwg==} + engines: {node: '>= 14.15.0'} + peerDependencies: + fibers: '>= 3.1.0' + node-sass: ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0 || ^9.0.0 + sass: ^1.3.0 + sass-embedded: '*' + webpack: ^5.0.0 + peerDependenciesMeta: + fibers: + optional: true + node-sass: + optional: true + sass: + optional: true + sass-embedded: + optional: true + dependencies: + neo-async: 2.6.2 + webpack: 5.88.1(esbuild@0.17.14) + dev: true + + /sax@1.2.4: + resolution: {integrity: sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==} + requiresBuild: true + optional: true + + /saxes@6.0.0: + resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} + engines: {node: '>=v12.22.7'} + dependencies: + xmlchars: 2.2.0 + dev: false + + /scheduler@0.23.0: + resolution: {integrity: sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==} + dependencies: + loose-envify: 1.4.0 + dev: true + + /schema-utils@3.3.0: + resolution: {integrity: sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==} + engines: {node: '>= 10.13.0'} + dependencies: + '@types/json-schema': 7.0.12 + ajv: 6.12.6 + ajv-keywords: 3.5.2(ajv@6.12.6) + dev: true + + /semiver@1.1.0: + resolution: {integrity: sha512-QNI2ChmuioGC1/xjyYwyZYADILWyW6AmS1UH6gDj/SFUUUS4MBAWs/7mxnkRPc/F4iHezDP+O8t0dO8WHiEOdg==} + engines: {node: '>=6'} + dev: false + + /semver@5.7.1: + resolution: {integrity: sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==} + hasBin: true + + /semver@6.3.0: + resolution: {integrity: sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==} + hasBin: true + + /semver@7.0.0: + resolution: {integrity: sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==} + hasBin: true + dev: true + + /semver@7.4.0: + resolution: {integrity: sha512-RgOxM8Mw+7Zus0+zcLEUn8+JfoLpj/huFTItQy2hsM4khuC1HYRDp0cU482Ewn/Fcy6bCjufD8vAj7voC66KQw==} + engines: {node: '>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + + /semver@7.5.4: + resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==} + engines: {node: 
'>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + dev: false + + /send@0.18.0: + resolution: {integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==} + engines: {node: '>= 0.8.0'} + dependencies: + debug: 2.6.9 + depd: 2.0.0 + destroy: 1.2.0 + encodeurl: 1.0.2 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 0.5.2 + http-errors: 2.0.0 + mime: 1.6.0 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + dev: true + + /serialize-javascript@6.0.1: + resolution: {integrity: sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==} + dependencies: + randombytes: 2.1.0 + dev: true + + /serve-favicon@2.5.0: + resolution: {integrity: sha512-FMW2RvqNr03x+C0WxTyu6sOv21oOjkq5j8tjquWccwa6ScNyGFOGJVpuS1NmTVGBAHS07xnSKotgf2ehQmf9iA==} + engines: {node: '>= 0.8.0'} + dependencies: + etag: 1.8.1 + fresh: 0.5.2 + ms: 2.1.1 + parseurl: 1.3.3 + safe-buffer: 5.1.1 + dev: true + + /serve-static@1.15.0: + resolution: {integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==} + engines: {node: '>= 0.8.0'} + dependencies: + encodeurl: 1.0.2 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 0.18.0 + transitivePeerDependencies: + - supports-color + dev: true + + /set-blocking@2.0.0: + resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} + + /set-cookie-parser@2.6.0: + resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==} + + /setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + dev: true + + /shallow-clone@3.0.1: + resolution: {integrity: sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==} + engines: {node: '>=8'} + dependencies: + kind-of: 6.0.3 + dev: true + + /shebang-command@1.2.0: + resolution: {integrity: sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=} + engines: {node: '>=0.10.0'} + dependencies: + shebang-regex: 1.0.0 + dev: false + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + dependencies: + shebang-regex: 3.0.0 + + /shebang-regex@1.0.0: + resolution: {integrity: sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=} + engines: {node: '>=0.10.0'} + dev: false + + /shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + /shell-quote@1.7.3: + resolution: {integrity: sha512-Vpfqwm4EnqGdlsBFNmHhxhElJYrdfcxPThu+ryKS5J8L/fhAwLazFZtq+S+TWZ9ANj2piSQLGj6NQg+lKPmxrw==} + dev: false + + /shelljs@0.8.5: + resolution: {integrity: sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==} + engines: {node: '>=4'} + hasBin: true + dependencies: + glob: 7.2.0 + interpret: 1.4.0 + rechoir: 0.6.2 + dev: true + + /side-channel@1.0.4: + resolution: {integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.2.0 + object-inspect: 1.12.3 + + /siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + dev: false + + 
/signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + + /simple-update-notifier@1.1.0: + resolution: {integrity: sha512-VpsrsJSUcJEseSbMHkrsrAVSdvVS5I96Qo1QAQ4FxQ9wXFcB+pjj7FB7/us9+GcgfW4ziHtYMc1J0PLczb55mg==} + engines: {node: '>=8.10.0'} + dependencies: + semver: 7.0.0 + dev: true + + /sirv-cli@2.0.2: + resolution: {integrity: sha512-OtSJDwxsF1NWHc7ps3Sa0s+dPtP15iQNJzfKVz+MxkEo3z72mCD+yu30ct79rPr0CaV1HXSOBp+MIY5uIhHZ1A==} + engines: {node: '>= 10'} + hasBin: true + dependencies: + console-clear: 1.1.1 + get-port: 3.2.0 + kleur: 4.1.5 + local-access: 1.1.0 + sade: 1.8.1 + semiver: 1.1.0 + sirv: 2.0.2 + tinydate: 1.3.0 + dev: false + + /sirv@2.0.2: + resolution: {integrity: sha512-4Qog6aE29nIjAOKe/wowFTxOdmbEZKb+3tsLljaBRzJwtqto0BChD2zzH0LhgCSXiI+V7X+Y45v14wBZQ1TK3w==} + engines: {node: '>= 10'} + dependencies: + '@polka/url': 1.0.0-next.21 + mrmime: 1.0.0 + totalist: 3.0.0 + + /sisteransi@1.0.5: + resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + dev: true + + /slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + /smartwrap@2.0.2: + resolution: {integrity: sha512-vCsKNQxb7PnCNd2wY1WClWifAc2lwqsG8OaswpJkVJsvMGcnEntdTCDajZCkk93Ay1U3t/9puJmb525Rg5MZBA==} + engines: {node: '>=6'} + hasBin: true + dependencies: + array.prototype.flat: 1.3.1 + breakword: 1.0.5 + grapheme-splitter: 1.0.4 + strip-ansi: 6.0.1 + wcwidth: 1.0.1 + yargs: 15.4.1 + dev: false + + /sorcery@0.11.0: + resolution: {integrity: sha512-J69LQ22xrQB1cIFJhPfgtLuI6BpWRiWu1Y3vSsIwK/eAScqJxd/+CJlUuHQRdX2C9NGFamq+KqNywGgaThwfHw==} + hasBin: true + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + buffer-crc32: 0.2.13 + minimist: 1.2.6 + sander: 0.5.1 + + /source-map-js@1.0.2: + resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==} + engines: {node: '>=0.10.0'} + + /source-map-support@0.5.21: + resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + dev: true + + /source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + /space-separated-tokens@1.1.5: + resolution: {integrity: sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==} + dev: true + + /spawndamnit@2.0.0: + resolution: {integrity: sha512-j4JKEcncSjFlqIwU5L/rp2N5SIPsdxaRsIv678+TZxZ0SRDJTm8JrxJMjE/XuiEZNEir3S8l0Fa3Ke339WI4qA==} + dependencies: + cross-spawn: 5.1.0 + signal-exit: 3.0.7 + dev: false + + /spdx-correct@3.1.1: + resolution: {integrity: sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==} + dependencies: + spdx-expression-parse: 3.0.1 + spdx-license-ids: 3.0.11 + + /spdx-exceptions@2.3.0: + resolution: {integrity: sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==} + + /spdx-expression-parse@3.0.1: + resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} + dependencies: + spdx-exceptions: 2.3.0 + spdx-license-ids: 3.0.11 + + /spdx-license-ids@3.0.11: + resolution: {integrity: 
sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==} + + /sprintf-js@1.0.3: + resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + + /stack-utils@2.0.6: + resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==} + engines: {node: '>=10'} + dependencies: + escape-string-regexp: 2.0.0 + dev: false + + /stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + dev: false + + /standardized-audio-context@25.3.55: + resolution: {integrity: sha512-ym9g7FZ5S1FykbQ1///ktTJgk+zTtGF1hGR/BFRQjRkN6G2Xy9GbL5kOcM7DlzflV2yJtqVwfU2gL042b1oHwg==} + dependencies: + '@babel/runtime': 7.22.6 + automation-events: 6.0.8 + tslib: 2.6.1 + dev: false + + /statuses@2.0.1: + resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} + engines: {node: '>= 0.8'} + dev: true + + /std-env@3.3.3: + resolution: {integrity: sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg==} + dev: false + + /store2@2.14.2: + resolution: {integrity: sha512-siT1RiqlfQnGqgT/YzXVUNsom9S0H1OX+dpdGN1xkyYATo4I6sep5NmsRD/40s3IIOvlCq6akxkqG82urIZW1w==} + dev: true + + /storybook@7.0.23: + resolution: {integrity: sha512-eko4DZ6lheJZCsL55RJhYksXX3UWgdO6rkR52pmfhCjlitxf07We+lEuzVou8+HLg8jnSqLi2GIzDKh+hBS4og==} + hasBin: true + dependencies: + '@storybook/cli': 7.0.23 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /stream-shift@1.0.1: + resolution: {integrity: sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==} + dev: true + + /stream-transform@2.1.3: + resolution: {integrity: sha512-9GHUiM5hMiCi6Y03jD2ARC1ettBXkQBoQAe7nJsPknnI0ow10aXjTnew8QtYQmLjzn974BnmWEAJgCY6ZP1DeQ==} + dependencies: + mixme: 0.5.9 + dev: false + + /streamsearch@1.1.0: + resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} + engines: {node: '>=10.0.0'} + + /strict-event-emitter@0.2.8: + resolution: {integrity: sha512-KDf/ujU8Zud3YaLtMCcTI4xkZlZVIYxTLr+XIULexP+77EEVWixeXroLUXQXiVtH4XH2W7jr/3PT1v3zBuvc3A==} + dependencies: + events: 3.3.0 + dev: false + + /strict-event-emitter@0.4.6: + resolution: {integrity: sha512-12KWeb+wixJohmnwNFerbyiBrAlq5qJLwIt38etRtKtmmHyDSoGlIqFE9wx+4IwG0aDjI7GV8tc8ZccjWZZtTg==} + dev: false + + /string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + /string.prototype.padend@3.1.3: + resolution: {integrity: sha512-jNIIeokznm8SD/TZISQsZKYu7RJyheFNt84DUPrh482GC8RVp2MKqm2O5oBRdGxbDQoXrhhWtPIWQOiy20svUg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.3 + es-abstract: 1.19.1 + dev: false + + /string.prototype.trim@1.2.7: + resolution: {integrity: sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + es-abstract: 1.21.2 + dev: false + + /string.prototype.trimend@1.0.4: + resolution: {integrity: 
sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + dev: false + + /string.prototype.trimend@1.0.6: + resolution: {integrity: sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + es-abstract: 1.21.2 + dev: false + + /string.prototype.trimstart@1.0.4: + resolution: {integrity: sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + dev: false + + /string.prototype.trimstart@1.0.6: + resolution: {integrity: sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.0 + es-abstract: 1.21.2 + dev: false + + /string_decoder@1.1.1: + resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==} + dependencies: + safe-buffer: 5.1.2 + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + dependencies: + ansi-regex: 5.0.1 + + /strip-bom@3.0.0: + resolution: {integrity: sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=} + engines: {node: '>=4'} + dev: false + + /strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + dev: true + + /strip-indent@3.0.0: + resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} + engines: {node: '>=8'} + dependencies: + min-indent: 1.0.1 + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + /strip-literal@1.0.1: + resolution: {integrity: sha512-QZTsipNpa2Ppr6v1AmJHESqJ3Uz247MUS0OjrnnZjFAvEoWqxuyFuXn2xLgMtRnijJShAa1HL0gtJyUs7u7n3Q==} + dependencies: + acorn: 8.10.0 + dev: false + + /style-loader@3.3.3(webpack@5.88.1): + resolution: {integrity: sha512-53BiGLXAcll9maCYtZi2RCQZKa8NQQai5C4horqKyRmHj9H7QmcUyucrH+4KW/gBQbXM2AsB0axoEcFZPlfPcw==} + engines: {node: '>= 12.13.0'} + peerDependencies: + webpack: ^5.0.0 + dependencies: + webpack: 5.88.1(esbuild@0.17.14) + dev: true + + /style-mod@4.0.0: + resolution: {integrity: sha512-OPhtyEjyyN9x3nhPsu76f52yUGXiZcgvsrFVtvTkyGRQJ0XK+GPc6ov1z+lRpbeabka+MYEQxOYRnt5nF30aMw==} + dev: false + + /subscribable-things@2.1.23: + resolution: {integrity: sha512-ks4rvqxofzIcXzOdRUCoNbAptNzsoa1LEJEbJBCy/ADahzWNEiy1Oa3LpRVw12QcTGUXHblxO4Gpb+JQNjprKQ==} + dependencies: + '@babel/runtime': 7.22.6 + rxjs-interop: 2.0.0 + tslib: 2.6.1 + dev: false + + /supports-color@5.5.0: + resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} + engines: {node: '>=4'} + dependencies: + has-flag: 3.0.0 + + /supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + + /supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + dependencies: + has-flag: 4.0.0 + dev: 
true + + /supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + /svelte-check@3.1.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@3.57.0): + resolution: {integrity: sha512-25Lb46ZS4IK/XpBMe4IBMrtYf23V8alqBX+szXoccb7uM0D2Wqq5rMRzYBONZnFVuU1bQG3R50lyIT5eRewv2g==} + hasBin: true + peerDependencies: + svelte: ^3.55.0 + dependencies: + '@jridgewell/trace-mapping': 0.3.17 + chokidar: 3.5.3 + fast-glob: 3.2.11 + import-fresh: 3.3.0 + picocolors: 1.0.0 + sade: 1.8.1 + svelte: 3.57.0 + svelte-preprocess: 5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@3.57.0)(typescript@4.9.5) + typescript: 4.9.5 + transitivePeerDependencies: + - '@babel/core' + - coffeescript + - less + - postcss + - postcss-load-config + - pug + - sass + - stylus + - sugarss + dev: true + + /svelte-check@3.4.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.21)(svelte@3.59.2): + resolution: {integrity: sha512-Uys9+R65cj8TmP8f5UpS7B2xKpNLYNxEWJsA5ZoKcWq/uwvABFF7xS6iPQGLoa7hxz0DS6xU60YFpmq06E4JxA==} + hasBin: true + peerDependencies: + svelte: ^3.55.0 || ^4.0.0-next.0 || ^4.0.0 + dependencies: + '@jridgewell/trace-mapping': 0.3.18 + chokidar: 3.5.3 + fast-glob: 3.2.11 + import-fresh: 3.3.0 + picocolors: 1.0.0 + sade: 1.8.1 + svelte: 3.59.2 + svelte-preprocess: 5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.21)(svelte@3.59.2)(typescript@5.1.3) + typescript: 5.1.3 + transitivePeerDependencies: + - '@babel/core' + - coffeescript + - less + - postcss + - postcss-load-config + - pug + - sass + - stylus + - sugarss + dev: true + + /svelte-check@3.4.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@4.0.0): + resolution: {integrity: sha512-Uys9+R65cj8TmP8f5UpS7B2xKpNLYNxEWJsA5ZoKcWq/uwvABFF7xS6iPQGLoa7hxz0DS6xU60YFpmq06E4JxA==} + hasBin: true + peerDependencies: + svelte: ^3.55.0 || ^4.0.0-next.0 || ^4.0.0 + dependencies: + '@jridgewell/trace-mapping': 0.3.18 + chokidar: 3.5.3 + fast-glob: 3.2.11 + import-fresh: 3.3.0 + picocolors: 1.0.0 + sade: 1.8.1 + svelte: 4.0.0 + svelte-preprocess: 5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@4.0.0)(typescript@5.1.3) + typescript: 5.1.3 + transitivePeerDependencies: + - '@babel/core' + - coffeescript + - less + - postcss + - postcss-load-config + - pug + - sass + - stylus + - sugarss + dev: false + + /svelte-eslint-parser@0.32.2(svelte@4.0.0): + resolution: {integrity: sha512-Ok9D3A4b23iLQsONrjqtXtYDu5ZZ/826Blaw2LeFZVTg1pwofKDG4mz3/GYTax8fQ0plRGHI6j+d9VQYy5Lo/A==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + svelte: ^3.37.0 || ^4.0.0 + peerDependenciesMeta: + svelte: + optional: true + dependencies: + eslint-scope: 7.2.0 + eslint-visitor-keys: 3.4.1 + espree: 9.5.2 + postcss: 8.4.27 + postcss-scss: 4.0.6(postcss@8.4.27) + svelte: 4.0.0 + dev: false + + /svelte-hmr@0.15.2(svelte@3.57.0): + resolution: {integrity: sha512-q/bAruCvFLwvNbeE1x3n37TYFb3mTBJ6TrCq6p2CoFbSTNhDE9oAtEfpy+wmc9So8AG0Tja+X0/mJzX9tSfvIg==} + engines: {node: ^12.20 || ^14.13.1 || >= 16} + peerDependencies: + svelte: ^3.19.0 || ^4.0.0-next.0 + dependencies: + svelte: 3.57.0 + dev: true + + /svelte-hmr@0.15.2(svelte@3.59.2): + resolution: {integrity: sha512-q/bAruCvFLwvNbeE1x3n37TYFb3mTBJ6TrCq6p2CoFbSTNhDE9oAtEfpy+wmc9So8AG0Tja+X0/mJzX9tSfvIg==} + engines: {node: ^12.20 || ^14.13.1 || >= 16} + peerDependencies: + svelte: ^3.19.0 || ^4.0.0-next.0 + dependencies: + svelte: 3.59.2 + + /svelte-hmr@0.15.2(svelte@4.0.0): + 
resolution: {integrity: sha512-q/bAruCvFLwvNbeE1x3n37TYFb3mTBJ6TrCq6p2CoFbSTNhDE9oAtEfpy+wmc9So8AG0Tja+X0/mJzX9tSfvIg==} + engines: {node: ^12.20 || ^14.13.1 || >= 16} + peerDependencies: + svelte: ^3.19.0 || ^4.0.0-next.0 + dependencies: + svelte: 4.0.0 + + /svelte-i18n@3.7.0(svelte@4.0.0): + resolution: {integrity: sha512-kfdJsYsyOE9tFEVtjPXvrUaufXQnbFAI6LsX9vaQP+xm8A5Wao2qQ6pRZmIUCAvXvYQt7aXQ7hK9+NP9AlxehA==} + engines: {node: '>= 16'} + hasBin: true + peerDependencies: + svelte: ^3 || ^4 + dependencies: + cli-color: 2.0.3 + deepmerge: 4.3.1 + estree-walker: 2.0.2 + intl-messageformat: 9.13.0 + sade: 1.8.1 + svelte: 4.0.0 + tiny-glob: 0.2.9 + dev: false + + /svelte-preprocess@5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.21)(svelte@3.59.2)(typescript@5.1.3): + resolution: {integrity: sha512-ABia2QegosxOGsVlsSBJvoWeXy1wUKSfF7SWJdTjLAbx/Y3SrVevvvbFNQqrSJw89+lNSsM58SipmZJ5SRi5iw==} + engines: {node: '>= 14.10.0'} + requiresBuild: true + peerDependencies: + '@babel/core': ^7.10.2 + coffeescript: ^2.5.1 + less: ^3.11.3 || ^4.0.0 + postcss: ^7 || ^8 + postcss-load-config: ^2.1.0 || ^3.0.0 || ^4.0.0 + pug: ^3.0.0 + sass: ^1.26.8 + stylus: ^0.55.0 + sugarss: ^2.0.0 || ^3.0.0 || ^4.0.0 + svelte: ^3.23.0 || ^4.0.0-next.0 || ^4.0.0 + typescript: '>=3.9.5 || ^4.0.0 || ^5.0.0' + peerDependenciesMeta: + '@babel/core': + optional: true + coffeescript: + optional: true + less: + optional: true + postcss: + optional: true + postcss-load-config: + optional: true + pug: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + typescript: + optional: true + dependencies: + '@babel/core': 7.22.5 + '@types/pug': 2.0.6 + detect-indent: 6.1.0 + less: 4.1.3 + magic-string: 0.27.0 + postcss: 8.4.21 + sorcery: 0.11.0 + strip-indent: 3.0.0 + svelte: 3.59.2 + typescript: 5.1.3 + dev: true + + /svelte-preprocess@5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@3.57.0)(typescript@4.9.5): + resolution: {integrity: sha512-ABia2QegosxOGsVlsSBJvoWeXy1wUKSfF7SWJdTjLAbx/Y3SrVevvvbFNQqrSJw89+lNSsM58SipmZJ5SRi5iw==} + engines: {node: '>= 14.10.0'} + requiresBuild: true + peerDependencies: + '@babel/core': ^7.10.2 + coffeescript: ^2.5.1 + less: ^3.11.3 || ^4.0.0 + postcss: ^7 || ^8 + postcss-load-config: ^2.1.0 || ^3.0.0 || ^4.0.0 + pug: ^3.0.0 + sass: ^1.26.8 + stylus: ^0.55.0 + sugarss: ^2.0.0 || ^3.0.0 || ^4.0.0 + svelte: ^3.23.0 || ^4.0.0-next.0 || ^4.0.0 + typescript: '>=3.9.5 || ^4.0.0 || ^5.0.0' + peerDependenciesMeta: + '@babel/core': + optional: true + coffeescript: + optional: true + less: + optional: true + postcss: + optional: true + postcss-load-config: + optional: true + pug: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + typescript: + optional: true + dependencies: + '@babel/core': 7.22.5 + '@types/pug': 2.0.6 + detect-indent: 6.1.0 + less: 4.1.3 + magic-string: 0.27.0 + postcss: 8.4.27 + sorcery: 0.11.0 + strip-indent: 3.0.0 + svelte: 3.57.0 + typescript: 4.9.5 + dev: true + + /svelte-preprocess@5.0.4(@babel/core@7.22.5)(less@4.1.3)(postcss@8.4.27)(svelte@4.0.0)(typescript@5.1.3): + resolution: {integrity: sha512-ABia2QegosxOGsVlsSBJvoWeXy1wUKSfF7SWJdTjLAbx/Y3SrVevvvbFNQqrSJw89+lNSsM58SipmZJ5SRi5iw==} + engines: {node: '>= 14.10.0'} + requiresBuild: true + peerDependencies: + '@babel/core': ^7.10.2 + coffeescript: ^2.5.1 + less: ^3.11.3 || ^4.0.0 + postcss: ^7 || ^8 + postcss-load-config: ^2.1.0 || ^3.0.0 || ^4.0.0 + pug: ^3.0.0 + sass: ^1.26.8 + stylus: ^0.55.0 + sugarss: ^2.0.0 || ^3.0.0 || ^4.0.0 + 
svelte: ^3.23.0 || ^4.0.0-next.0 || ^4.0.0 + typescript: '>=3.9.5 || ^4.0.0 || ^5.0.0' + peerDependenciesMeta: + '@babel/core': + optional: true + coffeescript: + optional: true + less: + optional: true + postcss: + optional: true + postcss-load-config: + optional: true + pug: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + typescript: + optional: true + dependencies: + '@babel/core': 7.22.5 + '@types/pug': 2.0.6 + detect-indent: 6.1.0 + less: 4.1.3 + magic-string: 0.27.0 + postcss: 8.4.27 + sorcery: 0.11.0 + strip-indent: 3.0.0 + svelte: 4.0.0 + typescript: 5.1.3 + dev: false + + /svelte-range-slider-pips@2.0.2: + resolution: {integrity: sha512-VTWHOdwDyWbndGZnI0PQJY9DO7hgQlNubtCcCL6Wlypv5dU4vEsc4A1sX9TWMuvebEe4332SgsQQHzOdZ+guhQ==} + dev: false + + /svelte-vega@2.0.0(svelte@3.59.2)(vega-lite@5.12.0)(vega@5.22.1): + resolution: {integrity: sha512-WnJM+hQNw15VAUtwT6oteog3+0KRw8i8K02gAB4VLM0RYs2NXxBZ1q/BdOvkl1XVnZytRVlLr1HQgxeZs7QwUA==} + peerDependencies: + svelte: ^3.54.0 + vega: '*' + vega-lite: '*' + dependencies: + fast-deep-equal: 3.1.3 + svelte: 3.59.2 + vega: 5.22.1 + vega-embed: 6.22.1(vega-lite@5.12.0)(vega@5.22.1) + vega-lite: 5.12.0(vega@5.22.1) + dev: false + + /svelte@3.57.0: + resolution: {integrity: sha512-WMXEvF+RtAaclw0t3bPDTUe19pplMlfyKDsixbHQYgCWi9+O9VN0kXU1OppzrB9gPAvz4NALuoca2LfW2bOjTQ==} + engines: {node: '>= 8'} + dev: true + + /svelte@3.59.2: + resolution: {integrity: sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA==} + engines: {node: '>= 8'} + + /svelte@4.0.0: + resolution: {integrity: sha512-+yCYu3AEUu9n91dnQNGIbnVp8EmNQtuF/YImW4+FTXRHard7NMo+yTsWzggPAbj3fUEJ1FBJLkql/jkp6YB5pg==} + engines: {node: '>=16'} + dependencies: + '@ampproject/remapping': 2.2.1 + '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/trace-mapping': 0.3.18 + acorn: 8.8.2 + aria-query: 5.3.0 + axobject-query: 3.2.1 + code-red: 1.0.3 + css-tree: 2.3.1 + estree-walker: 3.0.3 + is-reference: 3.0.1 + locate-character: 3.0.0 + magic-string: 0.30.0 + periscopic: 3.1.0 + + /sveltedoc-parser@4.2.1: + resolution: {integrity: sha512-sWJRa4qOfRdSORSVw9GhfDEwsbsYsegnDzBevUCF6k/Eis/QqCu9lJ6I0+d/E2wOWCjOhlcJ3+jl/Iur+5mmCw==} + engines: {node: '>=10.0.0'} + dependencies: + eslint: 8.4.1 + espree: 9.2.0 + htmlparser2-svelte: 4.1.0 + transitivePeerDependencies: + - supports-color + dev: true + + /symbol-tree@3.2.4: + resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} + dev: false + + /synchronous-promise@2.0.17: + resolution: {integrity: sha512-AsS729u2RHUfEra9xJrE39peJcc2stq2+poBXX8bcM08Y6g9j/i/PUzwNQqkaJde7Ntg1TO7bSREbR5sdosQ+g==} + dev: true + + /tailwindcss@3.1.6(postcss@8.4.21): + resolution: {integrity: sha512-7skAOY56erZAFQssT1xkpk+kWt2NrO45kORlxFPXUt3CiGsVPhH1smuH5XoDH6sGPXLyBv+zgCKA2HWBsgCytg==} + engines: {node: '>=12.13.0'} + hasBin: true + peerDependencies: + postcss: ^8.0.9 + dependencies: + arg: 5.0.2 + chokidar: 3.5.3 + color-name: 1.1.4 + detective: 5.2.1 + didyoumean: 1.2.2 + dlv: 1.1.3 + fast-glob: 3.2.11 + glob-parent: 6.0.2 + is-glob: 4.0.3 + lilconfig: 2.0.6 + normalize-path: 3.0.0 + object-hash: 3.0.0 + picocolors: 1.0.0 + postcss: 8.4.21 + postcss-import: 14.1.0(postcss@8.4.21) + postcss-js: 4.0.0(postcss@8.4.21) + postcss-load-config: 3.1.4(postcss@8.4.21) + postcss-nested: 5.0.6(postcss@8.4.21) + postcss-selector-parser: 6.0.13 + postcss-value-parser: 4.2.0 + quick-lru: 5.1.1 + resolve: 1.22.1 + 
transitivePeerDependencies: + - ts-node + dev: true + + /tailwindcss@3.1.6(postcss@8.4.27): + resolution: {integrity: sha512-7skAOY56erZAFQssT1xkpk+kWt2NrO45kORlxFPXUt3CiGsVPhH1smuH5XoDH6sGPXLyBv+zgCKA2HWBsgCytg==} + engines: {node: '>=12.13.0'} + hasBin: true + peerDependencies: + postcss: ^8.0.9 + dependencies: + arg: 5.0.2 + chokidar: 3.5.3 + color-name: 1.1.4 + detective: 5.2.1 + didyoumean: 1.2.2 + dlv: 1.1.3 + fast-glob: 3.2.11 + glob-parent: 6.0.2 + is-glob: 4.0.3 + lilconfig: 2.0.6 + normalize-path: 3.0.0 + object-hash: 3.0.0 + picocolors: 1.0.0 + postcss: 8.4.27 + postcss-import: 14.1.0(postcss@8.4.27) + postcss-js: 4.0.0(postcss@8.4.27) + postcss-load-config: 3.1.4(postcss@8.4.27) + postcss-nested: 5.0.6(postcss@8.4.27) + postcss-selector-parser: 6.0.13 + postcss-value-parser: 4.2.0 + quick-lru: 5.1.1 + resolve: 1.22.1 + transitivePeerDependencies: + - ts-node + + /tapable@2.2.1: + resolution: {integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==} + engines: {node: '>=6'} + dev: true + + /tar-fs@2.1.1: + resolution: {integrity: sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==} + dependencies: + chownr: 1.1.4 + mkdirp-classic: 0.5.3 + pump: 3.0.0 + tar-stream: 2.2.0 + dev: true + + /tar-stream@2.2.0: + resolution: {integrity: sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==} + engines: {node: '>=6'} + dependencies: + bl: 4.1.0 + end-of-stream: 1.4.4 + fs-constants: 1.0.0 + inherits: 2.0.4 + readable-stream: 3.6.0 + dev: true + + /tar@6.1.15: + resolution: {integrity: sha512-/zKt9UyngnxIT/EAGYuxaMYgOIJiP81ab9ZfkILq4oNLPFX50qyYmu7jRj9qeXoxmJHjGlbH0+cm2uy1WCs10A==} + engines: {node: '>=10'} + dependencies: + chownr: 2.0.0 + fs-minipass: 2.1.0 + minipass: 5.0.0 + minizlib: 2.1.2 + mkdirp: 1.0.4 + yallist: 4.0.0 + + /telejson@7.1.0: + resolution: {integrity: sha512-jFJO4P5gPebZAERPkJsqMAQ0IMA1Hi0AoSfxpnUaV6j6R2SZqlpkbS20U6dEUtA3RUYt2Ak/mTlkQzHH9Rv/hA==} + dependencies: + memoizerific: 1.11.3 + dev: true + + /temp-dir@2.0.0: + resolution: {integrity: sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==} + engines: {node: '>=8'} + dev: true + + /temp@0.8.4: + resolution: {integrity: sha512-s0ZZzd0BzYv5tLSptZooSjK8oj6C+c19p7Vqta9+6NPOf7r+fxq0cJe6/oN4LTC79sy5NY8ucOJNgwsKCSbfqg==} + engines: {node: '>=6.0.0'} + dependencies: + rimraf: 2.6.3 + dev: true + + /tempy@1.0.1: + resolution: {integrity: sha512-biM9brNqxSc04Ee71hzFbryD11nX7VPhQQY32AdDmjFvodsRFz/3ufeoTZ6uYkRFfGo188tENcASNs3vTdsM0w==} + engines: {node: '>=10'} + dependencies: + del: 6.1.1 + is-stream: 2.0.1 + temp-dir: 2.0.0 + type-fest: 0.16.0 + unique-string: 2.0.0 + dev: true + + /term-size@2.2.1: + resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} + engines: {node: '>=8'} + dev: false + + /terser-webpack-plugin@5.3.9(esbuild@0.17.14)(webpack@5.88.1): + resolution: {integrity: sha512-ZuXsqE07EcggTWQjXUj+Aot/OMcD0bMKGgF63f7UxYcu5/AJF53aIpK1YoP5xR9l6s/Hy2b+t1AM0bLNPRuhwA==} + engines: {node: '>= 10.13.0'} + peerDependencies: + '@swc/core': '*' + esbuild: '*' + uglify-js: '*' + webpack: ^5.1.0 + peerDependenciesMeta: + '@swc/core': + optional: true + esbuild: + optional: true + uglify-js: + optional: true + dependencies: + '@jridgewell/trace-mapping': 0.3.18 + esbuild: 0.17.14 + jest-worker: 27.5.1 + schema-utils: 3.3.0 + serialize-javascript: 6.0.1 + terser: 5.18.2 + webpack: 
5.88.1(esbuild@0.17.14) + dev: true + + /terser@5.18.2: + resolution: {integrity: sha512-Ah19JS86ypbJzTzvUCX7KOsEIhDaRONungA4aYBjEP3JZRf4ocuDzTg4QWZnPn9DEMiMYGJPiSOy7aykoCc70w==} + engines: {node: '>=10'} + hasBin: true + dependencies: + '@jridgewell/source-map': 0.3.5 + acorn: 8.10.0 + commander: 2.20.3 + source-map-support: 0.5.21 + dev: true + + /test-exclude@6.0.0: + resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} + engines: {node: '>=8'} + dependencies: + '@istanbuljs/schema': 0.1.3 + glob: 7.2.0 + minimatch: 3.1.2 + dev: true + + /text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + /through2@2.0.5: + resolution: {integrity: sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==} + dependencies: + readable-stream: 2.3.8 + xtend: 4.0.2 + dev: true + + /through@2.3.8: + resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} + dev: false + + /timers-ext@0.1.7: + resolution: {integrity: sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==} + dependencies: + es5-ext: 0.10.62 + next-tick: 1.1.0 + dev: false + + /tiny-glob@0.2.9: + resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==} + dependencies: + globalyzer: 0.1.0 + globrex: 0.1.2 + + /tinybench@2.5.0: + resolution: {integrity: sha512-kRwSG8Zx4tjF9ZiyH4bhaebu+EDz1BOx9hOigYHlUW4xxI/wKIUQUqo018UlU4ar6ATPBsaMrdbKZ+tmPdohFA==} + dev: false + + /tinydate@1.3.0: + resolution: {integrity: sha512-7cR8rLy2QhYHpsBDBVYnnWXm8uRTr38RoZakFSW7Bs7PzfMPNZthuMLkwqZv7MTu8lhQ91cOFYS5a7iFj2oR3w==} + engines: {node: '>=4'} + dev: false + + /tinypool@0.7.0: + resolution: {integrity: sha512-zSYNUlYSMhJ6Zdou4cJwo/p7w5nmAH17GRfU/ui3ctvjXFErXXkruT4MWW6poDeXgCaIBlGLrfU6TbTXxyGMww==} + engines: {node: '>=14.0.0'} + dev: false + + /tinyspy@2.1.1: + resolution: {integrity: sha512-XPJL2uSzcOyBMky6OFrusqWlzfFrXtE0hPuMgW8A2HmaqrPo4ZQHRN/V0QXN3FSjKxpsbRrFc5LI7KOwBsT1/w==} + engines: {node: '>=14.0.0'} + dev: false + + /tmp@0.0.33: + resolution: {integrity: sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==} + engines: {node: '>=0.6.0'} + dependencies: + os-tmpdir: 1.0.2 + dev: false + + /tmpl@1.0.5: + resolution: {integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==} + dev: true + + /to-fast-properties@2.0.0: + resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} + engines: {node: '>=4'} + + /to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + dependencies: + is-number: 7.0.0 + + /toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + dev: true + + /topojson-client@3.1.0: + resolution: {integrity: sha512-605uxS6bcYxGXw9qi62XyrV6Q3xwbndjachmNxu8HWTtVPxZfEJN9fd/SZS1Q54Sn2y0TMyMxFj/cJINqGHrKw==} + hasBin: true + dependencies: + commander: 2.20.3 + dev: false + + /totalist@3.0.0: + resolution: {integrity: sha512-eM+pCBxXO/njtF7vdFsHuqb+ElbxqtI4r5EAvk6grfAFyJ6IvWlSkfZ5T9ozC6xWw3Fj1fGoSmrl0gUs46JVIw==} 
+ engines: {node: '>=6'} + + /tough-cookie@4.1.3: + resolution: {integrity: sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==} + engines: {node: '>=6'} + dependencies: + psl: 1.9.0 + punycode: 2.3.0 + universalify: 0.2.0 + url-parse: 1.5.10 + dev: false + + /tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + + /tr46@4.1.1: + resolution: {integrity: sha512-2lv/66T7e5yNyhAAC4NaKe5nVavzuGJQVVtRYLyQ2OI8tsJ61PMLlelehb0wi2Hx6+hT/OJUWZcw8MjlSRnxvw==} + engines: {node: '>=14'} + dependencies: + punycode: 2.3.0 + dev: false + + /trim-newlines@3.0.1: + resolution: {integrity: sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==} + engines: {node: '>=8'} + dev: false + + /trouter@3.2.0: + resolution: {integrity: sha512-rLLXbhTObLy2MBVjLC+jTnoIKw99n0GuJs9ov10J870vDw5qhTurPzsDrudNtBf5w/CZ9ctZy2p2IMmhGcel2w==} + engines: {node: '>=6'} + dependencies: + regexparam: 1.3.0 + dev: false + + /ts-api-utils@1.0.1(typescript@5.1.3): + resolution: {integrity: sha512-lC/RGlPmwdrIBFTX59wwNzqh7aR2otPNPR/5brHZm/XKFYKsfqxihXUe9pU3JI+3vGkl+vyCoNNnPhJn3aLK1A==} + engines: {node: '>=16.13.0'} + peerDependencies: + typescript: '>=4.2.0' + dependencies: + typescript: 5.1.3 + dev: false + + /ts-dedent@2.2.0: + resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} + engines: {node: '>=6.10'} + dev: true + + /tslib@2.4.0: + resolution: {integrity: sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==} + dev: false + + /tslib@2.5.3: + resolution: {integrity: sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w==} + + /tslib@2.6.1: + resolution: {integrity: sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==} + + /tty-table@4.2.1: + resolution: {integrity: sha512-xz0uKo+KakCQ+Dxj1D/tKn2FSyreSYWzdkL/BYhgN6oMW808g8QRMuh1atAV9fjTPbWBjfbkKQpI/5rEcnAc7g==} + engines: {node: '>=8.0.0'} + hasBin: true + dependencies: + chalk: 4.1.2 + csv: 5.5.3 + kleur: 4.1.5 + smartwrap: 2.0.2 + strip-ansi: 6.0.1 + wcwidth: 1.0.1 + yargs: 17.7.1 + dev: false + + /type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + + /type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + dev: false + + /type-fest@0.13.1: + resolution: {integrity: sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==} + engines: {node: '>=10'} + dev: false + + /type-fest@0.16.0: + resolution: {integrity: sha512-eaBzG6MxNzEn9kiwvtre90cXaNLkmadMWa1zQMs3XORCXNbsH/OewwbxC5ia9dCxIxnTAsSxXJaa/p5y8DlvJg==} + engines: {node: '>=10'} + dev: true + + /type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + /type-fest@0.21.3: + resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} + engines: {node: '>=10'} + dev: false + + /type-fest@0.6.0: + resolution: {integrity: 
sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==} + engines: {node: '>=8'} + + /type-fest@0.8.1: + resolution: {integrity: sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==} + engines: {node: '>=8'} + + /type-fest@2.19.0: + resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} + engines: {node: '>=12.20'} + + /type-is@1.6.18: + resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} + engines: {node: '>= 0.6'} + dependencies: + media-typer: 0.3.0 + mime-types: 2.1.34 + dev: true + + /type@1.2.0: + resolution: {integrity: sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==} + dev: false + + /type@2.7.2: + resolution: {integrity: sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==} + dev: false + + /typed-array-length@1.0.4: + resolution: {integrity: sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==} + dependencies: + call-bind: 1.0.2 + for-each: 0.3.3 + is-typed-array: 1.1.10 + dev: false + + /typedarray@0.0.6: + resolution: {integrity: sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==} + dev: true + + /typescript@4.9.5: + resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} + engines: {node: '>=4.2.0'} + hasBin: true + dev: true + + /typescript@5.0.4: + resolution: {integrity: sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==} + engines: {node: '>=12.20'} + hasBin: true + dev: true + + /typescript@5.1.3: + resolution: {integrity: sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==} + engines: {node: '>=14.17'} + hasBin: true + + /ufo@1.1.2: + resolution: {integrity: sha512-TrY6DsjTQQgyS3E3dBaOXf0TpPD8u9FVrVYmKVegJuFw51n/YB9XPt+U6ydzFG5ZIN7+DIjPbNmXoBj9esYhgQ==} + dev: false + + /uglify-js@3.17.4: + resolution: {integrity: sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==} + engines: {node: '>=0.8.0'} + hasBin: true + requiresBuild: true + dev: true + optional: true + + /unbox-primitive@1.0.1: + resolution: {integrity: sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==} + dependencies: + function-bind: 1.1.1 + has-bigints: 1.0.2 + has-symbols: 1.0.3 + which-boxed-primitive: 1.0.2 + dev: false + + /unbox-primitive@1.0.2: + resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} + dependencies: + call-bind: 1.0.2 + has-bigints: 1.0.2 + has-symbols: 1.0.3 + which-boxed-primitive: 1.0.2 + dev: false + + /undici@5.22.0: + resolution: {integrity: sha512-fR9RXCc+6Dxav4P9VV/sp5w3eFiSdOjJYsbtWfd4s5L5C4ogyuVpdKIVHeW0vV1MloM65/f7W45nR9ZxwVdyiA==} + engines: {node: '>=14.0'} + dependencies: + busboy: 1.6.0 + + /unfetch@4.2.0: + resolution: {integrity: sha512-F9p7yYCn6cIW9El1zi0HI6vqpeIvBsr3dSuRO6Xuppb1u5rXpCPmMvLSyECLhybr9isec8Ohl0hPekMVrEinDA==} + dev: true + + /unicode-canonical-property-names-ecmascript@2.0.0: + resolution: {integrity: sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==} + engines: {node: '>=4'} + dev: true + + 
/unicode-match-property-ecmascript@2.0.0: + resolution: {integrity: sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==} + engines: {node: '>=4'} + dependencies: + unicode-canonical-property-names-ecmascript: 2.0.0 + unicode-property-aliases-ecmascript: 2.1.0 + dev: true + + /unicode-match-property-value-ecmascript@2.1.0: + resolution: {integrity: sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==} + engines: {node: '>=4'} + dev: true + + /unicode-property-aliases-ecmascript@2.1.0: + resolution: {integrity: sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==} + engines: {node: '>=4'} + dev: true + + /unique-string@2.0.0: + resolution: {integrity: sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==} + engines: {node: '>=8'} + dependencies: + crypto-random-string: 2.0.0 + dev: true + + /unist-util-is@4.1.0: + resolution: {integrity: sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==} + dev: true + + /unist-util-stringify-position@2.0.3: + resolution: {integrity: sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==} + dependencies: + '@types/unist': 2.0.6 + dev: false + + /unist-util-visit-parents@3.1.1: + resolution: {integrity: sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==} + dependencies: + '@types/unist': 2.0.6 + unist-util-is: 4.1.0 + dev: true + + /unist-util-visit@2.0.3: + resolution: {integrity: sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==} + dependencies: + '@types/unist': 2.0.6 + unist-util-is: 4.1.0 + unist-util-visit-parents: 3.1.1 + dev: true + + /universalify@0.1.2: + resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} + engines: {node: '>= 4.0.0'} + dev: false + + /universalify@0.2.0: + resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} + engines: {node: '>= 4.0.0'} + dev: false + + /universalify@2.0.0: + resolution: {integrity: sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==} + engines: {node: '>= 10.0.0'} + dev: true + + /unpipe@1.0.0: + resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + dev: true + + /unplugin@0.10.2: + resolution: {integrity: sha512-6rk7GUa4ICYjae5PrAllvcDeuT8pA9+j5J5EkxbMFaV+SalHhxZ7X2dohMzu6C3XzsMT+6jwR/+pwPNR3uK9MA==} + dependencies: + acorn: 8.10.0 + chokidar: 3.5.3 + webpack-sources: 3.2.3 + webpack-virtual-modules: 0.4.6 + dev: true + + /untildify@4.0.0: + resolution: {integrity: sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==} + engines: {node: '>=8'} + dev: true + + /update-browserslist-db@1.0.11(browserslist@4.21.9): + resolution: {integrity: sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + dependencies: + browserslist: 4.21.9 + escalade: 3.1.1 + picocolors: 1.0.0 + + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.0 + + 
/url-parse@1.5.10: + resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} + dependencies: + querystringify: 2.2.0 + requires-port: 1.0.0 + dev: false + + /use-resize-observer@9.1.0(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==} + peerDependencies: + react: 16.8.0 - 18 + react-dom: 16.8.0 - 18 + dependencies: + '@juggle/resize-observer': 3.4.0 + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: true + + /util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + /util@0.12.5: + resolution: {integrity: sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==} + dependencies: + inherits: 2.0.4 + is-arguments: 1.1.1 + is-generator-function: 1.0.10 + is-typed-array: 1.1.10 + which-typed-array: 1.1.9 + + /utils-merge@1.0.1: + resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} + engines: {node: '>= 0.4.0'} + dev: true + + /uuid@9.0.0: + resolution: {integrity: sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==} + hasBin: true + dev: true + + /v8-compile-cache@2.3.0: + resolution: {integrity: sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==} + dev: true + + /validate-npm-package-license@3.0.4: + resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} + dependencies: + spdx-correct: 3.1.1 + spdx-expression-parse: 3.0.1 + + /vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} + engines: {node: '>= 0.8'} + dev: true + + /vega-canvas@1.2.6: + resolution: {integrity: sha512-rgeYUpslYn/amIfnuv3Sw6n4BGns94OjjZNtUc9IDji6b+K8LGS/kW+Lvay8JX/oFqtulBp8RLcHN6QjqPLA9Q==} + dev: false + + /vega-crossfilter@4.1.0: + resolution: {integrity: sha512-aiOJcvVpiEDIu5uNc4Kf1hakkkPaVOO5fw5T4RSFAw6GEDbdqcB6eZ1xePcsLVic1hxYD5SGiUPdiiIs0SMh2g==} + dependencies: + d3-array: 3.1.1 + vega-dataflow: 5.7.4 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-dataflow@5.7.4: + resolution: {integrity: sha512-JGHTpUo8XGETH3b1V892we6hdjzCWB977ybycIu8DPqRoyrZuj6t1fCVImazfMgQD1LAfJlQybWP+alwKDpKig==} + dependencies: + vega-format: 1.1.0 + vega-loader: 4.5.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-embed@6.22.1(vega-lite@5.12.0)(vega@5.22.1): + resolution: {integrity: sha512-5a3SVhPwG5/Mz3JbcJV4WE38s/7AFrkANtPxoln7E8fbNLIbrurIennaAxB9+l0QOAg63lPSuJBNMUkM6yXvLA==} + peerDependencies: + vega: ^5.21.0 + vega-lite: '*' + dependencies: + fast-json-patch: 3.1.1 + json-stringify-pretty-compact: 3.0.0 + semver: 7.4.0 + tslib: 2.5.3 + vega: 5.22.1 + vega-interpreter: 1.0.5 + vega-lite: 5.12.0(vega@5.22.1) + vega-schema-url-parser: 2.2.0 + vega-themes: 2.13.0(vega-lite@5.12.0)(vega@5.22.1) + vega-tooltip: 0.32.0 + dev: false + bundledDependencies: + - yallist + + /vega-encode@4.9.0: + resolution: {integrity: sha512-etv2BHuCn9bzEc0cxyA2TnbtcAFQGVFmsaqmB4sgBCaqTSEfXMoX68LK3yxBrsdm5LU+y3otJVoewi3qWYCx2g==} + dependencies: + d3-array: 3.1.1 + d3-interpolate: 3.0.1 + vega-dataflow: 5.7.4 + vega-scale: 7.2.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - 
encoding + dev: false + + /vega-event-selector@3.0.0: + resolution: {integrity: sha512-Gls93/+7tEJGE3kUuUnxrBIxtvaNeF01VIFB2Q2Of2hBIBvtHX74jcAdDtkh5UhhoYGD8Q1J30P5cqEBEwtPoQ==} + dev: false + + /vega-event-selector@3.0.1: + resolution: {integrity: sha512-K5zd7s5tjr1LiOOkjGpcVls8GsH/f2CWCrWcpKy74gTCp+llCdwz0Enqo013ZlGaRNjfgD/o1caJRt3GSaec4A==} + dev: false + + /vega-expression@5.0.0: + resolution: {integrity: sha512-y5+c2frq0tGwJ7vYXzZcfVcIRF/QGfhf2e+bV1Z0iQs+M2lI1II1GPDdmOcMKimpoCVp/D61KUJDIGE1DSmk2w==} + dependencies: + '@types/estree': 0.0.50 + vega-util: 1.17.2 + dev: false + + /vega-expression@5.1.0: + resolution: {integrity: sha512-u8Rzja/cn2PEUkhQN3zUj3REwNewTA92ExrcASNKUJPCciMkHJEjESwFYuI6DWMCq4hQElQ92iosOAtwzsSTqA==} + dependencies: + '@types/estree': 1.0.0 + vega-util: 1.17.2 + dev: false + + /vega-force@4.1.0: + resolution: {integrity: sha512-Sssf8iH48vYlz+E7/RpU+SUaJbuLoIL87U4tG2Av4gf/hRiImU49x2TI3EuhFWg1zpaCFxlz0CAaX++Oh/gjdw==} + dependencies: + d3-force: 3.0.0 + vega-dataflow: 5.7.4 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-format@1.1.0: + resolution: {integrity: sha512-6mgpeWw8yGdG0Zdi8aVkx5oUrpJGOpNxqazC2858RSDPvChM/jDFlgRMTYw52qk7cxU0L08ARp4BwmXaI75j0w==} + dependencies: + d3-array: 3.1.1 + d3-format: 3.1.0 + d3-time-format: 4.1.0 + vega-time: 2.1.0 + vega-util: 1.17.2 + dev: false + + /vega-functions@5.13.0: + resolution: {integrity: sha512-Mf53zNyx+c9fFqagEI0T8zc9nMlx0zozOngr8oOpG1tZDKOgwOnUgN99zQKbLHjyv+UzWrq3LYTnSLyVe0ZmhQ==} + dependencies: + d3-array: 3.1.1 + d3-color: 3.0.1 + d3-geo: 3.0.1 + vega-dataflow: 5.7.4 + vega-expression: 5.0.0 + vega-scale: 7.2.0 + vega-scenegraph: 4.10.1 + vega-selections: 5.4.0 + vega-statistics: 1.8.0 + vega-time: 2.1.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-geo@4.4.0: + resolution: {integrity: sha512-3YX41y+J5pu0PMjvBCASg0/lgvu9+QXWJZ+vl6FFKa8AlsIopQ67ZL7ObwqjZcoZMolJ4q0rc+ZO8aj1pXCYcw==} + dependencies: + d3-array: 3.1.1 + d3-color: 3.0.1 + d3-geo: 3.0.1 + vega-canvas: 1.2.6 + vega-dataflow: 5.7.4 + vega-projection: 1.5.0 + vega-statistics: 1.8.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-hierarchy@4.1.0: + resolution: {integrity: sha512-DWBK39IEt4FiQru12twzKSFUvFFZ7KtlH9+lAaqrJnKuIZFCyQ1XOUfKScfbKIlk4KS+DuCTNLI/pxC/f7Sk9Q==} + dependencies: + d3-hierarchy: 3.1.2 + vega-dataflow: 5.7.4 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-interpreter@1.0.5: + resolution: {integrity: sha512-po6oTOmeQqr1tzTCdD15tYxAQLeUnOVirAysgVEemzl+vfmvcEP7jQmlc51jz0jMA+WsbmE6oJywisQPu/H0Bg==} + dev: false + + /vega-label@1.2.0: + resolution: {integrity: sha512-1prOqkCAfXaUvMqavbGI0nbYGqV8UQR9qvuVwrPJ6Yxm3GIUIOA/JRqNY8eZR8USwMP/kzsqlfVEixj9+Y75VQ==} + dependencies: + vega-canvas: 1.2.6 + vega-dataflow: 5.7.4 + vega-scenegraph: 4.10.1 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-lite@5.12.0(vega@5.22.1): + resolution: {integrity: sha512-mc0vavCp77B7wJwfVbNrJTmFuAUAornyi1nTw7UjBOFlCY6S4FKpUH9OToYC1o8rzv8HZ+QAFjz6MzmxIWfH7A==} + engines: {node: '>=16'} + hasBin: true + peerDependencies: + vega: ^5.24.0 + dependencies: + '@types/clone': 2.1.1 + clone: 2.1.2 + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-stringify-pretty-compact: 3.0.0 + tslib: 2.5.3 + vega: 5.22.1 + vega-event-selector: 3.0.1 + vega-expression: 5.1.0 + vega-util: 1.17.2 + yargs: 17.7.2 + dev: false + + /vega-loader@4.5.0: + resolution: {integrity: 
sha512-EkAyzbx0pCYxH3v3wghGVCaKINWxHfgbQ2pYDiYv0yo8e04S8Mv/IlRGTt6BAe7cLhrk1WZ4zh20QOppnGG05w==} + dependencies: + d3-dsv: 3.0.1 + node-fetch: 2.6.7 + topojson-client: 3.1.0 + vega-format: 1.1.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-parser@6.1.4: + resolution: {integrity: sha512-tORdpWXiH/kkXcpNdbSVEvtaxBuuDtgYp9rBunVW9oLsjFvFXbSWlM1wvJ9ZFSaTfx6CqyTyGMiJemmr1QnTjQ==} + dependencies: + vega-dataflow: 5.7.4 + vega-event-selector: 3.0.0 + vega-functions: 5.13.0 + vega-scale: 7.2.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-projection@1.5.0: + resolution: {integrity: sha512-aob7qojh555x3hQWZ/tr8cIJNSWQbm6EoWTJaheZgFOY2x3cDa4Qrg3RJbGw6KwVj/IQk2p40paRzixKZ2kr+A==} + dependencies: + d3-geo: 3.0.1 + d3-geo-projection: 4.0.0 + dev: false + + /vega-regression@1.1.0: + resolution: {integrity: sha512-09K0RemY6cdaXBAyakDUNFfEkRcLkGjkDJyWQPAUqGK59hV2J+G3i4uxkZp18Vu0t8oqU7CgzwWim1s5uEpOcA==} + dependencies: + d3-array: 3.1.1 + vega-dataflow: 5.7.4 + vega-statistics: 1.8.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-runtime@6.1.3: + resolution: {integrity: sha512-gE+sO2IfxMUpV0RkFeQVnHdmPy3K7LjHakISZgUGsDI/ZFs9y+HhBf8KTGSL5pcZPtQsZh3GBQ0UonqL1mp9PA==} + dependencies: + vega-dataflow: 5.7.4 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-scale@7.2.0: + resolution: {integrity: sha512-QYltO/otrZHLrCGGf06Y99XtPtqWXITr6rw7rO9oL+l3d9o5RFl9sjHrVxiM7v+vGoZVWbBd5IPbFhPsXZ6+TA==} + dependencies: + d3-array: 3.1.1 + d3-interpolate: 3.0.1 + d3-scale: 4.0.2 + vega-time: 2.1.0 + vega-util: 1.17.2 + dev: false + + /vega-scenegraph@4.10.1: + resolution: {integrity: sha512-takIpkmNxYHhJYALOYzhTin3EDzbys6U4g+l1yJZVlXG9YTdiCMuEVAdtaQOCqF9/7qytD6pCrMxJY2HaoN0qQ==} + dependencies: + d3-path: 3.0.1 + d3-shape: 3.1.0 + vega-canvas: 1.2.6 + vega-loader: 4.5.0 + vega-scale: 7.2.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-schema-url-parser@2.2.0: + resolution: {integrity: sha512-yAtdBnfYOhECv9YC70H2gEiqfIbVkq09aaE4y/9V/ovEFmH9gPKaEgzIZqgT7PSPQjKhsNkb6jk6XvSoboxOBw==} + dev: false + + /vega-selections@5.4.0: + resolution: {integrity: sha512-Un3JdLDPjIpF9Dh4sw6m1c/QAcfam6m1YXHJ9vJxE/GdJ+sOrPxc7bcEU8VhOmTUN7IQUn4/1ry4JqqOVMbEhw==} + dependencies: + d3-array: 3.1.1 + vega-expression: 5.0.0 + vega-util: 1.17.2 + dev: false + + /vega-statistics@1.8.0: + resolution: {integrity: sha512-dl+LCRS6qS4jWDme/NEdPVt5r649uB4IK6Kyr2/czmGA5JqjuFmtQ9lHQOnRu8945XLkqLf+JIQQo7vnw+nslA==} + dependencies: + d3-array: 3.1.1 + dev: false + + /vega-themes@2.13.0(vega-lite@5.12.0)(vega@5.22.1): + resolution: {integrity: sha512-SVr/YDqGhkVDO2bRS62TeGyr1dVuXaNLJNCu42b1tbcnnmX2m9cyaq8G6gcputPeibArvHT1MsTF7MUzboOIWg==} + peerDependencies: + vega: '*' + vega-lite: '*' + dependencies: + vega: 5.22.1 + vega-lite: 5.12.0(vega@5.22.1) + dev: false + + /vega-time@2.1.0: + resolution: {integrity: sha512-Q9/l3S6Br1RPX5HZvyLD/cQ4K6K8DtpR09/1y7D66gxNorg2+HGzYZINH9nUvN3mxoXcBWg4cCUh3+JvmkDaEg==} + dependencies: + d3-array: 3.1.1 + d3-time: 3.0.0 + vega-util: 1.17.2 + dev: false + + /vega-tooltip@0.32.0: + resolution: {integrity: sha512-Sc4/vZsXDM9nOiHrxc8hfpc9lYc7Nr0FIYYkIi90v2d6IoE6thm6T4Exo2m7cMK4rwevwf6c4/FABwjOMIs4MQ==} + dependencies: + vega-util: 1.17.2 + dev: false + + /vega-transforms@4.10.0: + resolution: {integrity: sha512-Yk6ByzVq5F2niFfPlSsrU5wi+NZhsF7IBpJCcTfms4U7eoyNepUXagdFEJ3VWBD/Lit6GorLXFgO17NYcyS5gg==} + dependencies: + d3-array: 3.1.1 + 
vega-dataflow: 5.7.4 + vega-statistics: 1.8.0 + vega-time: 2.1.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-typings@0.22.3: + resolution: {integrity: sha512-PREcya3nXT9Tk7xU0IhEpOLVTlqizNtKXV55NhI6ApBjJtqVYbJL7IBh2ckKxGBy3YeUQ37BQZl56UqqiYVWBw==} + dependencies: + vega-event-selector: 3.0.0 + vega-expression: 5.0.0 + vega-util: 1.17.2 + dev: false + + /vega-util@1.17.0: + resolution: {integrity: sha512-HTaydZd9De3yf+8jH66zL4dXJ1d1p5OIFyoBzFiOli4IJbwkL1jrefCKz6AHDm1kYBzDJ0X4bN+CzZSCTvNk1w==} + dev: false + + /vega-util@1.17.2: + resolution: {integrity: sha512-omNmGiZBdjm/jnHjZlywyYqafscDdHaELHx1q96n5UOz/FlO9JO99P4B3jZg391EFG8dqhWjQilSf2JH6F1mIw==} + dev: false + + /vega-view-transforms@4.5.8: + resolution: {integrity: sha512-966m7zbzvItBL8rwmF2nKG14rBp7q+3sLCKWeMSUrxoG+M15Smg5gWEGgwTG3A/RwzrZ7rDX5M1sRaAngRH25g==} + dependencies: + vega-dataflow: 5.7.4 + vega-scenegraph: 4.10.1 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-view@5.11.0: + resolution: {integrity: sha512-MI9NTRFmtFX6ADk6KOHhi8bhHjC9pPm42Bj2+74c6l1d3NQZf9Jv7lkiGqKohdkQDNH9LPwz/6slhKwPU9JdkQ==} + dependencies: + d3-array: 3.1.1 + d3-timer: 3.0.1 + vega-dataflow: 5.7.4 + vega-format: 1.1.0 + vega-functions: 5.13.0 + vega-runtime: 6.1.3 + vega-scenegraph: 4.10.1 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-voronoi@4.2.0: + resolution: {integrity: sha512-1iuNAVZgUHRlBpdq4gSga3KlQmrgFfwy+KpyDgPLQ8HbLkhcVeT7RDh2L6naluqD7Op0xVLms3clR920WsYryQ==} + dependencies: + d3-delaunay: 6.0.2 + vega-dataflow: 5.7.4 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega-wordcloud@4.1.3: + resolution: {integrity: sha512-is4zYn9FMAyp9T4SAcz2P/U/wqc0Lx3P5YtpWKCbOH02a05vHjUQrQ2TTPOuvmMfAEDCSKvbMSQIJMOE018lJA==} + dependencies: + vega-canvas: 1.2.6 + vega-dataflow: 5.7.4 + vega-scale: 7.2.0 + vega-statistics: 1.8.0 + vega-util: 1.17.2 + transitivePeerDependencies: + - encoding + dev: false + + /vega@5.22.1: + resolution: {integrity: sha512-KJBI7OWSzpfCPbmWl3GQCqBqbf2TIdpWS0mzO6MmWbvdMhWHf74P9IVnx1B1mhg0ZTqWFualx9ZYhWzMMwudaQ==} + dependencies: + vega-crossfilter: 4.1.0 + vega-dataflow: 5.7.4 + vega-encode: 4.9.0 + vega-event-selector: 3.0.0 + vega-expression: 5.0.0 + vega-force: 4.1.0 + vega-format: 1.1.0 + vega-functions: 5.13.0 + vega-geo: 4.4.0 + vega-hierarchy: 4.1.0 + vega-label: 1.2.0 + vega-loader: 4.5.0 + vega-parser: 6.1.4 + vega-projection: 1.5.0 + vega-regression: 1.1.0 + vega-runtime: 6.1.3 + vega-scale: 7.2.0 + vega-scenegraph: 4.10.1 + vega-statistics: 1.8.0 + vega-time: 2.1.0 + vega-transforms: 4.10.0 + vega-typings: 0.22.3 + vega-util: 1.17.0 + vega-view: 5.11.0 + vega-view-transforms: 4.5.8 + vega-voronoi: 4.2.0 + vega-wordcloud: 4.1.3 + transitivePeerDependencies: + - encoding + dev: false + + /vfile-message@2.0.4: + resolution: {integrity: sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==} + dependencies: + '@types/unist': 2.0.6 + unist-util-stringify-position: 2.0.3 + dev: false + + /vite-node@0.34.0(@types/node@20.3.2)(less@4.1.3): + resolution: {integrity: sha512-rGZMvpb052rjUwJA/a17xMfOibzNF7byMdRSTcN2Lw8uxX08s5EfjWW5mBkm3MSFTPctMSVtT2yC+8ShrZbT5g==} + engines: {node: '>=v14.18.0'} + hasBin: true + dependencies: + cac: 6.7.14 + debug: 4.3.4 + mlly: 1.4.0 + pathe: 1.1.1 + picocolors: 1.0.0 + vite: 4.3.9(@types/node@20.3.2)(less@4.1.3) + transitivePeerDependencies: + - '@types/node' + - less + - sass + - stylus + - sugarss + - 
supports-color + - terser + dev: false + + /vite@4.3.5(@types/node@20.3.1)(less@4.1.3): + resolution: {integrity: sha512-0gEnL9wiRFxgz40o/i/eTBwm+NEbpUeTWhzKrZDSdKm6nplj+z4lKz8ANDgildxHm47Vg8EUia0aicKbawUVVA==} + engines: {node: ^14.18.0 || >=16.0.0} + hasBin: true + peerDependencies: + '@types/node': '>= 14' + less: '*' + sass: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + dependencies: + '@types/node': 20.3.1 + esbuild: 0.17.14 + less: 4.1.3 + postcss: 8.4.23 + rollup: 3.21.6 + optionalDependencies: + fsevents: 2.3.2 + dev: true + + /vite@4.3.9(@types/node@20.3.1)(less@4.1.3): + resolution: {integrity: sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==} + engines: {node: ^14.18.0 || >=16.0.0} + hasBin: true + peerDependencies: + '@types/node': '>= 14' + less: '*' + sass: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + dependencies: + '@types/node': 20.3.1 + esbuild: 0.17.14 + less: 4.1.3 + postcss: 8.4.23 + rollup: 3.21.6 + optionalDependencies: + fsevents: 2.3.2 + + /vite@4.3.9(@types/node@20.3.2)(less@4.1.3): + resolution: {integrity: sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==} + engines: {node: ^14.18.0 || >=16.0.0} + hasBin: true + peerDependencies: + '@types/node': '>= 14' + less: '*' + sass: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + dependencies: + '@types/node': 20.3.2 + esbuild: 0.17.14 + less: 4.1.3 + postcss: 8.4.23 + rollup: 3.21.6 + optionalDependencies: + fsevents: 2.3.2 + + /vitefu@0.2.4(vite@4.3.5): + resolution: {integrity: sha512-fanAXjSaf9xXtOOeno8wZXIhgia+CZury481LsDaV++lSvcU2R9Ch2bPh3PYFyoHW+w9LqAeYRISVQjUIew14g==} + peerDependencies: + vite: ^3.0.0 || ^4.0.0 + peerDependenciesMeta: + vite: + optional: true + dependencies: + vite: 4.3.5(@types/node@20.3.1)(less@4.1.3) + dev: true + + /vitefu@0.2.4(vite@4.3.9): + resolution: {integrity: sha512-fanAXjSaf9xXtOOeno8wZXIhgia+CZury481LsDaV++lSvcU2R9Ch2bPh3PYFyoHW+w9LqAeYRISVQjUIew14g==} + peerDependencies: + vite: ^3.0.0 || ^4.0.0 + peerDependenciesMeta: + vite: + optional: true + dependencies: + vite: 4.3.9(@types/node@20.3.1)(less@4.1.3) + + /vitest@0.34.0(jsdom@22.1.0)(less@4.1.3): + resolution: {integrity: sha512-8Pnc1fVt1P6uBncdUZ++hgiJGgxIRKuz4bmS/PQziaEcUj0D1g9cGiR1MbLrcsvFTC6fgrqDhYoTAdBG356WMA==} + engines: {node: '>=v14.18.0'} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@vitest/browser': '*' + '@vitest/ui': '*' + happy-dom: '*' + jsdom: '*' + playwright: '*' + safaridriver: '*' + webdriverio: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + playwright: + optional: true + safaridriver: + optional: true + webdriverio: + optional: true + dependencies: + '@types/chai': 4.3.5 + '@types/chai-subset': 1.3.3 + '@types/node': 20.3.2 + '@vitest/expect': 0.34.0 + '@vitest/runner': 
0.34.0 + '@vitest/snapshot': 0.34.0 + '@vitest/spy': 0.34.0 + '@vitest/utils': 0.34.0 + acorn: 8.10.0 + acorn-walk: 8.2.0 + cac: 6.7.14 + chai: 4.3.7 + debug: 4.3.4 + jsdom: 22.1.0 + local-pkg: 0.4.3 + magic-string: 0.30.1 + pathe: 1.1.1 + picocolors: 1.0.0 + std-env: 3.3.3 + strip-literal: 1.0.1 + tinybench: 2.5.0 + tinypool: 0.7.0 + vite: 4.3.9(@types/node@20.3.2)(less@4.1.3) + vite-node: 0.34.0(@types/node@20.3.2)(less@4.1.3) + why-is-node-running: 2.2.2 + transitivePeerDependencies: + - less + - sass + - stylus + - sugarss + - supports-color + - terser + dev: false + + /w3c-keyname@2.2.6: + resolution: {integrity: sha512-f+fciywl1SJEniZHD6H+kUO8gOnwIr7f4ijKA6+ZvJFjeGi1r4PDLl53Ayud9O/rk64RqgoQine0feoeOU0kXg==} + dev: false + + /w3c-xmlserializer@4.0.0: + resolution: {integrity: sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==} + engines: {node: '>=14'} + dependencies: + xml-name-validator: 4.0.0 + dev: false + + /walker@1.0.8: + resolution: {integrity: sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==} + dependencies: + makeerror: 1.0.12 + dev: true + + /watchpack@2.4.0: + resolution: {integrity: sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==} + engines: {node: '>=10.13.0'} + dependencies: + glob-to-regexp: 0.4.1 + graceful-fs: 4.2.9 + dev: true + + /wcwidth@1.0.1: + resolution: {integrity: sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==} + dependencies: + defaults: 1.0.4 + + /web-encoding@1.1.5: + resolution: {integrity: sha512-HYLeVCdJ0+lBYV2FvNZmv3HJ2Nt0QYXqZojk3d9FJOLkwnuhzM9tmamh8d7HPM8QqjKH8DeHkFTx+CFlWpZZDA==} + dependencies: + util: 0.12.5 + optionalDependencies: + '@zxing/text-encoding': 0.9.0 + dev: false + + /webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + + /webidl-conversions@7.0.0: + resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==} + engines: {node: '>=12'} + dev: false + + /webpack-sources@3.2.3: + resolution: {integrity: sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==} + engines: {node: '>=10.13.0'} + dev: true + + /webpack-virtual-modules@0.4.6: + resolution: {integrity: sha512-5tyDlKLqPfMqjT3Q9TAqf2YqjwmnUleZwzJi1A5qXnlBCdj2AtOJ6wAWdglTIDOPgOiOrXeBeFcsQ8+aGQ6QbA==} + dev: true + + /webpack@5.88.1(esbuild@0.17.14): + resolution: {integrity: sha512-FROX3TxQnC/ox4N+3xQoWZzvGXSuscxR32rbzjpXgEzWudJFEJBpdlkkob2ylrv5yzzufD1zph1OoFsLtm6stQ==} + engines: {node: '>=10.13.0'} + hasBin: true + peerDependencies: + webpack-cli: '*' + peerDependenciesMeta: + webpack-cli: + optional: true + dependencies: + '@types/eslint-scope': 3.7.4 + '@types/estree': 1.0.0 + '@webassemblyjs/ast': 1.11.6 + '@webassemblyjs/wasm-edit': 1.11.6 + '@webassemblyjs/wasm-parser': 1.11.6 + acorn: 8.10.0 + acorn-import-assertions: 1.9.0(acorn@8.10.0) + browserslist: 4.21.9 + chrome-trace-event: 1.0.3 + enhanced-resolve: 5.15.0 + es-module-lexer: 1.3.0 + eslint-scope: 5.1.1 + events: 3.3.0 + glob-to-regexp: 0.4.1 + graceful-fs: 4.2.9 + json-parse-even-better-errors: 2.3.1 + loader-runner: 4.3.0 + mime-types: 2.1.34 + neo-async: 2.6.2 + schema-utils: 3.3.0 + tapable: 2.2.1 + terser-webpack-plugin: 5.3.9(esbuild@0.17.14)(webpack@5.88.1) + watchpack: 2.4.0 + webpack-sources: 3.2.3 + 
transitivePeerDependencies: + - '@swc/core' + - esbuild + - uglify-js + dev: true + + /whatwg-encoding@2.0.0: + resolution: {integrity: sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==} + engines: {node: '>=12'} + dependencies: + iconv-lite: 0.6.3 + dev: false + + /whatwg-mimetype@3.0.0: + resolution: {integrity: sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==} + engines: {node: '>=12'} + dev: false + + /whatwg-url@12.0.1: + resolution: {integrity: sha512-Ed/LrqB8EPlGxjS+TrsXcpUond1mhccS3pchLhzSgPCnTimUCKj3IZE75pAs5m6heB2U2TMerKFUXheyHY+VDQ==} + engines: {node: '>=14'} + dependencies: + tr46: 4.1.1 + webidl-conversions: 7.0.0 + dev: false + + /whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + + /which-boxed-primitive@1.0.2: + resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==} + dependencies: + is-bigint: 1.0.4 + is-boolean-object: 1.1.2 + is-number-object: 1.0.6 + is-string: 1.0.7 + is-symbol: 1.0.4 + dev: false + + /which-module@2.0.0: + resolution: {integrity: sha512-B+enWhmw6cjfVC7kS8Pj9pCrKSc5txArRyaYGe088shv/FGWH+0Rjx/xPgtsWfsUtS27FkP697E4DDhgrgoc0Q==} + dev: false + + /which-pm@2.0.0: + resolution: {integrity: sha512-Lhs9Pmyph0p5n5Z3mVnN0yWcbQYUAD7rbQUiMsQxOJ3T57k7RFe35SUwWMf7dsbDZks1uOmw4AecB/JMDj3v/w==} + engines: {node: '>=8.15'} + dependencies: + load-yaml-file: 0.2.0 + path-exists: 4.0.0 + dev: false + + /which-typed-array@1.1.9: + resolution: {integrity: sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==} + engines: {node: '>= 0.4'} + dependencies: + available-typed-arrays: 1.0.5 + call-bind: 1.0.2 + for-each: 0.3.3 + gopd: 1.0.1 + has-tostringtag: 1.0.0 + is-typed-array: 1.1.10 + + /which@1.3.1: + resolution: {integrity: sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==} + hasBin: true + dependencies: + isexe: 2.0.0 + dev: false + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + dependencies: + isexe: 2.0.0 + + /why-is-node-running@2.2.2: + resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} + engines: {node: '>=8'} + hasBin: true + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + dev: false + + /wide-align@1.1.5: + resolution: {integrity: sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==} + dependencies: + string-width: 4.2.3 + + /wikidata-lang@4.1.2: + resolution: {integrity: sha512-Nw+loOQQO02cVZQCAFCaE5IWdPakX9zXLr4Pjv0GA81OyI+TPWC6iLy/0cORL6YbYde19MbRVITIHiKRv1HmFA==} + dev: true + + /word-wrap@1.2.3: + resolution: {integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==} + engines: {node: '>=0.10.0'} + dev: true + + /wordwrap@1.0.0: + resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} + dev: true + + /worker-factory@6.0.69: + resolution: {integrity: sha512-vut3DexCAyRicCuvfUAhOAlt7s4segcDutnqAH/ybxbpYzDu4qLfkmpEzfinbGCkPffTzXq64XulaSdqVG3Ncw==} + dependencies: + '@babel/runtime': 7.22.6 + compilerr: 9.0.21 
+ fast-unique-numbers: 6.0.21 + tslib: 2.6.1 + dev: false + + /worker-factory@7.0.9: + resolution: {integrity: sha512-t5tPtTLXh663SzgCW/2oFSLtsKZYI3Oko/lcOgrakH2EZ8Qm9p0zea/ceGmiEwwVgVdqjg+xE8AboIOwY3A7iA==} + dependencies: + '@babel/runtime': 7.22.6 + compilerr: 11.0.8 + fast-unique-numbers: 8.0.7 + tslib: 2.6.1 + dev: false + + /wrap-ansi@6.2.0: + resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==} + engines: {node: '>=8'} + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: false + + /wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: false + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + /write-file-atomic@2.4.3: + resolution: {integrity: sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ==} + dependencies: + graceful-fs: 4.2.9 + imurmurhash: 0.1.4 + signal-exit: 3.0.7 + dev: true + + /write-file-atomic@4.0.2: + resolution: {integrity: sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + dependencies: + imurmurhash: 0.1.4 + signal-exit: 3.0.7 + dev: true + + /ws@6.2.2: + resolution: {integrity: sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + dependencies: + async-limiter: 1.0.1 + dev: true + + /ws@8.13.0(bufferutil@4.0.7): + resolution: {integrity: sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + dependencies: + bufferutil: 4.0.7 + + /xml-name-validator@4.0.0: + resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} + engines: {node: '>=12'} + dev: false + + /xmlchars@2.2.0: + resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + dev: false + + /xtend@4.0.2: + resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} + engines: {node: '>=0.4'} + + /y18n@4.0.3: + resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} + dev: false + + /y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + dev: false + + /yallist@2.1.2: + resolution: {integrity: sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==} + dev: false + + /yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + /yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + + /yaml@1.10.2: + 
resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} + engines: {node: '>= 6'} + + /yargs-parser@18.1.3: + resolution: {integrity: sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==} + engines: {node: '>=6'} + dependencies: + camelcase: 5.3.1 + decamelize: 1.2.0 + dev: false + + /yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + dev: false + + /yargs@15.4.1: + resolution: {integrity: sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==} + engines: {node: '>=8'} + dependencies: + cliui: 6.0.0 + decamelize: 1.2.0 + find-up: 4.1.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + require-main-filename: 2.0.0 + set-blocking: 2.0.0 + string-width: 4.2.3 + which-module: 2.0.0 + y18n: 4.0.3 + yargs-parser: 18.1.3 + dev: false + + /yargs@17.7.1: + resolution: {integrity: sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==} + engines: {node: '>=12'} + dependencies: + cliui: 8.0.1 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + dev: false + + /yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + dependencies: + cliui: 8.0.1 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + dev: false + + /yauzl@2.10.0: + resolution: {integrity: sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==} + dependencies: + buffer-crc32: 0.2.13 + fd-slicer: 1.1.0 + dev: true + + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + /yocto-queue@1.0.0: + resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} + engines: {node: '>=12.20'} + dev: false diff --git a/testbed/gradio-app__gradio/pnpm-workspace.yaml b/testbed/gradio-app__gradio/pnpm-workspace.yaml new file mode 100644 index 0000000000000000000000000000000000000000..930546311b1563b85135a6acd73f64638212c1da --- /dev/null +++ b/testbed/gradio-app__gradio/pnpm-workspace.yaml @@ -0,0 +1,5 @@ +packages: + - 'js/*' + - "gradio" + - "client/*" + - "client/*/*" \ No newline at end of file diff --git a/testbed/gradio-app__gradio/pyproject.toml b/testbed/gradio-app__gradio/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..34c43155ae36259f9d9429b4017cb5dfaacca674 --- /dev/null +++ b/testbed/gradio-app__gradio/pyproject.toml @@ -0,0 +1,124 @@ +[build-system] +requires = ["hatchling", "hatch-requirements-txt", "hatch-fancy-pypi-readme>=22.5.0"] +build-backend = "hatchling.build" + +[project] +name = "gradio" +dynamic = ["version", "dependencies", "optional-dependencies", "readme"] +description = "Python library for easily interacting with trained machine learning models" +license = "Apache-2.0" +requires-python = ">=3.8" +authors = [ + { name = "Abubakar Abid", email = "team@gradio.app" }, + { name = "Ali Abid", email = "team@gradio.app" }, + { name = "Ali Abdalla", email = "team@gradio.app" }, + { name = "Dawood Khan", email = 
"team@gradio.app" }, + { name = "Ahsen Khaliq", email = "team@gradio.app" }, + { name = "Pete Allen", email = "team@gradio.app" }, + { name = "Ömer Faruk Özdemir", email = "team@gradio.app" }, +] +keywords = ["machine learning", "reproducibility", "visualization"] + +classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Visualization', +] + +[project.scripts] +gradio = "gradio.cli:cli" +upload_theme = "gradio.themes.upload_theme:main" + +[project.urls] +Homepage = "https://github.com/gradio-app/gradio" + +[tool.hatch.version] +path = "gradio/package.json" +pattern = ".*\"version\":\\s*\"(?P[^\"]+)\"" + +[tool.hatch.metadata.hooks.requirements_txt] +filename = "requirements.txt" + +[tool.hatch.metadata.hooks.requirements_txt.optional-dependencies] +oauth = ["requirements-oauth.txt"] + +[tool.hatch.metadata.hooks.fancy-pypi-readme] +content-type = "text/markdown" +fragments = [ + { path = "README.md" }, +] + +[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] +pattern = "(website/homepage|readme_files)/" +replacement = 'https://raw.githubusercontent.com/gradio-app/gradio/main/\g<1>/' + +[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] +pattern = 'demo/([\S]*.gif)' +replacement = 'https://raw.githubusercontent.com/gradio-app/gradio/main/demo/\g<1>' + +[tool.hatch.build] +artifacts = [ + "/gradio/templates", +] + + +[tool.hatch.build.targets.sdist] +include = [ + "/gradio", + "/test", + "/README.md", + "/requirements.txt", + "/requirements-oauth.txt", +] + +[tool.pyright] +include = ["gradio/**/*.py"] +exclude = ["gradio/themes/"] + +[tool.ruff] +target-version = "py37" +extend-select = [ + "B", + "C", + "I", + "N", + "SIM", + "UP", +] +ignore = [ + "C901", # function is too complex (TODO: un-ignore this) + "B023", # function definition in loop (TODO: un-ignore this) + "B008", # function call in argument defaults + "B017", # pytest.raises considered evil + "B028", # explicit stacklevel for warnings + "E501", # from scripts/lint_backend.sh + "SIM105", # contextlib.suppress (has a performance cost) + "SIM117", # multiple nested with blocks (doesn't look good with gr.Row etc) + "UP007", # use X | Y for type annotations (TODO: can be enabled once Pydantic plays nice with them) +] + +[tool.ruff.per-file-ignores] +"demo/*" = [ + "E402", # Demos may have imports not at the top + "E741", # Demos may have ambiguous variable names + "F405", # Demos may use star imports + "I", # Don't care about import order +] +"gradio/__init__.py" = [ + "F401", # "Imported but unused" (TODO: it would be better to be explicit and use __all__) +] +"gradio/routes.py" = [ + "UP006", # Pydantic on Python 3.7 requires old-style type annotations (TODO: drop when Python 3.7 is dropped) +] + +[tool.pytest.ini_options] +GRADIO_ANALYTICS_ENABLED = "False" \ No newline at end of file diff --git a/testbed/gradio-app__gradio/readme_template.md b/testbed/gradio-app__gradio/readme_template.md new file mode 100644 index 0000000000000000000000000000000000000000..439491d801c2ec0d86e3343ba9455958d6c0e318 --- 
/dev/null +++ b/testbed/gradio-app__gradio/readme_template.md @@ -0,0 +1,71 @@ +
+ +[gradio](https://gradio.app)
+Build & share delightful machine learning apps easily + +[![gradio-backend](https://github.com/gradio-app/gradio/actions/workflows/backend.yml/badge.svg)](https://github.com/gradio-app/gradio/actions/workflows/backend.yml) +[![gradio-ui](https://github.com/gradio-app/gradio/actions/workflows/ui.yml/badge.svg)](https://github.com/gradio-app/gradio/actions/workflows/ui.yml) + [![PyPI](https://img.shields.io/pypi/v/gradio)](https://pypi.org/project/gradio/) +[![PyPI downloads](https://img.shields.io/pypi/dm/gradio)](https://pypi.org/project/gradio/) +![Python version](https://img.shields.io/badge/python-3.8+-important) +[![Twitter follow](https://img.shields.io/twitter/follow/gradio?style=social&label=follow)](https://twitter.com/gradio) + +[Website](https://gradio.app) +| [Documentation](https://gradio.app/docs/) +| [Guides](https://gradio.app/guides/) +| [Getting Started](https://gradio.app/getting_started/) +| [Examples](demo/) +| [中文](readme_files/zh-cn#readme) + +
+ +# Gradio: Build Machine Learning Web Apps — in Python + +Gradio is an open-source Python library for building machine learning and data science demos and web applications. + +With Gradio, you can quickly create a beautiful user interface around your machine learning models or data science workflow and let people "try it out" by dragging-and-dropping in their own images, +pasting text, recording their own voice, and interacting with your demo, all through the browser. + +![Interface montage](readme_files/header-image.jpg) + +Gradio is useful for: + +- **Demoing** your machine learning models for clients/collaborators/users/students. + +- **Deploying** your models quickly with automatic shareable links and getting feedback on model performance. + +- **Debugging** your model interactively during development using built-in manipulation and interpretation tools. + +$getting_started + +## Open Source Stack + +Gradio is built with many wonderful open-source libraries; please support them as well! + +[huggingface](https://huggingface.co) +[python](https://www.python.org) +[fastapi](https://fastapi.tiangolo.com) +[encode](https://www.encode.io) +[svelte](https://svelte.dev) +[vite](https://vitejs.dev) +[pnpm](https://pnpm.io) +[tailwind](https://tailwindcss.com) +[storybook](https://storybook.js.org/) +[chromatic](https://www.chromatic.com/) + +## License + +Gradio is licensed under the Apache License 2.0 found in the [LICENSE](LICENSE) file in the root directory of this repository. + +## Citation + +Also check out the paper _[Gradio: Hassle-Free Sharing and Testing of ML Models in the Wild](https://arxiv.org/abs/1906.02569), ICML HILL 2019_, and please cite it if you use Gradio in your work. + +``` +@article{abid2019gradio, + title = {Gradio: Hassle-Free Sharing and Testing of ML Models in the Wild}, + author = {Abid, Abubakar and Abdalla, Ali and Abid, Ali and Khan, Dawood and Alfozan, Abdulrahman and Zou, James}, + journal = {arXiv preprint arXiv:1906.02569}, + year = {2019}, +} +``` diff --git a/testbed/gradio-app__gradio/render_readme.py b/testbed/gradio-app__gradio/render_readme.py new file mode 100644 index 0000000000000000000000000000000000000000..1b681877e69d7e44401b21743eaa40873aea4226 --- /dev/null +++ b/testbed/gradio-app__gradio/render_readme.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +import re +from pathlib import Path + +README_TEMPLATE_FILEPATH = "readme_template.md" +GETTING_STARTED_TEMPLATE_FILEPATH = "guides/01_getting-started/01_quickstart.md" + +readme_template = Path(README_TEMPLATE_FILEPATH).read_text() +getting_started_template = Path(GETTING_STARTED_TEMPLATE_FILEPATH).read_text() + +# Extract all the code and demo tags from the getting started template +code_tags = re.findall(r"\$code_([^\s]+)", getting_started_template) +demo_tags = re.findall(r"\$demo_([^\s]+)", getting_started_template) +codes = {} +demos = {} + +for src in code_tags: + context = Path(f"demo/{src}/run.py").read_text() + # Replace the condition to run the demo directly with actual launch code + context = re.sub(r"if __name__(.*[\n$]*)*", "demo.launch()", context) + codes[src] = f"```python\n{context}\n```\n" # Convert to Markdown code block + +for src in demo_tags: + demos[src] = f"![`{src}` demo](demo/{src}/screenshot.gif)" + +# Replace the headers in the getting started template with a smaller header (e.g. H3 instead of H2) to +# make the README more readable and less cluttered. 
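+# (Illustrative example: the substitution below prepends one "#" to each run of leading "#"s, so a heading such as "# Quickstart" would become "## Quickstart".)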
+getting_started_template = re.sub(r"^(#+)", r"#\1", getting_started_template, flags=re.MULTILINE) +readme_template = readme_template.replace("$getting_started", getting_started_template) + +# Now put the codes and the screenshots in the README template +readme_template = re.sub(r"\$code_([^\s]+)", lambda x: codes[x.group(1)], readme_template) +readme_template = re.sub(r"\$demo_([^\s]+)", lambda x: demos[x.group(1)], readme_template) + +# Save the README template to the actual README.md file (with a note about the editing) +EDITING_NOTE = ("") +Path("README.md").write_text(f"{EDITING_NOTE}\n\n{readme_template}") diff --git a/testbed/gradio-app__gradio/renovate.json b/testbed/gradio-app__gradio/renovate.json new file mode 100644 index 0000000000000000000000000000000000000000..d1d2b6ccec483919b4992f527f428edc6a52ae14 --- /dev/null +++ b/testbed/gradio-app__gradio/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "github>whitesource/merge-confidence:beta", + "config:base", + ":preserveSemverRanges", + "group:allNonMajor", + ":semanticCommitTypeAll(chore)" + ], + "ignorePaths": [ + "**/demo/**", + "**/requirements.txt", + "**/Dockerfile", + "**/website/**" + ], + "ignoreDeps": [ + "lazy-brush", + "babylonjs", + "babylonjs-loaders", + "postcss-load-config", + "postcss-nested", + "svelte" + ] +} diff --git a/testbed/gradio-app__gradio/requirements-oauth.txt b/testbed/gradio-app__gradio/requirements-oauth.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6cb0c8bdc0ebac9cc0654a22c3110b57980c54e --- /dev/null +++ b/testbed/gradio-app__gradio/requirements-oauth.txt @@ -0,0 +1,2 @@ +authlib +itsdangerous # required for starlette SessionMiddleware \ No newline at end of file diff --git a/testbed/gradio-app__gradio/requirements.txt b/testbed/gradio-app__gradio/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ebe9505ba749148133f8da4cc0ce5c77d4cc22d7 --- /dev/null +++ b/testbed/gradio-app__gradio/requirements.txt @@ -0,0 +1,25 @@ +aiofiles>=22.0,<24.0 +altair>=4.2.0,<6.0 +fastapi +ffmpy +gradio_client==0.5.2 +httpx +huggingface_hub>=0.14.0 +importlib_resources>=1.3,<7.0 +Jinja2<4.0 +markupsafe~=2.0 +matplotlib~=3.0 +numpy~=1.0 +orjson~=3.0 +packaging +pandas>=1.0,<3.0 +pillow>=8.0,<11.0 +pydantic>=1.7.4,!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,<3.0.0 +python-multipart # required for fastapi forms +pydub +pyyaml>=5.0,<7.0 +requests~=2.0 +semantic_version~=2.0 +typing_extensions~=4.0 +uvicorn>=0.14.0 +websockets>=10.0,<12.0 \ No newline at end of file diff --git a/testbed/gradio-app__gradio/style.md b/testbed/gradio-app__gradio/style.md new file mode 100644 index 0000000000000000000000000000000000000000..37a54f3a372685dd5ff99648d90c00780619395e --- /dev/null +++ b/testbed/gradio-app__gradio/style.md @@ -0,0 +1,160 @@ +# component-styles + +## Textbox + +| name | type | description | +| ----------- | ------------------------------------ | ------------------------------ | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of text input | +| `border` | `bool` or `(bool, bool, bool, bool)` | borders of text input | +| `container` | `bool` | show or hide the container box | + +## Number + +| name | type | description | +| ----------- | ------------------------------------ | ------------------------------ | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of text input | +| `border` | `bool` or `(bool, bool, bool, bool)` | borders of text input | +| `container` | `bool` | 
show or hide the container box | + +## Slider + +| name | type | description | +| ----------- | ------ | ------------------------------ | +| `container` | `bool` | show or hide the container box | + +## Checkbox + +| name | type | description | +| ----------- | ------------------------------------ | ------------------------------ | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of checkbox | +| `border` | `bool` or `(bool, bool, bool, bool)` | borders of checkbox | +| `container` | `bool` | show or hide the container box | + +## Checkbox Group + +| name | type | description | +| ---------------- | ------------------------------------ | ----------------------------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of checkboxes | +| `container` | `bool` | show or hide the container box | +| `item_container` | `bool` | show or hide the checkbox container boxes | + +## Radio + +| name | type | description | +| ---------------- | ------ | -------------------------------------- | +| `container` | `bool` | show or hide the container box | +| `item_container` | `bool` | show or hide the radio container boxes | + +## Dropdown + +| name | type | description | +| ----------- | ------------------------------------ | ------------------------------ | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of input | +| `border` | `bool` or `(bool, bool, bool, bool)` | borders of input | +| `container` | `bool` | show or hide the container box | + +## Image + +| name | type | description | +| --------- | ------------------------------------ | ------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box | + +## Video + +| name | type | description | +| --------- | ------------------------------------ | ------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box | + +## Audio + +| name | type | description | +| --------- | ------------------------------------ | ------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box | + +## File + +| name | type | description | +| --------- | ------------------------------------ | ------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box | + +## Dataframe + +| name | type | description | +| --------- | ------------------------------------ | ------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box | + +## Timeseries + +| name | type | description | +| --------- | ------------------------------------ | ------------------- | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box | + +## Label + +| name | type | description | +| ----------- | ------ | ------------------------------ | +| `container` | `bool` | show or hide the container box | + +## HighlightedText + +| name | type | description | +| ----------- | ------------------------------------ | ------------------------------ | +| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of labels | +| `color_map` | `Dict[str, str]` | color map of labels and colors | +| `container` | `bool` | show or hide the container box | + +## JSON + +| name | type | description | +| ----------- | ------ | ------------------------------ | +| `container` | `bool` | show or hide the container box | + +## HTML + +Nothing + +## Gallery + +| name | type | description | +| ----------- | ----------------------------------------- | ----------------------------------- | +| `rounded` | `bool` 
or `(bool, bool, bool, bool)` | corners of images |
+| `grid` | `int` or `(int, int, int, int, int, int)` | grid for images |
+| `height` | `"auto"` | height of gallery (auto or default) |
+| `container` | `bool` | show or hide the container box |
+
+## Chatbot
+
+| name | type | description |
+| ----------- | ------------------------------------ | ------------------------------------------------ |
+| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of chat bubbles |
+| `color_map` | `Dict[str, str]` | color map of user and bot color for chat bubbles |
+
+## Model3D
+
+| name | type | description |
+| --------- | ------------------------------------ | ------------------- |
+| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of main box |
+
+## Plot
+
+Nothing (yet)
+
+## Markdown
+
+Nothing
+
+## Button
+
+| name | type | description |
+| ------------ | ------------------------------------ | ---------------------------------------- |
+| `rounded` | `bool` or `(bool, bool, bool, bool)` | corners of button |
+| `border` | `bool` or `(bool, bool, bool, bool)` | borders of button |
+| `full_width` | `bool` | whether button expands to fill container |
+
+## Dataset
+
+Nothing
+
+## Variable
+
+Nothing
diff --git a/testbed/gradio-app__gradio/sweep.yaml b/testbed/gradio-app__gradio/sweep.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..31603692a35ad8fd94a23be50ed6dfaada3d3203
--- /dev/null
+++ b/testbed/gradio-app__gradio/sweep.yaml
@@ -0,0 +1,2 @@
+# Reference: https://github.com/sweepai/sweep/blob/main/sweep.yaml
+branch: main
diff --git a/testbed/gradio-app__gradio/test-strategy.md b/testbed/gradio-app__gradio/test-strategy.md
new file mode 100644
index 0000000000000000000000000000000000000000..778ec5b190d8e38112e5428eb579235e28d77f31
--- /dev/null
+++ b/testbed/gradio-app__gradio/test-strategy.md
@@ -0,0 +1,110 @@
+# Test Strategy
+
+Very brief, mildly aspirational test strategy document. This isn't where we are, but it is where we want to get to.
+
+This document does not detail how to set up an environment or how to run the tests locally, nor does it contain the best practices we try to follow when writing tests; that information exists in the [contributing guide](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md).
+
+## Objectives
+
+The purposes of all testing activities on Gradio fit one of the following objectives:
+
+1. Ensure that the Gradio library functions as we expect it to.
+2. Enable the maintenance team to quickly identify both the presence and source of defects.
+3. Prevent regressions, i.e. if we fix something it should stay fixed.
+4. Improve the quality of the codebase in order to ease maintenance efforts.
+5. Reduce the amount of manual testing required.
+
+## Scope
+
+Testing is always a tradeoff. We can't cover everything unless we want to spend all of our time writing and running tests. We should focus on a few key areas.
+
+We should not focus on code coverage but on test coverage, following the criteria below:
+
+- The documented Gradio API (that's the bit that users interact with via python) should be tested thoroughly. (1)
+- Additional gradio elements that are both publicly available and used internally (such as the Python and JS client libraries) should be tested thoroughly. (1)
+- Additional gradio elements that are publicly available should be tested as thoroughly as is reasonable (this could be things like demos/the gradio CLI/ other tooling). The importance of each individual component,
and the appropriate investment of effort, needs to be assessed on a case-by-case basis. (1)
+- Element boundaries should be tested where there is reasonable cause to do so (e.g. config generation) (1)
+- Implementation details should only be tested where there is sufficient complexity to warrant it. (1)
+- Bug fixes should be accompanied by tests wherever is reasonably possible. (3)
+
+## Types of testing
+
+Our tests will broadly fall into one of three categories:
+
+- Static Quality checks
+- Dynamic 'Code' tests
+- Dynamic Functional tests
+
+### Static Quality checks
+
+Static quality checks are generally very fast to run and do not require building the code base. They also provide the least value. These tests would be things like linting, typechecking, and formatting.
+
+While they offer little in terms of testing functionality, they align very closely with objectives (4, 5) as they generally help to keep the codebase in good shape and offer very fast feedback. Such checks are almost free from an authoring point of view as fixes can be mostly automated (either via scripts or editor integrations).
+
+### Dynamic code tests
+
+These tests generally test either isolated pieces of code or the relationship between parts of the code base. They sometimes test functionality or give indications of working functionality but never offer enough confidence to rely on them solely.
+
+These tests are usually either unit or integration tests. They are generally pretty quick to write (especially unit tests) and run, and offer a moderate amount of confidence. They align closely with Objectives 2 and 3 and a little bit of 1.
+
+This kind of test should probably make up the bulk of our handwritten tests.
+
+### Dynamic functional tests
+
+These tests give by far the most confidence as they test only the functionality of the software and do so by running the entire software itself, exactly as a user would.
+
+This aligns very closely with objective 1 but significantly impacts objective 5, as these tests are costly to both write and run. Despite the value, due to the downside we should try to get as much out of the other test types as we can, reserving functional testing for complex use cases and end-to-end journeys.
+
+Tests in this category could be browser-based end-to-end tests, accessibility tests, or performance tests. They are sometimes called acceptance tests.
+
+## Testing tools
+
+We currently use the following tools:
+
+### Static quality checks
+
+- Python type-checking (python)
+- Black formatting (python)
+- ruff linting (python)
+- prettier formatting (javascript/svelte)
+- TypeScript type-checking (javascript/svelte)
+- eslint linting (javascript/svelte) [in progress]
+
+### Dynamic code tests
+
+- pytest (python unit and integration tests)
+- vitest (node-based unit and integration tests)
+- playwright (browser-based unit and integration tests)
+
+### Functional/acceptance tests
+
+- playwright (full end-to-end testing)
+- chromatic (visual testing) [in progress]
+- Accessibility testing [to do]
+
+## Supported environments and versions
+
+All operating systems refer to the current runner variants supported by GitHub Actions.
+
+All unspecified version segments (`x`) refer to latest.
+
+| Software | Version(s)            | Operating System(s)               |
+| -------- | --------------------- | --------------------------------- |
+| Python   | `3.8.x`               | `ubuntu-latest`, `windows-latest` |
+| Node     | `18.x.x`              | `ubuntu-latest`                   |
+| Browser  | `playwright-chrome-x` | `ubuntu-latest`                   |
+
+## Test execution
+
+Tests need to be executed in a number of environments and at different stages of the development cycle in order to be useful. The requirements for tests are as follows:
+
+- **Locally**: it is important that developers can easily run most tests locally to ensure a passing suite before making a PR. There are some exceptions to this: certain tests may require access to secret values which we cannot make available to all possible contributors for practical security reasons. It is reasonable that it isn't possible to run these tests, but they should be disabled by default when running locally.
+- **CI** - It is _critical_ that all tests run successfully in CI with no exceptions. Not every test is required to pass to satisfy CI checks for practical reasons, but it is required that all tests should run in CI and notify us if something unexpected happens in order for the development team to take appropriate action.
+
+For instructions on how to write and run tests see the [contributing guide](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md).
+
+## Managing defects
+
+As we formalise our testing strategy and bring/keep our tests up to standard, it is important that we have some principles on managing defects as they occur or are reported. For now we can have one very simple rule:
+
+- Every bug fix should be accompanied by a test that failed before the fix and passes afterwards. This test should _typically_ be a dynamic code test, but it could be a linting rule or new type if that is appropriate. There are always exceptions, but we should think very carefully before ignoring this rule.
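+
+To make that rule concrete, here is a minimal sketch of what such a regression test might look like (the component behavior and the bug are hypothetical, chosen purely for illustration):
+
+```python
+import gradio as gr
+
+
+def test_textbox_preserves_leading_whitespace():
+    # Imagine a fix for a (made-up) bug where Textbox postprocessing stripped
+    # leading whitespace. This test fails before the fix and passes afterwards,
+    # so the defect cannot silently return.
+    assert gr.Textbox().postprocess("  padded") == "  padded"
+```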
diff --git a/testbed/graphql-python__graphene/.coveragerc b/testbed/graphql-python__graphene/.coveragerc new file mode 100644 index 0000000000000000000000000000000000000000..95760a9a32d7e34b32824dd8d2951c6696c42691 --- /dev/null +++ b/testbed/graphql-python__graphene/.coveragerc @@ -0,0 +1,2 @@ +[run] +omit = graphene/pyutils/*,*/tests/* diff --git a/testbed/graphql-python__graphene/.editorconfig b/testbed/graphql-python__graphene/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..568b3971713b75ebe08cde0a0c7cb26e87f5765e --- /dev/null +++ b/testbed/graphql-python__graphene/.editorconfig @@ -0,0 +1,13 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*.{py,rst,ini}] +indent_style = space +indent_size = 4 diff --git a/testbed/graphql-python__graphene/.github/ISSUE_TEMPLATE/config.yml b/testbed/graphql-python__graphene/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..3ba13e0cec6cbbfd462e9ebf529dd2093148cd69 --- /dev/null +++ b/testbed/graphql-python__graphene/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/testbed/graphql-python__graphene/.github/ISSUE_TEMPLATE/feature_request.md b/testbed/graphql-python__graphene/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000000000000000000000000000000000..c0d97a697b03b8c8a541635219aa149e41bbd078 --- /dev/null +++ b/testbed/graphql-python__graphene/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: "✨ enhancement" +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/testbed/graphql-python__graphene/.github/stale.yml b/testbed/graphql-python__graphene/.github/stale.yml new file mode 100644 index 0000000000000000000000000000000000000000..322a3edad2beb1694b1e70a73d8676d07a9432db --- /dev/null +++ b/testbed/graphql-python__graphene/.github/stale.yml @@ -0,0 +1,24 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: false +# Number of days of inactivity before a stale issue is closed +daysUntilClose: false +# Issues with these labels will never be considered stale +exemptLabels: + - pinned + - security + - 🐛 bug + - 📖 documentation + - 🙋 help wanted + - ✨ enhancement + - good first issue + - work in progress +# Label to use when marking an issue as stale +staleLabel: wontfix +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: false +# markComment: > + # This issue has been automatically marked as stale because it has not had + # recent activity. It will be closed if no further activity occurs. Thank you + # for your contributions. +# Comment to post when closing a stale issue. 
Set to `false` to disable +closeComment: false diff --git a/testbed/graphql-python__graphene/.github/workflows/deploy.yml b/testbed/graphql-python__graphene/.github/workflows/deploy.yml new file mode 100644 index 0000000000000000000000000000000000000000..6cce61d5c0c04cfb521fa2051de560c8fb733feb --- /dev/null +++ b/testbed/graphql-python__graphene/.github/workflows/deploy.yml @@ -0,0 +1,26 @@ +name: 🚀 Deploy to PyPI + +on: + push: + tags: + - 'v*' + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Build wheel and source tarball + run: | + pip install wheel + python setup.py sdist bdist_wheel + - name: Publish a Python distribution to PyPI + uses: pypa/gh-action-pypi-publish@v1.1.0 + with: + user: __token__ + password: ${{ secrets.pypi_password }} diff --git a/testbed/graphql-python__graphene/.github/workflows/lint.yml b/testbed/graphql-python__graphene/.github/workflows/lint.yml new file mode 100644 index 0000000000000000000000000000000000000000..ad5bea6ad54c1639fe640abf2c8cc44fac70c4bb --- /dev/null +++ b/testbed/graphql-python__graphene/.github/workflows/lint.yml @@ -0,0 +1,26 @@ +name: 💅 Lint + +on: [push, pull_request] + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install tox + - name: Run lint + run: tox + env: + TOXENV: pre-commit + - name: Run mypy + run: tox + env: + TOXENV: mypy diff --git a/testbed/graphql-python__graphene/.github/workflows/tests.yml b/testbed/graphql-python__graphene/.github/workflows/tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..6635a35bd4ddb4deea4e52e1b079d80ddae092cd --- /dev/null +++ b/testbed/graphql-python__graphene/.github/workflows/tests.yml @@ -0,0 +1,66 @@ +name: 📄 Tests +on: + push: + branches: + - master + - '*.x' + paths-ignore: + - 'docs/**' + - '*.md' + - '*.rst' + pull_request: + branches: + - master + - '*.x' + paths-ignore: + - 'docs/**' + - '*.md' + - '*.rst' +jobs: + tests: + # runs the test suite + name: ${{ matrix.name }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - {name: '3.11', python: '3.11-dev', os: ubuntu-latest, tox: py311} + - {name: '3.10', python: '3.10', os: ubuntu-latest, tox: py310} + - {name: '3.9', python: '3.9', os: ubuntu-latest, tox: py39} + - {name: '3.8', python: '3.8', os: ubuntu-latest, tox: py38} + - {name: '3.7', python: '3.7', os: ubuntu-latest, tox: py37} + - {name: '3.6', python: '3.6', os: ubuntu-20.04, tox: py36} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + + - name: update pip + run: | + python -m pip install --upgrade pip + pip install --upgrade setuptools wheel + + - name: get pip cache dir + id: pip-cache + run: echo "::set-output name=dir::$(pip cache dir)" + + - name: cache pip dependencies + uses: actions/cache@v3 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: pip|${{ runner.os }}|${{ matrix.python }}|${{ hashFiles('setup.py') }} + + - run: pip install tox + - run: tox -e ${{ matrix.tox }} + - name: Upload coverage.xml + if: ${{ matrix.python == '3.10' }} + uses: actions/upload-artifact@v3 + with: + name: graphene-coverage + path: coverage.xml + if-no-files-found: error + - name: Upload coverage.xml to 
codecov + if: ${{ matrix.python == '3.10' }} + uses: codecov/codecov-action@v3 diff --git a/testbed/graphql-python__graphene/.gitignore b/testbed/graphql-python__graphene/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9148845fa94b211582c1ce567326d436ffa9d229 --- /dev/null +++ b/testbed/graphql-python__graphene/.gitignore @@ -0,0 +1,92 @@ +# Created by https://www.gitignore.io + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +.pytest_cache +nosetests.xml +coverage.xml +*.cover +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# VirtualEnv +.env +.venv +env/ +venv/ + +# Typing +.mypy_cache/ + +/tests/django.sqlite + +/graphene/index.json +/graphene/meta.json + +/meta.json +/index.json + +/docs/playground/graphene-js/pypyjs-release-nojit/ +/docs/static/playground/lib + +/docs/static/playground + +# PyCharm +.idea +*.iml + +# Databases +*.sqlite3 +.vscode +.mypy_cache diff --git a/testbed/graphql-python__graphene/.isort.cfg b/testbed/graphql-python__graphene/.isort.cfg new file mode 100644 index 0000000000000000000000000000000000000000..76c6f842ff345751c47d807de576f65694279a94 --- /dev/null +++ b/testbed/graphql-python__graphene/.isort.cfg @@ -0,0 +1,2 @@ +[settings] +known_third_party = aniso8601,graphql,graphql_relay,promise,pytest,pytz,pyutils,setuptools,snapshottest,sphinx_graphene_theme diff --git a/testbed/graphql-python__graphene/.pre-commit-config.yaml b/testbed/graphql-python__graphene/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eece56e04d68a5e903f80096a67b1acb5b327518 --- /dev/null +++ b/testbed/graphql-python__graphene/.pre-commit-config.yaml @@ -0,0 +1,30 @@ +default_language_version: + python: python3.10 + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: check-merge-conflict + - id: check-json + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + exclude: ^docs/.*$ + - id: pretty-format-json + args: + - --autofix + - id: trailing-whitespace + exclude: README.md +- repo: https://github.com/asottile/pyupgrade + rev: v2.37.3 + hooks: + - id: pyupgrade +- repo: https://github.com/psf/black + rev: 22.6.0 + hooks: + - id: black +- repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 + hooks: + - id: flake8 diff --git a/testbed/graphql-python__graphene/LICENSE b/testbed/graphql-python__graphene/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ecf46fbaef4ff467eaa00f9304a6f00a1c9559a3 --- /dev/null +++ b/testbed/graphql-python__graphene/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-Present Syrus Akbary + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/testbed/graphql-python__graphene/MANIFEST.in b/testbed/graphql-python__graphene/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..9f92821d565f8720f71b7ccd63863b3d36c35f61
--- /dev/null
+++ b/testbed/graphql-python__graphene/MANIFEST.in
@@ -0,0 +1,5 @@
+global-exclude tests/*
+recursive-exclude tests *
+recursive-exclude tests_py35 *
+recursive-exclude examples *
+include LICENSE
diff --git a/testbed/graphql-python__graphene/Makefile b/testbed/graphql-python__graphene/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..08947707370424db19a5edc6c208475787f8c454
--- /dev/null
+++ b/testbed/graphql-python__graphene/Makefile
@@ -0,0 +1,28 @@
+.PHONY: help
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@grep -E '^\.PHONY: [a-zA-Z_-]+ .*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = "(: |##)"}; {printf "\033[36m%-30s\033[0m %s\n", $$2, $$3}'
+
+.PHONY: install-dev ## Install development dependencies
+install-dev:
+	pip install -e ".[dev]"
+
+.PHONY: test ## Run tests
+test:
+	py.test graphene examples
+
+.PHONY: docs ## Generate docs
+docs: install-dev
+	cd docs && make install && make html
+
+.PHONY: docs-live ## Generate docs with live reloading
+docs-live: install-dev
+	cd docs && make install && make livehtml
+
+.PHONY: format
+format:
+	black graphene examples setup.py
+
+.PHONY: lint
+lint:
+	flake8 graphene examples setup.py
diff --git a/testbed/graphql-python__graphene/README.md b/testbed/graphql-python__graphene/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ba0737d04b369195eaf048fc2be640ed4d484bd
--- /dev/null
+++ b/testbed/graphql-python__graphene/README.md
@@ -0,0 +1,130 @@
+# ![Graphene Logo](http://graphene-python.org/favicon.png) [Graphene](http://graphene-python.org) [![Build Status](https://travis-ci.org/graphql-python/graphene.svg?branch=master)](https://travis-ci.org/graphql-python/graphene) [![PyPI version](https://badge.fury.io/py/graphene.svg)](https://badge.fury.io/py/graphene) [![Coverage Status](https://coveralls.io/repos/graphql-python/graphene/badge.svg?branch=master&service=github)](https://coveralls.io/github/graphql-python/graphene?branch=master) [![](https://dcbadge.vercel.app/api/server/T6Gp6NFYHe?style=flat)](https://discord.gg/T6Gp6NFYHe)
+
+[💬 Join the community on Discord](https://discord.gg/T6Gp6NFYHe)
+
+**We are looking for contributors**! Please check the current issues to see how you can help ❤️
+
+## Introduction
+
+[Graphene](http://graphene-python.org) is an opinionated Python library for building GraphQL schemas/types fast and easily.
+
+- **Easy to use:** Graphene helps you use GraphQL in Python without effort.
+- **Relay:** Graphene has builtin support for Relay.
+- **Data agnostic:** Graphene supports any kind of data source: SQL (Django, SQLAlchemy), Mongo, custom Python objects, etc. + We believe that by providing a complete API you could plug Graphene anywhere your data lives and make your data available + through GraphQL. + +## Integrations + +Graphene has multiple integrations with different frameworks: + +| integration | Package | +| ----------------- | --------------------------------------------------------------------------------------- | +| SQLAlchemy | [graphene-sqlalchemy](https://github.com/graphql-python/graphene-sqlalchemy/) | +| Mongo | [graphene-mongo](https://github.com/graphql-python/graphene-mongo/) | +| Apollo Federation | [graphene-federation](https://github.com/graphql-python/graphene-federation/) | +| Django | [graphene-django](https://github.com/graphql-python/graphene-django/) | + +Also, Graphene is fully compatible with the GraphQL spec, working seamlessly with all GraphQL clients, such as [Relay](https://github.com/facebook/relay), [Apollo](https://github.com/apollographql/apollo-client) and [gql](https://github.com/graphql-python/gql). + +## Installation + +To install `graphene`, just run this command in your shell + +```bash +pip install "graphene>=3.1" +``` + +## Examples + +Here is one example for you to get started: + +```python +import graphene + +class Query(graphene.ObjectType): + hello = graphene.String(description='A typical hello world') + + def resolve_hello(self, info): + return 'World' + +schema = graphene.Schema(query=Query) +``` + +Then Querying `graphene.Schema` is as simple as: + +```python +query = ''' + query SayHello { + hello + } +''' +result = schema.execute(query) +``` + +If you want to learn even more, you can also check the following [examples](examples/): + +- **Basic Schema**: [Starwars example](examples/starwars) +- **Relay Schema**: [Starwars Relay example](examples/starwars_relay) + +## Documentation + +Documentation and links to additional resources are available at +https://docs.graphene-python.org/en/latest/ + +## Contributing + +After cloning this repo, create a [virtualenv](https://virtualenv.pypa.io/en/stable/) and ensure dependencies are installed by running: + +```sh +virtualenv venv +source venv/bin/activate +pip install -e ".[test]" +``` + +Well-written tests and maintaining good test coverage is important to this project. While developing, run new and existing tests with: + +```sh +pytest graphene/relay/tests/test_node.py # Single file +pytest graphene/relay # All tests in directory +``` + +Add the `-s` flag if you have introduced breakpoints into the code for debugging. +Add the `-v` ("verbose") flag to get more detailed test output. For even more detailed output, use `-vv`. +Check out the [pytest documentation](https://docs.pytest.org/en/latest/) for more options and test running controls. + +Regularly ensure your `pre-commit` hooks are up to date and enabled: + +```sh +pre-commit install +``` + +You can also run the benchmarks with: + +```sh +pytest graphene --benchmark-only +``` + +Graphene supports several versions of Python. To make sure that changes do not break compatibility with any of those versions, we use `tox` to create virtualenvs for each Python version and run tests with that version. 
To run against all Python versions defined in the `tox.ini` config file, just run: + +```sh +tox +``` + +If you wish to run against a specific version defined in the `tox.ini` file: + +```sh +tox -e py39 +``` + +Tox can only use whatever versions of Python are installed on your system. When you create a pull request, GitHub Actions pipelines will also be running the same tests and report the results, so there is no need for potential contributors to try to install every single version of Python on their own system ahead of time. We appreciate opening issues and pull requests to make graphene even more stable & useful! + +### Building Documentation + +The documentation is generated using the excellent [Sphinx](http://www.sphinx-doc.org/) and a custom theme. + +An HTML version of the documentation is produced by running: + +```sh +make docs +``` diff --git a/testbed/graphql-python__graphene/README.rst b/testbed/graphql-python__graphene/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..405a8f44aa1e89dd0bdcc2990842036e3c4b50cb --- /dev/null +++ b/testbed/graphql-python__graphene/README.rst @@ -0,0 +1,171 @@ +|Graphene Logo| `Graphene `__ |Build Status| |PyPI version| |Coverage Status| +========================================================================================================= + +`💬 Join the community on +Slack `__ + +**We are looking for contributors**! Please check the +`ROADMAP `__ +to see how you can help ❤️ + +Introduction +------------ + +`Graphene `__ is an opinionated Python +library for building GraphQL schemas/types fast and easily. + +- **Easy to use:** Graphene helps you use GraphQL in Python without + effort. +- **Relay:** Graphene has builtin support for Relay. +- **Data agnostic:** Graphene supports any kind of data source: SQL + (Django, SQLAlchemy), NoSQL, custom Python objects, etc. We believe + that by providing a complete API you could plug Graphene anywhere + your data lives and make your data available through GraphQL. + +Integrations +------------ + +Graphene has multiple integrations with different frameworks: + ++-------------------+-------------------------------------------------+ +| integration | Package | ++===================+=================================================+ +| Django | `graphene-django `__ | ++-------------------+-------------------------------------------------+ +| SQLAlchemy | `graphene-sqlalchemy `__ | ++-------------------+-------------------------------------------------+ + +Also, Graphene is fully compatible with the GraphQL spec, working +seamlessly with all GraphQL clients, such as +`Relay `__, +`Apollo `__ and +`gql `__. + +Installation +------------ + +To install `graphene`, just run this command in your shell + +.. code:: bash + + pip install "graphene>=3.0" + +Examples +-------- + +Here is one example for you to get started: + +.. code:: python + + import graphene + + class Query(graphene.ObjectType): + hello = graphene.String(description='A typical hello world') + + def resolve_hello(self, info): + return 'World' + + schema = graphene.Schema(query=Query) + +Then Querying ``graphene.Schema`` is as simple as: + +.. 
code:: python + + query = ''' + query SayHello { + hello + } + ''' + result = schema.execute(query) + +If you want to learn even more, you can also check the following +`examples `__: + +- **Basic Schema**: `Starwars example `__ +- **Relay Schema**: `Starwars Relay + example `__ + +Documentation +------------- + +Documentation and links to additional resources are available at +https://docs.graphene-python.org/en/latest/ + +Contributing +------------ + +After cloning this repo, create a +`virtualenv `__ and ensure +dependencies are installed by running: + +.. code:: sh + + virtualenv venv + source venv/bin/activate + pip install -e ".[test]" + +Well-written tests and maintaining good test coverage is important to +this project. While developing, run new and existing tests with: + +.. code:: sh + + py.test graphene/relay/tests/test_node.py # Single file + py.test graphene/relay # All tests in directory + +Add the ``-s`` flag if you have introduced breakpoints into the code for +debugging. Add the ``-v`` (“verbose”) flag to get more detailed test +output. For even more detailed output, use ``-vv``. Check out the +`pytest documentation `__ for more +options and test running controls. + +You can also run the benchmarks with: + +.. code:: sh + + py.test graphene --benchmark-only + +Graphene supports several versions of Python. To make sure that changes +do not break compatibility with any of those versions, we use ``tox`` to +create virtualenvs for each Python version and run tests with that +version. To run against all Python versions defined in the ``tox.ini`` +config file, just run: + +.. code:: sh + + tox + +If you wish to run against a specific version defined in the ``tox.ini`` +file: + +.. code:: sh + + tox -e py36 + +Tox can only use whatever versions of Python are installed on your +system. When you create a pull request, Travis will also be running the +same tests and report the results, so there is no need for potential +contributors to try to install every single version of Python on their +own system ahead of time. We appreciate opening issues and pull requests +to make graphene even more stable & useful! + +Building Documentation +~~~~~~~~~~~~~~~~~~~~~~ + +The documentation is generated using the excellent +`Sphinx `__ and a custom theme. + +An HTML version of the documentation is produced by running: + +.. code:: sh + + make docs + +.. |Graphene Logo| image:: http://graphene-python.org/favicon.png +.. |Build Status| image:: https://travis-ci.org/graphql-python/graphene.svg?branch=master + :target: https://travis-ci.org/graphql-python/graphene +.. |PyPI version| image:: https://badge.fury.io/py/graphene.svg + :target: https://badge.fury.io/py/graphene +.. |Coverage Status| image:: https://coveralls.io/repos/graphql-python/graphene/badge.svg?branch=master&service=github + :target: https://coveralls.io/github/graphql-python/graphene?branch=master diff --git a/testbed/graphql-python__graphene/ROADMAP.md b/testbed/graphql-python__graphene/ROADMAP.md new file mode 100644 index 0000000000000000000000000000000000000000..2cdb09d68205ea71c07f082c37a9b7aa55402693 --- /dev/null +++ b/testbed/graphql-python__graphene/ROADMAP.md @@ -0,0 +1,54 @@ +# GraphQL Python Roadmap + +In order to move Graphene and the GraphQL Python ecosystem forward it's essential to be clear with the community on next steps, so we can move uniformly. 
+
+_👋 If you have more ideas on how to move the Graphene ecosystem forward, don't hesitate to [open a PR](https://github.com/graphql-python/graphene/edit/master/ROADMAP.md)_
+
+
+## Now
+- [ ] Continue to support v2.x with security releases
+- [ ] Last major/feature release is cut and graphene-* libraries should pin to that version number
+
+## Next
+New features will only be developed on version 3 of ecosystem libraries.
+
+### [Core-Next](https://github.com/graphql-python/graphql-core-next)
+Targeted as v3 of [graphql-core](https://pypi.org/project/graphql-core/), Python 3 only
+
+### Graphene
+- [ ] Integrate with the core-next API and resolve all breaking changes
+- [ ] GraphQL types from type annotations - [See issue](https://github.com/graphql-python/graphene/issues/729)
+- [ ] Add support for coroutines in Connection, Mutation (abstracting out Promise requirement) - [See PR](https://github.com/graphql-python/graphene/pull/824)
+
+### Graphene-*
+- [ ] Integrate with the graphene core-next API and resolve all breaking changes
+
+### *-graphql
+- [ ] Integrate with the graphql core-next API and resolve all breaking changes
+
+## Ongoing Initiatives
+- [ ] Improve documentation, especially for new users of the library
+- [ ] Recipes for “quick start” that people can ideally use/run
+
+
+## Dependent Libraries
+| Repo                                                                          | Release Manager | CODEOWNERS | Pinned     | next/master created | Labels Standardized |
+| ----------------------------------------------------------------------------- | --------------- | ---------- | ---------- | ------------------- | ------------------- |
+| [graphene](https://github.com/graphql-python/graphene)                       | ekampf          | ✅         |            | ✅                  |                     |
+| [graphql-core](https://github.com/graphql-python/graphql-core)               | Cito            | ✅         | N/A        | N/A                 |                     |
+| [graphql-core-next](https://github.com/graphql-python/graphql-core-next)     | Cito            | ✅         | N/A        | N/A                 |                     |
+| [graphql-server-core](https://github.com/graphql-python/graphql-server-core) | Cito            |            | ✅         | ✅                  |                     |
+| [gql](https://github.com/graphql-python/gql)                                 | ekampf          |            |            |                     |                     |
+| [gql-next](https://github.com/graphql-python/gql-next)                       | ekampf          |            | N/A        | N/A                 |                     |
+| ...[aiohttp](https://github.com/graphql-python/aiohttp-graphql)              |                 |            |            |                     |                     |
+| ...[django](https://github.com/graphql-python/graphene-django)               | mvanlonden      |            | ✅         | ✅                  |                     |
+| ...[sanic](https://github.com/graphql-python/sanic-graphql)                  | ekampf          |            |            |                     |                     |
+| ...[flask](https://github.com/graphql-python/flask-graphql)                  |                 |            |            |                     |                     |
+| ...[webob](https://github.com/graphql-python/webob-graphql)                  |                 |            |            |                     |                     |
+| ...[tornado](https://github.com/graphql-python/graphene-tornado)             | ewhauser        |            | PR created | ✅                  |                     |
+| ...[ws](https://github.com/graphql-python/graphql-ws)                        | Cito/dfee       |            | ✅         | ✅                  |                     |
+| ...[gae](https://github.com/graphql-python/graphene-gae)                     | ekampf          |            | PR created | ✅                  |                     |
+| ...[sqlalchemy](https://github.com/graphql-python/graphene-sqlalchemy)       | jnak/Nabell     | ✅         | ✅         | ✅                  |                     |
+| ...[mongo](https://github.com/graphql-python/graphene-mongo)                 |                 |            | ✅         | ✅                  |                     |
+| ...[relay-py](https://github.com/graphql-python/graphql-relay-py)            | Cito            |            |            |                     |                     |
+| ...[wsgi](https://github.com/moritzmhmk/wsgi-graphql)                        |                 |            |            |                     |                     |
diff --git a/testbed/graphql-python__graphene/UPGRADE-v1.0.md b/testbed/graphql-python__graphene/UPGRADE-v1.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..ecfa9da7ea4edb3200a07086c61c39ac2b8d2c77
--- /dev/null
+++ b/testbed/graphql-python__graphene/UPGRADE-v1.0.md
@@ -0,0 +1,190 @@
+# v1.0 Upgrade Guide
+
+Big changes from v0.10.x to 1.0.
While on the surface a lot of this just looks like shuffling the API around, the entire codebase has been rewritten to handle some really great use cases and to improve performance.
+
+## Backwards Compatibility and Deprecation Warnings
+
+This has been a community project from the start; we need your help making the upgrade as smooth as possible for everybody!
+We have done our best to provide backwards compatibility with deprecated APIs.
+
+## Deprecations
+
+- `with_context` is no longer needed. Resolvers now always take the context argument.
+  Before:
+
+  ```python
+  def resolve_xxx(root, args, info):
+      # ...
+  ```
+
+  With 1.0:
+
+  ```python
+  def resolve_xxx(root, args, context, info):
+      # ...
+  ```
+
+- `ObjectType` and `Interface` no longer accept the `abstract` option in the `Meta`.
+  Inheriting fields should now be achieved using `AbstractType` inheritance.
+
+  Before:
+
+  ```python
+  class MyBaseQuery(graphene.ObjectType):
+      my_field = String()
+      class Meta:
+          abstract = True
+
+  class Query(MyBaseQuery):
+      pass
+  ```
+
+  With 1.0:
+
+  ```python
+  class MyBaseQuery(graphene.AbstractType):
+      my_field = String()
+
+  class Query(MyBaseQuery, graphene.ObjectType):
+      pass
+  ```
+
+- The `type_name` option in the `Meta` of types is now `name`.
+
+- Type references no longer work with strings, but with functions.
+
+  Before:
+
+  ```python
+  class Query(graphene.ObjectType):
+      user = graphene.Field('User')
+      users = graphene.List('User')
+  ```
+
+  With 1.0:
+
+  ```python
+  class Query(graphene.ObjectType):
+      user = graphene.Field(lambda: User)
+      users = graphene.List(lambda: User)
+  ```
+
+## Schema
+
+Schemas in graphene `1.0` are immutable; that means that once you create a `graphene.Schema`, any
+change to its attributes will have no effect.
+The `name` argument is removed from the Schema.
+
+The arguments `executor` and `middlewares` are also removed from the `Schema` definition.
+You can still use them, but by passing them explicitly to the `execute` method in `graphql`.
+
+```python
+# Old way
+schema = graphene.Schema(name='My Schema')
+schema.query = Query
+schema.mutation = Mutation
+
+# New way
+schema = graphene.Schema(
+    query=Query,
+    mutation=Mutation
+)
+```
+
+## Interfaces
+
+To implement an Interface in an ObjectType, you have to add it to `Meta.interfaces`,
+like:
+
+```python
+from graphene import Interface, ObjectType, String
+
+class Character(Interface):
+    name = String()
+
+class Human(Character):  # Old way, Human will still be an Interface
+    pass
+
+class Droid(ObjectType):  # New way, you have to specify the ObjectType
+    class Meta:
+        interfaces = (Character, )
+```
+
+## Mutations
+
+Mutation fields have changed their usage: before, if you had the mutation `MyMutation`, you
+only had to reference it with `graphene.Field(MyMutation)`; now it's simply `MyMutation.Field()`.
+
+Example:
+
+```python
+from graphene import ObjectType, Mutation, String
+
+class ReverseString(Mutation):
+    class Input:
+        input = String(required=True)
+
+    reversed = String()
+
+    def mutate(root, args, context, info):
+        reversed = args.get('input')[::-1]
+        return ReverseString(reversed=reversed)
+
+class Query(ObjectType):
+    reverse_string = graphene.Field(ReverseString)  # Old way, will not include the mutation arguments by default
+    reverse_string = ReverseString.Field()
+```
+
+## Nodes
+
+Apart from implementing it as shown in the previous section, to use the node field you have to
+specify the node Type.
+
+Example:
+
+```python
+from graphene import ObjectType, relay
+
+class Query(ObjectType):
+    node = relay.NodeField()  # Old way, NodeField no longer exists. Use Node.Field
+    node = relay.Node.Field()  # New way
+```
+
+Also, if you wanted to create an `ObjectType` that implements `Node`, you have to do it
+explicitly.
+
+## Django
+
+The Django integration with Graphene now has an independent package: `graphene-django`.
+To install it, you have to replace the old `graphene[django]` with `graphene-django`.
+
+- As the package is now independent, you now have to import from `graphene_django`.
+- **DjangoNode no longer exists**, please use `relay.Node` instead:
+
+  ```python
+  from graphene.relay import Node
+  from graphene_django import DjangoObjectType
+
+  class Droid(DjangoObjectType):
+      class Meta:
+          interfaces = (Node, )
+  ```
+
+## SQLAlchemy
+
+The SQLAlchemy integration with Graphene now has an independent package: `graphene-sqlalchemy`.
+To install it, you have to replace the old `graphene[sqlalchemy]` with `graphene-sqlalchemy`.
+
+- As the package is now independent, you now have to import from `graphene_sqlalchemy`.
+- **SQLAlchemyNode no longer exists**, please use `relay.Node` instead:
+
+  ```python
+  from graphene.relay import Node
+  from graphene_sqlalchemy import SQLAlchemyObjectType
+
+  class Droid(SQLAlchemyObjectType):
+      class Meta:
+          interfaces = (Node, )
+  ```
diff --git a/testbed/graphql-python__graphene/UPGRADE-v2.0.md b/testbed/graphql-python__graphene/UPGRADE-v2.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..04926e7aa5e085aa3c9c961b8f1b91aed855f4e9
--- /dev/null
+++ b/testbed/graphql-python__graphene/UPGRADE-v2.0.md
@@ -0,0 +1,385 @@
+# v2.0 Upgrade Guide
+
+`ObjectType`, `Interface`, `InputObjectType`, `Scalar` and `Enum` implementations
+have been quite simplified, without the need to define an explicit Metaclass for each subtype.
+
+It also improves the field resolvers, [simplifying the code](#simpler-resolvers) the
+developer has to write to use them.
+
+**Deprecations:**
+
+- [`AbstractType`](#abstracttype-deprecated)
+- [`resolve_only_args`](#resolve_only_args)
+- [`Mutation.Input`](#mutationinput)
+
+**Breaking changes:**
+
+- [`Simpler Resolvers`](#simpler-resolvers)
+- [`Node Connections`](#node-connections)
+
+**New Features!**
+
+- [`InputObjectType`](#inputobjecttype)
+- [`Meta as Class arguments`](#meta-as-class-arguments) (_only available for Python 3_)
+
+> The type metaclasses are now deleted as they are no longer necessary. If your code was depending
+> on this strategy for creating custom attrs, see an [example on how to do it in 2.0](https://github.com/graphql-python/graphene/blob/v2.0.0/graphene/tests/issues/test_425.py).
+
+## Deprecations
+
+### AbstractType deprecated
+
+AbstractType is deprecated in graphene 2.0; you can now use normal inheritance instead.
+
+Before:
+
+```python
+class CommonFields(AbstractType):
+    name = String()
+
+class Pet(CommonFields, Interface):
+    pass
+```
+
+With 2.0:
+
+```python
+class CommonFields(object):
+    name = String()
+
+class Pet(CommonFields, Interface):
+    pass
+```
+
+### resolve_only_args
+
+`resolve_only_args` is now deprecated as the resolver API has been simplified.
+
+Before:
+
+```python
+class User(ObjectType):
+    name = String()
+
+    @resolve_only_args
+    def resolve_name(root):
+        return root.name
+```
+
+With 2.0:
+
+```python
+class User(ObjectType):
+    name = String()
+
+    def resolve_name(root, info):
+        return root.name
+```
+
+### Mutation.Input
+
+`Mutation.Input` is now deprecated in favor of using `Mutation.Arguments` (`ClientIDMutation` still uses `Input`).
+
+Before:
+
+```python
+class User(Mutation):
+    class Input:
+        name = String()
+```
+
+With 2.0:
+
+```python
+class User(Mutation):
+    class Arguments:
+        name = String()
+```
+
+## Breaking Changes
+
+### Simpler resolvers
+
+All the resolvers in graphene have been simplified.
+Prior to Graphene `2.0`, all resolvers required four arguments: `(root, args, context, info)`.
+Now, resolver `args` are passed as keyword arguments to the function, and the `context` argument disappeared in favor of `info.context`.
+
+Before:
+
+```python
+my_field = graphene.String(my_arg=graphene.String())
+
+def resolve_my_field(root, args, context, info):
+    my_arg = args.get('my_arg')
+    return ...
+```
+
+With 2.0:
+
+```python
+my_field = graphene.String(my_arg=graphene.String())
+
+def resolve_my_field(root, info, my_arg):
+    return ...
+```
+
+**PS.: Take care with receiving args like `my_arg` as above. This doesn't work for optional (non-required) arguments, such as the standard `Connection` arguments (first, last, after, before).**
+You may need something like this:
+
+```python
+def resolve_my_field(root, info, known_field1, known_field2, **args):  ## get other args with: args.get('arg_key')
+```
+
+And, if you need the context in the resolver, you can use `info.context`:
+
+```python
+my_field = graphene.String(my_arg=graphene.String())
+
+def resolve_my_field(root, info, my_arg):
+    context = info.context
+    return ...
+```
+
+### Node Connections
+
+Node types no longer have a `Connection` by default.
+In 2.0 and onwards `Connection`s should be defined explicitly.
+
+Before:
+
+```python
+class User(ObjectType):
+    class Meta:
+        interfaces = [relay.Node]
+    name = String()
+
+class Query(ObjectType):
+    user_connection = relay.ConnectionField(User)
+```
+
+With 2.0:
+
+```python
+class User(ObjectType):
+    class Meta:
+        interfaces = [relay.Node]
+    name = String()
+
+class UserConnection(relay.Connection):
+    class Meta:
+        node = User
+
+class Query(ObjectType):
+    user_connection = relay.ConnectionField(UserConnection)
+```
+
+## Node.get_node
+
+The method `get_node` in `ObjectType`s that have `Node` as an interface changes its API,
+from `def get_node(cls, id, context, info)` to `def get_node(cls, info, id)`.
+
+```python
+class MyObject(ObjectType):
+    class Meta:
+        interfaces = (Node, )
+
+    @classmethod
+    def get_node(cls, id, context, info):
+        return ...
+```
+
+To:
+
+```python
+class MyObject(ObjectType):
+    class Meta:
+        interfaces = (Node, )
+
+    @classmethod
+    def get_node(cls, info, id):
+        return ...
+```
+
+## Node.get_node_from_global_id
+
+The parameter order of the `get_node_from_global_id` method has changed. You may need to adjust your [Node Root Field](http://docs.graphene-python.org/en/latest/relay/nodes/#node-root-field) and maybe other places that use this method to obtain an object.
+
+Before:
+
+```python
+class RootQuery(object):
+    ...
+    node = Field(relay.Node, id=ID(required=True))
+
+    def resolve_node(root, args, context, info):
+        node = relay.Node.get_node_from_global_id(args['id'], context, info)
+        return node
+```
+
+Now:
+
+```python
+class RootQuery(object):
+    ...
+    node = Field(relay.Node, id=ID(required=True))
+
+    def resolve_node(root, info, id):
+        node = relay.Node.get_node_from_global_id(info, id)
+        return node
+```
+
+## Mutation.mutate
+
+Now only receives (`root`, `info`, `**kwargs`) and is not a `@classmethod`.
+
+Before:
+
+```python
+class SomeMutation(Mutation):
+    ...
+
+    @classmethod
+    def mutate(cls, instance, args, context, info):
+        ...
+```
+
+With 2.0:
+
+```python
+class SomeMutation(Mutation):
+    ...
+
+    def mutate(root, info, **args):
+        ...
+```
+
+With 2.0 you can also get your declared (as above) `args` this way:
+
+```python
+class SomeMutation(Mutation):
+    class Arguments:
+        first_name = String(required=True)
+        last_name = String(required=True)
+    ...
+
+    def mutate(root, info, first_name, last_name):
+        ...
+```
+
+## ClientIDMutation.mutate_and_get_payload
+
+Now only receives (`root`, `info`, `**input`)
+
+### Middlewares
+
+If you are using middlewares, you need to make some adjustments:
+
+Before:
+
+```python
+class MyGrapheneMiddleware(object):
+    def resolve(self, next_mw, root, args, context, info):
+
+        ## Middleware code
+
+        return next_mw(root, args, context, info)
+```
+
+With 2.0:
+
+```python
+class MyGrapheneMiddleware(object):
+    def resolve(self, next_mw, root, info, **args):
+        context = info.context
+
+        ## Middleware code
+
+        info.context = context
+        return next_mw(root, info, **args)
+```
+
+## New Features
+
+### InputObjectType
+
+If you are using `InputObjectType`, you can now access
+its fields via `getattr` (`my_input.myattr`) when resolving, instead of
+the classic way `my_input['myattr']`.
+
+You can also use custom defined properties on your input class.
+
+Example. Before:
+
+```python
+class UserInput(InputObjectType):
+    id = ID(required=True)
+
+def is_valid_input(input):
+    return input.get('id').startswith('userid_')
+
+class Query(ObjectType):
+    user = graphene.Field(User, input=UserInput())
+
+    @resolve_only_args
+    def resolve_user(root, input):
+        user_id = input.get('id')
+        if is_valid_input(input):
+            return get_user(user_id)
+```
+
+With 2.0:
+
+```python
+class UserInput(InputObjectType):
+    id = ID(required=True)
+
+    @property
+    def is_valid(root):
+        return root.id.startswith('userid_')
+
+class Query(ObjectType):
+    user = graphene.Field(User, input=UserInput())
+
+    def resolve_user(root, info, input):
+        if input.is_valid:
+            return get_user(input.id)
+```
+
+### Meta as Class arguments
+
+Now you can use the meta options as class arguments (**ONLY PYTHON 3**).
+
+Before:
+
+```python
+class Dog(ObjectType):
+    class Meta:
+        interfaces = [Pet]
+    name = String()
+```
+
+With 2.0:
+
+```python
+class Dog(ObjectType, interfaces=[Pet]):
+    name = String()
+```
+
+### Abstract types
+
+Now you can create abstract types super easily, without needing to subclass the meta.
+
+```python
+class Base(ObjectType):
+    class Meta:
+        abstract = True
+
+    id = ID()
+
+    def resolve_id(root, info):
+        return f"{root.__class__.__name__}_{root.id}"
+```
+
+### UUID Scalar
+
+In Graphene 2.0 there is a new dedicated scalar for UUIDs, `UUID`.
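+
+A minimal sketch of the new scalar in use (the field name and resolver here are made up purely for illustration):
+
+```python
+import uuid
+
+import graphene
+
+class Query(graphene.ObjectType):
+    # graphene.UUID serializes and parses uuid.UUID values automatically.
+    token = graphene.UUID()
+
+    def resolve_token(root, info):
+        return uuid.uuid4()
+
+schema = graphene.Schema(query=Query)
+```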
diff --git a/testbed/graphql-python__graphene/bin/autolinter b/testbed/graphql-python__graphene/bin/autolinter
new file mode 100644
index 0000000000000000000000000000000000000000..0fc3ccaee8a46763d9d53a2bc5a939f26e0a9c99
--- /dev/null
+++ b/testbed/graphql-python__graphene/bin/autolinter
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Install the required scripts with
+# pip install autoflake autopep8 isort
+autoflake ./examples/ ./graphene/ -r --remove-unused-variables --remove-all-unused-imports --in-place
+autopep8 ./examples/ ./graphene/ -r --in-place --experimental --aggressive --max-line-length 120
+isort -rc ./examples/ ./graphene/
diff --git a/testbed/graphql-python__graphene/bin/convert_documentation b/testbed/graphql-python__graphene/bin/convert_documentation
new file mode 100644
index 0000000000000000000000000000000000000000..b55d5dabc2d61217e27b33e4b57d5d8f261cb40b
--- /dev/null
+++ b/testbed/graphql-python__graphene/bin/convert_documentation
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+pandoc README.md --from markdown --to rst -s -o README.rst
diff --git a/testbed/graphql-python__graphene/docs/Makefile b/testbed/graphql-python__graphene/docs/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..8753f299822347d5e7ea4dbb5eb6f3a2399ec2a0
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/Makefile
@@ -0,0 +1,208 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@grep -E '^\.PHONY: [a-zA-Z_-]+ .*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = "(: |##)"}; {printf "\033[36m%-30s\033[0m %s\n", $$2, $$3}'
+
+.PHONY: install ## to install all documentation related requirements
+install:
+	pip install -r requirements.txt
+
+.PHONY: clean ## to remove all built documentation
+clean:
+	rm -rf $(BUILDDIR)/*
+
+.PHONY: html ## to make standalone HTML files
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+.PHONY: dirhtml ## to make HTML files named index.html in directories
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+.PHONY: singlehtml ## to make a single large HTML file
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+.PHONY: pickle ## to make pickle files
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+.PHONY: json ## to make JSON files
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+.PHONY: htmlhelp ## to make HTML files and an HTML help project
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	".hhp project file in $(BUILDDIR)/htmlhelp."
+ +.PHONY: qthelp ## to make HTML files and a qthelp project +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Graphene.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Graphene.qhc" + +.PHONY: applehelp ## to make an Apple Help Book +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +.PHONY: devhelp ## to make HTML files and a Devhelp project +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/Graphene" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Graphene" + @echo "# devhelp" + +.PHONY: epub ## to make an epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: epub3 ## to make an epub3 +epub3: + $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 + @echo + @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." + +.PHONY: latex ## to make LaTeX files, you can set PAPER=a4 or PAPER=letter +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf ## to make LaTeX files and run them through pdflatex +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja ## to make LaTeX files and run them through platex/dvipdfmx +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text ## to make text files +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man ## to make manual pages +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo ## to make Texinfo files +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info ## to make Texinfo files and run them through makeinfo +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
+ +.PHONY: gettext ## to make PO message catalogs +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes ## to make an overview of all changed/added/deprecated items +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +.PHONY: linkcheck ## to check all external links for integrity +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +.PHONY: doctest ## to run all doctests embedded in the documentation (if enabled) +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage ## to run coverage check of the documentation (if enabled) +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml ## to make Docutils-native XML files +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml ## to make pseudoxml-XML files for display purposes +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." + +.PHONY: dummy ## to check syntax errors of document sources +dummy: + $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy + @echo + @echo "Build finished. Dummy builder generates no files." + +.PHONY: livehtml ## to build and serve live-reloading documentation +livehtml: + sphinx-autobuild -b html --watch ../graphene $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/testbed/graphql-python__graphene/docs/_static/.gitkeep b/testbed/graphql-python__graphene/docs/_static/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/docs/api/index.rst b/testbed/graphql-python__graphene/docs/api/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..38b74909ea6c13f378471739231a1080b5c15844 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/api/index.rst @@ -0,0 +1,110 @@ +API Reference +============= + +Schema +------ + +.. autoclass:: graphene.types.schema.Schema + :members: + +.. Uncomment sections / types as API documentation is fleshed out +.. in each class + +Object types +------------ + +.. autoclass:: graphene.ObjectType + +.. autoclass:: graphene.InputObjectType + +.. autoclass:: graphene.Mutation + :members: + +.. _fields-mounted-types: + +Fields (Mounted Types) +---------------------- + +.. autoclass:: graphene.Field + +.. autoclass:: graphene.Argument + +.. autoclass:: graphene.InputField + +Fields (Unmounted Types) +------------------------ + +.. autoclass:: graphene.types.unmountedtype.UnmountedType + +GraphQL Scalars +--------------- + +.. autoclass:: graphene.Int() + +.. autoclass:: graphene.Float() + +.. autoclass:: graphene.String() + +.. autoclass:: graphene.Boolean() + +.. autoclass:: graphene.ID() + +Graphene Scalars +---------------- + +.. autoclass:: graphene.Date() + +.. 
autoclass:: graphene.DateTime() + +.. autoclass:: graphene.Time() + +.. autoclass:: graphene.Decimal() + +.. autoclass:: graphene.UUID() + +.. autoclass:: graphene.JSONString() + +.. autoclass:: graphene.Base64() + +Enum +---- + +.. autoclass:: graphene.Enum() + +Structures +---------- + +.. autoclass:: graphene.List + +.. autoclass:: graphene.NonNull + +Type Extension +-------------- + +.. autoclass:: graphene.Interface() + +.. autoclass:: graphene.Union() + +Execution Metadata +------------------ + +.. autoclass:: graphene.ResolveInfo + +.. autoclass:: graphene.Context + +.. autoclass:: graphql.ExecutionResult + +.. Relay +.. ----- + +.. .. autoclass:: graphene.Node + +.. .. autoclass:: graphene.GlobalID + +.. .. autoclass:: graphene.ClientIDMutation + +.. .. autoclass:: graphene.Connection + +.. .. autoclass:: graphene.ConnectionField + +.. .. autoclass:: graphene.PageInfo diff --git a/testbed/graphql-python__graphene/docs/conf.py b/testbed/graphql-python__graphene/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..75f515416c8432d9902912edec129dd68f8665bf --- /dev/null +++ b/testbed/graphql-python__graphene/docs/conf.py @@ -0,0 +1,459 @@ +import os + +import sphinx_graphene_theme + +on_rtd = os.environ.get("READTHEDOCS", None) == "True" + +# -*- coding: utf-8 -*- +# +# Graphene documentation build configuration file, created by +# sphinx-quickstart on Sun Sep 11 18:30:51 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys + +sys.path.insert(0, os.path.abspath("..")) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", +] +if not on_rtd: + extensions += ["sphinx.ext.githubpages"] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ".rst" + +# The encoding of source files. +# +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = "Graphene" +copyright = "Graphene 2016" +author = "Syrus Akbary" + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = "1.0" +# The full version, including alpha/beta/rc tags. +release = "1.0" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# +# today = '' +# +# Else, today_fmt is used as the format for a strftime call. +# +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = 'alabaster' +# if on_rtd: +# html_theme = 'sphinx_rtd_theme' + +html_theme = "sphinx_graphene_theme" + +html_theme_path = [sphinx_graphene_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. +# " v documentation" by default. +# +# html_title = u'Graphene v1.0' + +# A shorter title for the navigation bar. Default is the same as html_title. +# +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# +# html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# +# html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. 
+# +# html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# +# html_additional_pages = {} + +# If false, no module index is generated. +# +# html_domain_indices = True + +# If false, no index is generated. +# +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +# +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +# +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "Graphenedoc" + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, "Graphene.tex", "Graphene Documentation", "Syrus Akbary", "manual") +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# +# latex_use_parts = False + +# If true, show page references after internal links. +# +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# +# latex_appendices = [] + +# It false, will not define \strong, \code, itleref, \crossref ... but only +# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added +# packages. +# +# latex_keep_old_macro_names = True + +# If false, no module index is generated. 
+# +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "graphene", "Graphene Documentation", [author], 1)] + +# If true, show URL addresses after external links. +# +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "Graphene", + "Graphene Documentation", + author, + "Graphene", + "One line description of project.", + "Miscellaneous", + ) +] + +# Documents to append as an appendix to all manuals. +# +# texinfo_appendices = [] + +# If false, no module index is generated. +# +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# +# texinfo_no_detailmenu = False + + +# -- Options for Epub output ---------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project +epub_author = author +epub_publisher = author +epub_copyright = copyright + +# The basename for the epub file. It defaults to the project name. +# epub_basename = project + +# The HTML theme for the epub output. Since the default themes are not +# optimized for small screen space, using the same theme for HTML and epub +# output is usually not wise. This defaults to 'epub', a theme designed to save +# visual space. +# +# epub_theme = 'epub' + +# The language of the text. It defaults to the language option +# or 'en' if the language is not set. +# +# epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +# epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +# +# epub_cover = () + +# A sequence of (type, uri, title) tuples for the guide element of content.opf. +# +# epub_guide = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +# +# epub_pre_files = [] + +# HTML files that should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +# +# epub_post_files = [] + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ["search.html"] + +# The depth of the table of contents in toc.ncx. +# +# epub_tocdepth = 3 + +# Allow duplicate toc entries. +# +# epub_tocdup = True + +# Choose between 'default' and 'includehidden'. +# +# epub_tocscope = 'default' + +# Fix unsupported image types using the Pillow. +# +# epub_fix_images = False + +# Scale large images. +# +# epub_max_image_width = 0 + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# epub_show_urls = 'inline' + +# If false, no index is generated. +# +# epub_use_index = True + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + "https://docs.python.org/": None, + "python": ("https://docs.python.org/", None), + "graphene_django": ( + "http://docs.graphene-python.org/projects/django/en/latest/", + None, + ), + "graphene_sqlalchemy": ( + "http://docs.graphene-python.org/projects/sqlalchemy/en/latest/", + None, + ), +} diff --git a/testbed/graphql-python__graphene/docs/execution/dataloader.rst b/testbed/graphql-python__graphene/docs/execution/dataloader.rst new file mode 100644 index 0000000000000000000000000000000000000000..557db2c136ebabaebdb525860862ec6196da1474 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/execution/dataloader.rst @@ -0,0 +1,117 @@ +Dataloader +========== + +DataLoader is a generic utility to be used as part of your application's +data fetching layer to provide a simplified and consistent API over +various remote data sources such as databases or web services via batching +and caching. It is provided by a separate package `aiodataloader `. + + +Batching +-------- + +Batching is not an advanced feature, it's DataLoader's primary feature. +Create loaders by providing a batch loading function. + +.. code:: python + + from aiodataloader import DataLoader + + class UserLoader(DataLoader): + async def batch_load_fn(self, keys): + # Here we call a function to return a user for each key in keys + return [get_user(id=key) for key in keys] + + +A batch loading async function accepts a list of keys, and returns a list of ``values``. + + +``DataLoader`` will coalesce all individual loads which occur within a +single frame of execution (executed once the wrapping event loop is resolved) +and then call your batch function with all requested keys. + + +.. code:: python + + user_loader = UserLoader() + + user1 = await user_loader.load(1) + user1_best_friend = await user_loader.load(user1.best_friend_id) + + user2 = await user_loader.load(2) + user2_best_friend = await user_loader.load(user2.best_friend_id) + + +A naive application may have issued *four* round-trips to a backend for the +required information, but with ``DataLoader`` this application will make at most *two*. + +Note that loaded values are one-to-one with the keys and must have the same +order. This means that if you load all values from a single query, you must +make sure that you then order the query result for the results to match the keys: + + +.. code:: python + + class UserLoader(DataLoader): + async def batch_load_fn(self, keys): + users = {user.id: user for user in User.objects.filter(id__in=keys)} + return [users.get(user_id) for user_id in keys] + + +``DataLoader`` allows you to decouple unrelated parts of your application without +sacrificing the performance of batch data-loading. While the loader presents +an API that loads individual values, all concurrent requests will be coalesced +and presented to your batch loading function. This allows your application to +safely distribute data fetching requirements throughout your application and +maintain minimal outgoing data requests. + + + +Using with Graphene +------------------- + +DataLoader pairs nicely well with Graphene/GraphQL. GraphQL fields are designed +to be stand-alone functions. Without a caching or batching mechanism, it's easy +for a naive GraphQL server to issue new database requests each time a field is resolved. + +Consider the following GraphQL request: + + +.. 
code::

+    {
+      me {
+        name
+        bestFriend {
+          name
+        }
+        friends(first: 5) {
+          name
+          bestFriend {
+            name
+          }
+        }
+      }
+    }
+
+
+If ``me``, ``bestFriend`` and ``friends`` each need to send a request to the backend, there could be at most 13 database requests!
+
+
+When using DataLoader, we could define the User type using our previous example with leaner code and at most 4 database requests, and possibly fewer if there are cache hits.
+
+
+.. code:: python
+
+    class User(graphene.ObjectType):
+        name = graphene.String()
+        best_friend = graphene.Field(lambda: User)
+        friends = graphene.List(lambda: User)
+
+        async def resolve_best_friend(root, info):
+            return await user_loader.load(root.best_friend_id)
+
+        async def resolve_friends(root, info):
+            return await user_loader.load_many(root.friend_ids)
diff --git a/testbed/graphql-python__graphene/docs/execution/execute.rst b/testbed/graphql-python__graphene/docs/execution/execute.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1c0e2599afb615a00b63032afe7be0b2e4475cc9
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/execution/execute.rst
@@ -0,0 +1,138 @@
+.. _SchemaExecute:
+
+Executing a query
+=================
+
+To execute a query against a schema, you can directly call the ``execute`` method on it.
+
+
+.. code:: python
+
+    from graphene import Schema
+
+    schema = Schema(...)
+    result = schema.execute('{ name }')
+
+``result`` represents the result of execution. ``result.data`` is the result of executing the query; ``result.errors`` is ``None`` if no errors occurred, and is a non-empty list if an error occurred.
+
+
+.. _SchemaExecuteContext:
+
+Context
+_______
+
+You can pass context to a query via ``context``.
+
+
+.. code:: python
+
+    from graphene import ObjectType, String, Schema
+
+    class Query(ObjectType):
+        name = String()
+
+        def resolve_name(root, info):
+            return info.context.get('name')
+
+    schema = Schema(Query)
+    result = schema.execute('{ name }', context={'name': 'Syrus'})
+    assert result.data['name'] == 'Syrus'
+
+
+Variables
+_________
+
+You can pass variables to a query via ``variables``.
+
+
+.. code:: python
+
+    from graphene import ObjectType, Field, ID, Schema
+
+    class Query(ObjectType):
+        user = Field(User, id=ID(required=True))
+
+        def resolve_user(root, info, id):
+            return get_user_by_id(id)
+
+    schema = Schema(Query)
+    result = schema.execute(
+        '''
+        query getUser($id: ID) {
+          user(id: $id) {
+            id
+            firstName
+            lastName
+          }
+        }
+        ''',
+        variables={'id': 12},
+    )
+
+Root Value
+__________
+
+The value used for :ref:`ResolverParamParent` in root queries and mutations can be overridden by passing the ``root`` parameter.
+
+.. code:: python
+
+    from graphene import ObjectType, Field, Schema
+
+    class Query(ObjectType):
+        me = Field(User)
+
+        def resolve_me(root, info):
+            return {'id': root.id, 'firstName': root.name}
+
+    schema = Schema(Query)
+    user_root = User(id=12, name='bob')
+    result = schema.execute(
+        '''
+        query getMe {
+          me {
+            id
+            firstName
+            lastName
+          }
+        }
+        ''',
+        root=user_root
+    )
+    assert result.data['me']['id'] == user_root.id
+
+Operation Name
+______________
+
+If there are multiple operations defined in a query string, ``operation_name`` should be used to indicate which one should be executed.
+
+.. code:: python
+
+    from graphene import ObjectType, Field, Schema
+
+    class Query(ObjectType):
+        user = Field(User)
+
+        def resolve_user(root, info):
+            return get_user_by_id(12)
+
+    schema = Schema(Query)
+    query_string = '''
+        query getUserWithFirstName {
+          user {
+            id
+            firstName
+            lastName
+          }
+        }
+        query getUserWithFullName {
+          user {
+            id
+            fullName
+          }
+        }
+    '''
+    result = schema.execute(
+        query_string,
+        operation_name='getUserWithFullName'
+    )
+    assert result.data['user']['fullName']
diff --git a/testbed/graphql-python__graphene/docs/execution/fileuploading.rst b/testbed/graphql-python__graphene/docs/execution/fileuploading.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66ce9bd37161a7c28a7fd9de56fc13b0a5da57cf
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/execution/fileuploading.rst
@@ -0,0 +1,8 @@
+File uploading
+==============
+
+File uploading is not part of the official GraphQL spec yet and is not natively implemented in Graphene.
+
+If your server needs to support file uploading, you can use the library `graphene-file-upload `_, which enhances Graphene to add file uploads and conforms to the unofficial GraphQL `multipart request spec `_.
diff --git a/testbed/graphql-python__graphene/docs/execution/index.rst b/testbed/graphql-python__graphene/docs/execution/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f26259d363a8b5f1ec2ff9752b2f7ae29a40133b
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/execution/index.rst
@@ -0,0 +1,13 @@
+=========
+Execution
+=========
+
+.. toctree::
+   :maxdepth: 2
+
+   execute
+   middleware
+   dataloader
+   fileuploading
+   subscriptions
+   queryvalidation
diff --git a/testbed/graphql-python__graphene/docs/execution/middleware.rst b/testbed/graphql-python__graphene/docs/execution/middleware.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3d086f3e99c72057c18e3a0d14cbd892b59b3675
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/execution/middleware.rst
@@ -0,0 +1,70 @@
+Middleware
+==========
+
+You can use ``middleware`` to affect the evaluation of fields in your schema.
+
+A middleware is any object or function that responds to ``resolve(next_middleware, *args)``.
+
+Inside that method, it should either:
+
+- Send ``resolve`` to the next middleware to continue the evaluation; or
+- Return a value to end the evaluation early.
+
+
+Resolve arguments
+-----------------
+
+A middleware's ``resolve`` method is invoked with several arguments:
+
+- ``next`` represents the execution chain. Call ``next`` to continue evaluation.
+- ``root`` is the root value object passed throughout the query.
+- ``info`` is the resolver info.
+- ``args`` is the dict of arguments passed to the field.
+
+Example
+-------
+
+This middleware only continues evaluation if the ``field_name`` is not ``'user'``:
+
+.. code:: python
+
+    class AuthorizationMiddleware(object):
+        def resolve(self, next, root, info, **args):
+            if info.field_name == 'user':
+                return None
+            return next(root, info, **args)
+
+
+And then execute it with:
+
+.. code:: python
+
+    result = schema.execute('THE QUERY', middleware=[AuthorizationMiddleware()])
+
+If the ``middleware`` argument includes multiple middlewares, these middlewares will be executed bottom-up, i.e. from last to first.
+
+Functional example
+------------------
+
+Middleware can also be defined as a function. Here we define a middleware that logs the time it takes to resolve each field:
+
+.. code:: python
+
+    import logging
+    from time import time as timer
+
+    logger = logging.getLogger(__name__)
+
+    def timing_middleware(next, root, info, **args):
+        start = timer()
+        return_value = next(root, info, **args)
+        duration = round((timer() - start) * 1000, 2)
+        parent_type_name = root._meta.name if root and hasattr(root, '_meta') else ''
+        logger.debug(f"{parent_type_name}.{info.field_name}: {duration} ms")
+        return return_value
+
+
+And then execute it with:
+
+.. code:: python
+
+    result = schema.execute('THE QUERY', middleware=[timing_middleware])
diff --git a/testbed/graphql-python__graphene/docs/execution/queryvalidation.rst b/testbed/graphql-python__graphene/docs/execution/queryvalidation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..02e29a350e08eaa225844d1d7fb970220fdab417
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/execution/queryvalidation.rst
@@ -0,0 +1,123 @@
+Query Validation
+================
+GraphQL uses query validators to check that a query AST is valid and can be executed. Every GraphQL server implements standard query validators. For example, there is a validator that checks that each queried field exists on the queried type, and fails the query with a "Cannot query field on type" error if it doesn't.
+
+To help with common use cases, graphene provides a few validation rules out of the box.
+
+
+Depth limit Validator
+---------------------
+The depth limit validator helps to prevent execution of malicious queries. It takes the following arguments:
+
+- ``max_depth`` is the maximum allowed depth for any operation in a GraphQL document.
+- ``ignore`` stops recursive depth checking based on a field name: either a string or regexp to match the name, or a function that returns a boolean.
+- ``callback`` is called each time validation runs. It receives an Object which is a map of the depths for each operation.
+
+Usage
+-----
+
+Here is how you would implement depth-limiting on your schema.
+
+.. code:: python
+
+    from graphql import validate, parse
+    from graphene import ObjectType, Schema, String
+    from graphene.validation import depth_limit_validator
+
+
+    class MyQuery(ObjectType):
+        name = String(required=True)
+
+
+    schema = Schema(query=MyQuery)
+
+    # Queries which have a depth of more than 20
+    # will not be executed.
+
+    validation_errors = validate(
+        schema=schema.graphql_schema,
+        document_ast=parse('THE QUERY'),
+        rules=(
+            depth_limit_validator(
+                max_depth=20
+            ),
+        )
+    )
+
+
+Disable Introspection
+---------------------
+The disable introspection validation rule ensures that your schema cannot be introspected. This is a useful security measure in production environments.
+
+Usage
+-----
+
+Here is how you would disable introspection for your schema.
+
+.. code:: python
+
+    from graphql import validate, parse
+    from graphene import ObjectType, Schema, String
+    from graphene.validation import DisableIntrospection
+
+
+    class MyQuery(ObjectType):
+        name = String(required=True)
+
+
+    schema = Schema(query=MyQuery)
+
+    # introspection queries will not be executed.
+
+    validation_errors = validate(
+        schema=schema.graphql_schema,
+        document_ast=parse('THE QUERY'),
+        rules=(
+            DisableIntrospection,
+        )
+    )
+
+
+Implementing custom validators
+------------------------------
+All custom query validators should extend the `ValidationRule `_ base class importable from the graphql.validation.rules module. Query validators are visitor classes. They are instantiated at the time of query validation with one required argument (context: ASTValidationContext).
In order to +perform validation, your validator class should define one or more of enter_* and leave_* methods. For possible +enter/leave items as well as details on function documentation, please see contents of the visitor module. To make +validation fail, you should call validator's report_error method with the instance of GraphQLError describing failure +reason. Here is an example query validator that visits field definitions in GraphQL query and fails query validation +if any of those fields are blacklisted: + +.. code:: python + + from graphql import GraphQLError + from graphql.language import FieldNode + from graphql.validation import ValidationRule + + + my_blacklist = ( + "disallowed_field", + ) + + + def is_blacklisted_field(field_name: str): + return field_name.lower() in my_blacklist + + + class BlackListRule(ValidationRule): + def enter_field(self, node: FieldNode, *_args): + field_name = node.name.value + if not is_blacklisted_field(field_name): + return + + self.report_error( + GraphQLError( + f"Cannot query '{field_name}': field is blacklisted.", node, + ) + ) + diff --git a/testbed/graphql-python__graphene/docs/execution/subscriptions.rst b/testbed/graphql-python__graphene/docs/execution/subscriptions.rst new file mode 100644 index 0000000000000000000000000000000000000000..86ed78a1c23e17a894ad0232b944e2f740574180 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/execution/subscriptions.rst @@ -0,0 +1,40 @@ +.. _SchemaSubscription: + +Subscriptions +============= + +To create a subscription, you can directly call the ``subscribe`` method on the +schema. This method is async and must be awaited. + +.. code:: python + + import asyncio + from datetime import datetime + from graphene import ObjectType, String, Schema, Field + + # Every schema requires a query. + class Query(ObjectType): + hello = String() + + def resolve_hello(root, info): + return "Hello, world!" + + class Subscription(ObjectType): + time_of_day = String() + + async def subscribe_time_of_day(root, info): + while True: + yield datetime.now().isoformat() + await asyncio.sleep(1) + + schema = Schema(query=Query, subscription=Subscription) + + async def main(schema): + subscription = 'subscription { timeOfDay }' + result = await schema.subscribe(subscription) + async for item in result: + print(item.data['timeOfDay']) + + asyncio.run(main(schema)) + +The ``result`` is an async iterator which yields items in the same manner as a query. diff --git a/testbed/graphql-python__graphene/docs/index.rst b/testbed/graphql-python__graphene/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..05b7fd87e30413124b5cf8245acd8c14950c7121 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/index.rst @@ -0,0 +1,26 @@ +Graphene +======== + +Contents: + +.. toctree:: + :maxdepth: 2 + + quickstart + types/index + execution/index + relay/index + testing/index + api/index + +.. 
_Integrations: + +Integrations +------------ + +* `Graphene-Django `_ (`source `_) +* Flask-Graphql (`source `_) +* `Graphene-SQLAlchemy `_ (`source `_) +* `Graphene-Mongo `_ (`source `_) +* `Starlette `_ (`source `_) +* `FastAPI `_ (`source `_) diff --git a/testbed/graphql-python__graphene/docs/quickstart.rst b/testbed/graphql-python__graphene/docs/quickstart.rst new file mode 100644 index 0000000000000000000000000000000000000000..75f201c95bd4e87b0da70b88daebddfb4aa84120 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/quickstart.rst @@ -0,0 +1,144 @@ +Getting started +=============== + +Introduction +------------ + +What is GraphQL? +~~~~~~~~~~~~~~~~ + +GraphQL is a query language for your API. + +It provides a standard way to: + +* *describe data provided by a server* in a statically typed **Schema** +* *request data* in a **Query** which exactly describes your data requirements and +* *receive data* in a **Response** containing only the data you requested. + +For an introduction to GraphQL and an overview of its concepts, please refer to `the official GraphQL documentation`_. + +.. _the official GraphQL documentation: http://graphql.org/learn/ + +What is Graphene? +~~~~~~~~~~~~~~~~~ + +Graphene is a library that provides tools to implement a GraphQL API in Python using a *code-first* approach. + +Compare Graphene's *code-first* approach to building a GraphQL API with *schema-first* approaches like `Apollo Server`_ (JavaScript) or Ariadne_ (Python). Instead of writing GraphQL **Schema Definition Language (SDL)**, we write Python code to describe the data provided by your server. + +.. _Apollo Server: https://www.apollographql.com/docs/apollo-server/ + +.. _Ariadne: https://ariadnegraphql.org/ + +Graphene is fully featured with integrations for the most popular web frameworks and ORMs. Graphene produces schemas that are fully compliant with the GraphQL spec and provides tools and patterns for building a Relay-Compliant API as well. + +An example in Graphene +---------------------- + +Let’s build a basic GraphQL schema to say "hello" and "goodbye" in Graphene. + +When we send a **Query** requesting only one **Field**, ``hello``, and specify a value for the ``firstName`` **Argument**... + +.. code:: + + { + hello(firstName: "friend") + } + +...we would expect the following Response containing only the data requested (the ``goodbye`` field is not resolved). + +.. code:: + + { + "data": { + "hello": "Hello friend!" + } + } + + +Requirements +~~~~~~~~~~~~ + +- Python (3.6, 3.7, 3.8, 3.9, 3.10, pypy) +- Graphene (3.0) + +Project setup +~~~~~~~~~~~~~ + +.. code:: bash + + pip install "graphene>=3.0" + +Creating a basic Schema +~~~~~~~~~~~~~~~~~~~~~~~ + +In Graphene, we can define a simple schema using the following code: + +.. code:: python + + from graphene import ObjectType, String, Schema + + class Query(ObjectType): + # this defines a Field `hello` in our Schema with a single Argument `first_name` + # By default, the argument name will automatically be camel-based into firstName in the generated schema + hello = String(first_name=String(default_value="stranger")) + goodbye = String() + + # our Resolver method takes the GraphQL context (root, info) as well as + # Argument (first_name) for the Field and returns data for the query Response + def resolve_hello(root, info, first_name): + return f'Hello {first_name}!' + + def resolve_goodbye(root, info): + return 'See ya!' 
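+
+    # Build the Schema itself; the Query ObjectType above becomes the
+    # root type that incoming queries are executed against.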
+    schema = Schema(query=Query)
+
+
+A GraphQL **Schema** describes each **Field** in the data model provided by the server using scalar types like *String*, *Int* and *Enum* and compound types like *List* and *Object*. For more details, refer to the Graphene :ref:`TypesReference`.
+
+Our schema can also define any number of **Arguments** for our **Fields**. This is a powerful way for a **Query** to describe the exact data requirements for each **Field**.
+
+For each **Field** in our **Schema**, we write a **Resolver** method to fetch data requested by a client's **Query** using the current context and **Arguments**. For more details, refer to this section on :ref:`Resolvers`.
+
+Schema Definition Language (SDL)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the `GraphQL Schema Definition Language`_, we could describe the fields defined by our example code as shown below.
+
+.. _GraphQL Schema Definition Language: https://graphql.org/learn/schema/
+
+.. code::
+
+    type Query {
+      hello(firstName: String = "stranger"): String
+      goodbye: String
+    }
+
+Further examples in this documentation will use SDL to describe schemas created by ObjectTypes and other fields.
+
+Querying
+~~~~~~~~
+
+Then we can start querying our **Schema** by passing a GraphQL query string to ``execute``:
+
+.. code:: python
+
+    # we can query for our field (with the default argument)
+    query_string = '{ hello }'
+    result = schema.execute(query_string)
+    print(result.data['hello'])
+    # "Hello stranger!"
+
+    # or passing the argument in the query
+    query_with_argument = '{ hello(firstName: "GraphQL") }'
+    result = schema.execute(query_with_argument)
+    print(result.data['hello'])
+    # "Hello GraphQL!"
+
+Next steps
+~~~~~~~~~~
+
+Congrats! You got your first Graphene schema working!
+
+Normally, we don't need to directly execute a query string against our schema, as Graphene provides many useful Integrations with popular web frameworks like Flask and Django. Check out :ref:`Integrations` for more information on how to get started serving your GraphQL API.
diff --git a/testbed/graphql-python__graphene/docs/relay/connection.rst b/testbed/graphql-python__graphene/docs/relay/connection.rst
new file mode 100644
index 0000000000000000000000000000000000000000..07d81ada6edc775dc4f8902cac745b0e88edf99d
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/relay/connection.rst
@@ -0,0 +1,45 @@
+Connection
+==========
+
+A connection is a vitaminized version of a List that provides ways of slicing and paginating through it. The way you create Connection types in ``graphene`` is using ``relay.Connection`` and ``relay.ConnectionField``.
+
+Quick example
+-------------
+
+If we want to create a custom Connection on a given node, we have to subclass the ``Connection`` class.
+
+In the following example, ``extra`` will be an extra field in the connection, and ``other`` an extra field in the Connection Edge.
+
+.. code:: python
+
+    class ShipConnection(Connection):
+        extra = String()
+
+        class Meta:
+            node = Ship
+
+        class Edge:
+            other = String()
+
+The ``ShipConnection`` connection class will automatically have a ``pageInfo`` field and an ``edges`` field (which is a list of ``ShipConnection.Edge``). This ``Edge`` will have a ``node`` field linking to the specified node (in ``ShipConnection.Meta``) and the ``other`` field that we defined in the class.
+
+Connection Field
+----------------
+You can create connection fields from any Connection; any ObjectType that implements ``Node`` will have a default Connection.
+
+.. code:: python
+
+    class Faction(graphene.ObjectType):
+        name = graphene.String()
+        ships = relay.ConnectionField(ShipConnection)
+
+        def resolve_ships(root, info):
+            return []
diff --git a/testbed/graphql-python__graphene/docs/relay/index.rst b/testbed/graphql-python__graphene/docs/relay/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2efde25fe1a8c01b1c5a0a7b1f2ae9128a13fd54
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/relay/index.rst
@@ -0,0 +1,26 @@
+Relay
+=====
+
+Graphene has complete support for `Relay`_ and offers some utils to make integration from Python easy.
+
+
+.. toctree::
+   :maxdepth: 2
+
+   nodes
+   connection
+   mutations
+
+
+Useful links
+------------
+
+- `Getting started with Relay`_
+- `Relay Global Identification Specification`_
+- `Relay Cursor Connection Specification`_
+
+.. _Relay: https://relay.dev/docs/guides/graphql-server-specification/
+.. _Getting started with Relay: https://relay.dev/docs/getting-started/step-by-step-guide/
+.. _Relay Global Identification Specification: https://relay.dev/graphql/objectidentification.htm
+.. _Relay Cursor Connection Specification: https://relay.dev/graphql/connections.htm
diff --git a/testbed/graphql-python__graphene/docs/relay/mutations.rst b/testbed/graphql-python__graphene/docs/relay/mutations.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c6099594c7289336445f7dc926ba9eb5c1abda76
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/relay/mutations.rst
@@ -0,0 +1,57 @@
+Mutations
+=========
+
+Most APIs don’t just allow you to read data; they also allow you to write.
+
+In GraphQL, this is done using mutations. Just like queries, Relay puts some additional requirements on mutations, but Graphene nicely manages that for you. All you need to do is make your mutation a subclass of ``relay.ClientIDMutation``.
+
+.. code:: python
+
+    class IntroduceShip(relay.ClientIDMutation):
+
+        class Input:
+            ship_name = graphene.String(required=True)
+            faction_id = graphene.String(required=True)
+
+        ship = graphene.Field(Ship)
+        faction = graphene.Field(Faction)
+
+        @classmethod
+        def mutate_and_get_payload(cls, root, info, **input):
+            # ``input`` is received as keyword arguments, so it is a plain dict
+            ship_name = input["ship_name"]
+            faction_id = input["faction_id"]
+            ship = create_ship(ship_name, faction_id)
+            faction = get_faction(faction_id)
+            return IntroduceShip(ship=ship, faction=faction)
+
+
+
+Accepting Files
+---------------
+
+Mutations can also accept files. Here is how it works with the different integrations:
+
+.. code:: python
+
+    class UploadFile(graphene.ClientIDMutation):
+        class Input:
+            pass
+            # nothing needed for uploading a file
+
+        # your return fields
+        success = graphene.Boolean()
+
+        @classmethod
+        def mutate_and_get_payload(cls, root, info, **input):
+            # When using it in Django, context will be the request
+            files = info.context.FILES
+            # Or, if used in Flask, context will be the flask global request
+            # files = context.files
+
+            # do something with files
+
+            return UploadFile(success=True)
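+
+As a quick usage sketch (the argument values here are illustrative), a client invokes ``IntroduceShip`` like any other Relay mutation, passing its arguments through the generated ``input`` object:
+
+.. code::
+
+    mutation {
+      introduceShip(input: {shipName: "Falcon", factionId: "1"}) {
+        ship {
+          name
+        }
+        faction {
+          name
+        }
+      }
+    }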
diff --git a/testbed/graphql-python__graphene/docs/relay/nodes.rst b/testbed/graphql-python__graphene/docs/relay/nodes.rst
new file mode 100644
index 0000000000000000000000000000000000000000..285dbb20d50f131307b454760e31023745c3e631
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/relay/nodes.rst
@@ -0,0 +1,102 @@
+Nodes
+=====
+
+A ``Node`` is an Interface provided by ``graphene.relay`` that contains a single field ``id`` (which is an ``ID!``). Any object that inherits from it has to implement a ``get_node`` method for retrieving a ``Node`` by an *id*.
+
+
+Quick example
+-------------
+
+Example usage (taken from the `Starwars Relay example`_):
+
+.. code:: python
+
+    class Ship(graphene.ObjectType):
+        '''A ship in the Star Wars saga'''
+        class Meta:
+            interfaces = (relay.Node, )
+
+        name = graphene.String(description='The name of the ship.')
+
+        @classmethod
+        def get_node(cls, info, id):
+            return get_ship(id)
+
+The ``id`` returned by the ``Ship`` type when you query it will be a scalar which contains enough info for the server to know its type and its id.
+
+For example, the instance ``Ship(id=1)`` will return ``U2hpcDox`` as the id when you query it (which is the base64 encoding of ``Ship:1``), and which could be useful later if we want to query a node by its id.
+
+
+Custom Nodes
+------------
+
+You can use the predefined ``relay.Node`` or you can subclass it, defining custom ways of how a node id is encoded (using the ``to_global_id`` method in the class) or how we can retrieve a Node given an encoded id (with the ``get_node_from_global_id`` method).
+
+Example of a custom node:
+
+.. code:: python
+
+    class CustomNode(Node):
+
+        class Meta:
+            name = 'Node'
+
+        @staticmethod
+        def to_global_id(type_, id):
+            return f"{type_}:{id}"
+
+        @staticmethod
+        def get_node_from_global_id(info, global_id, only_type=None):
+            type_, id = global_id.split(':')
+            if only_type:
+                # We assure that the node type that we want to retrieve
+                # is the same that was indicated in the field type
+                assert type_ == only_type._meta.name, 'Received not compatible node.'
+
+            if type_ == 'User':
+                return get_user(id)
+            elif type_ == 'Photo':
+                return get_photo(id)
+
+
+The ``get_node_from_global_id`` method will be called when ``CustomNode.Field`` is resolved.
+
+
+Accessing node types
+--------------------
+
+If we want to retrieve node instances from a ``global_id`` (a scalar that identifies an instance by its type name and id), we can simply do ``Node.get_node_from_global_id(info, global_id)``.
+
+If we want to restrict the instance retrieval to a specific type, we can do: ``Node.get_node_from_global_id(info, global_id, only_type=Ship)``. This will raise an error if the ``global_id`` doesn't correspond to a Ship type.
+
+
+Node Root field
+---------------
+
+As is required in the `Relay specification`_, the server must implement a root field called ``node`` that returns a ``Node`` Interface.
+
+For this reason, ``graphene`` provides the field ``relay.Node.Field``, which links to any type in the Schema which implements ``Node``. Example usage:
+
+.. code:: python
+
+    class Query(graphene.ObjectType):
+        # Should be CustomNode.Field() if we want to use our custom Node
+        node = relay.Node.Field()
+
+.. _Relay specification: https://facebook.github.io/relay/docs/graphql-relay-specification.html
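+
+As a quick usage sketch (reusing the ``Ship`` type above and the global id ``U2hpcDox`` from the earlier example), the ``node`` root field is queried with a global id plus an inline fragment for the concrete type:
+
+.. code::
+
+    {
+      node(id: "U2hpcDox") {
+        id
+        ... on Ship {
+          name
+        }
+      }
+    }
+
+.. 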
_Starwars Relay example: https://github.com/graphql-python/graphene/blob/master/examples/starwars_relay/schema.py diff --git a/testbed/graphql-python__graphene/docs/requirements.txt b/testbed/graphql-python__graphene/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..dee009c70cc18fa72b6ced01021354015b31d58e --- /dev/null +++ b/testbed/graphql-python__graphene/docs/requirements.txt @@ -0,0 +1,5 @@ +# Required library +Sphinx==6.1.3 +sphinx-autobuild==2021.3.14 +# Docs template +http://graphene-python.org/sphinx_graphene_theme.zip diff --git a/testbed/graphql-python__graphene/docs/testing/index.rst b/testbed/graphql-python__graphene/docs/testing/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..877879f6f03774b9733017162095c70eead3c6bf --- /dev/null +++ b/testbed/graphql-python__graphene/docs/testing/index.rst @@ -0,0 +1,111 @@ +=================== +Testing in Graphene +=================== + + +Automated testing is an extremely useful bug-killing tool for the modern developer. You can use a collection of tests – a test suite – to solve, or avoid, a number of problems: + +- When you’re writing new code, you can use tests to validate your code works as expected. +- When you’re refactoring or modifying old code, you can use tests to ensure your changes haven’t affected your application’s behavior unexpectedly. + +Testing a GraphQL application is a complex task, because a GraphQL application is made of several layers of logic – schema definition, schema validation, permissions and field resolution. + +With Graphene test-execution framework and assorted utilities, you can simulate GraphQL requests, execute mutations, inspect your application’s output and generally verify your code is doing what it should be doing. + + +Testing tools +------------- + +Graphene provides a small set of tools that come in handy when writing tests. + + +Test Client +~~~~~~~~~~~ + +The test client is a Python class that acts as a dummy GraphQL client, allowing you to test your views and interact with your Graphene-powered application programmatically. + +Some of the things you can do with the test client are: + +- Simulate Queries and Mutations and observe the response. +- Test that a given query request is rendered by a given Django template, with a template context that contains certain values. + + +Overview and a quick example +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To use the test client, instantiate ``graphene.test.Client`` and retrieve GraphQL responses: + + +.. code:: python + + from graphene.test import Client + + def test_hey(): + client = Client(my_schema) + executed = client.execute('''{ hey }''') + assert executed == { + 'data': { + 'hey': 'hello!' + } + } + + +Execute parameters +~~~~~~~~~~~~~~~~~~ + +You can also add extra keyword arguments to the ``execute`` method, such as +``context``, ``root``, ``variables``, ...: + + +.. code:: python + + from graphene.test import Client + + def test_hey(): + client = Client(my_schema) + executed = client.execute('''{ hey }''', context={'user': 'Peter'}) + assert executed == { + 'data': { + 'hey': 'hello Peter!' + } + } + + +Snapshot testing +~~~~~~~~~~~~~~~~ + +As our APIs evolve, we need to know when our changes introduce any breaking changes that might break +some of the clients of our GraphQL app. + +However, writing tests and replicating the same response we expect from our GraphQL application can be a +tedious and repetitive task, and sometimes it's easier to skip this process. 
+
+Because of that, we recommend the usage of `SnapshotTest `_.
+
+SnapshotTest makes writing all of these tests a breeze, as it automatically creates the ``snapshots`` for us the first time the tests are executed.
+
+
+Here is a simple example of how our tests will look if we use ``pytest``:
+
+.. code:: python
+
+    def test_hey(snapshot):
+        client = Client(my_schema)
+        # This will create a snapshot dir and a snapshot file
+        # the first time the test is executed, with the response
+        # of the execution.
+        snapshot.assert_match(client.execute('''{ hey }'''))
+
+
+If we are using ``unittest``:
+
+.. code:: python
+
+    from snapshottest import TestCase
+
+    class APITestCase(TestCase):
+        def test_api_me(self):
+            """Testing the API for /me"""
+            client = Client(my_schema)
+            self.assertMatchSnapshot(client.execute('''{ hey }'''))
diff --git a/testbed/graphql-python__graphene/docs/types/enums.rst b/testbed/graphql-python__graphene/docs/types/enums.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b9ac5333856ae86902bd7be42961da2c3f0c7d6c
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/types/enums.rst
@@ -0,0 +1,103 @@
+Enums
+=====
+
+An ``Enum`` is a special ``GraphQL`` type that represents a set of symbolic names (members) bound to unique, constant values.
+
+Definition
+----------
+
+You can create an ``Enum`` using classes:
+
+.. code:: python
+
+    import graphene
+
+    class Episode(graphene.Enum):
+        NEWHOPE = 4
+        EMPIRE = 5
+        JEDI = 6
+
+But also using instances of Enum:
+
+.. code:: python
+
+    Episode = graphene.Enum('Episode', [('NEWHOPE', 4), ('EMPIRE', 5), ('JEDI', 6)])
+
+Value descriptions
+------------------
+
+It's possible to add a description to an enum value; for that, the enum value needs to have a ``description`` property on it.
+
+.. code:: python
+
+    class Episode(graphene.Enum):
+        NEWHOPE = 4
+        EMPIRE = 5
+        JEDI = 6
+
+        @property
+        def description(self):
+            if self == Episode.NEWHOPE:
+                return 'New Hope Episode'
+            return 'Other episode'
+
+
+Usage with Python Enums
+-----------------------
+
+In case the Enums are already defined, it's possible to reuse them using the ``Enum.from_enum`` function.
+
+.. code:: python
+
+    graphene.Enum.from_enum(AlreadyExistingPyEnum)
+
+``Enum.from_enum`` supports ``description`` and ``deprecation_reason`` lambdas as input, so you can add a description etc. to your enum without changing the original:
+
+.. code:: python
+
+    graphene.Enum.from_enum(
+        AlreadyExistingPyEnum,
+        description=lambda v: 'foo' if v == AlreadyExistingPyEnum.Foo else 'bar'
+    )
+
+
+Notes
+-----
+
+``graphene.Enum`` uses |enum.Enum|_ internally (or a backport if that's not available) and can be used in a similar way, with the exception of member getters.
+
+In the Python ``Enum`` implementation you can access a member by calling the Enum.
+
+.. code:: python
+
+    from enum import Enum
+
+    class Color(Enum):
+        RED = 1
+        GREEN = 2
+        BLUE = 3
+
+    assert Color(1) == Color.RED
+
+
+However, in Graphene ``Enum`` you need to call ``.get`` to have the same effect:
+
+.. code:: python
+
+    from graphene import Enum
+
+    class Color(Enum):
+        RED = 1
+        GREEN = 2
+        BLUE = 3
+
+    assert Color.get(1) == Color.RED
+
+.. |enum.Enum| replace:: ``enum.Enum``
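+
+As an additional minimal sketch (the ``Query`` type here is illustrative, not part of the documented API), an ``Enum`` such as ``Episode`` above can also be mounted as a field and resolved to one of its members:
+
+.. code:: python
+
+    import graphene
+
+    class Episode(graphene.Enum):
+        NEWHOPE = 4
+        EMPIRE = 5
+        JEDI = 6
+
+    class Query(graphene.ObjectType):
+        # Mount the Enum as a regular Field; the resolver returns a member,
+        # which is serialized in responses as its name (e.g. "JEDI").
+        hero_episode = graphene.Field(Episode)
+
+        def resolve_hero_episode(root, info):
+            return Episode.JEDI
+
+.. 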
_enum.Enum: https://docs.python.org/3/library/enum.html diff --git a/testbed/graphql-python__graphene/docs/types/index.rst b/testbed/graphql-python__graphene/docs/types/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..d82beb33ad6d7e005351556748da55e4f2d206ab --- /dev/null +++ b/testbed/graphql-python__graphene/docs/types/index.rst @@ -0,0 +1,17 @@ +.. _TypesReference: + +=============== +Types Reference +=============== + +.. toctree:: + :maxdepth: 1 + + schema + scalars + list-and-nonnull + objecttypes + enums + interfaces + unions + mutations diff --git a/testbed/graphql-python__graphene/docs/types/interfaces.rst b/testbed/graphql-python__graphene/docs/types/interfaces.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d4e32a56e024b869164f1872217fd3fd837dfde --- /dev/null +++ b/testbed/graphql-python__graphene/docs/types/interfaces.rst @@ -0,0 +1,172 @@ +.. _Interfaces: + +Interfaces +========== + +An *Interface* is an abstract type that defines a certain set of fields that a +type must include to implement the interface. + +For example, you can define an Interface ``Character`` that represents any +character in the Star Wars trilogy: + +.. code:: python + + import graphene + + class Character(graphene.Interface): + id = graphene.ID(required=True) + name = graphene.String(required=True) + friends = graphene.List(lambda: Character) + + +Any ObjectType that implements ``Character`` will have these exact fields, with +these arguments and return types. + +For example, here are some types that might implement ``Character``: + +.. code:: python + + class Human(graphene.ObjectType): + class Meta: + interfaces = (Character, ) + + starships = graphene.List(Starship) + home_planet = graphene.String() + + class Droid(graphene.ObjectType): + class Meta: + interfaces = (Character, ) + + primary_function = graphene.String() + + +Both of these types have all of the fields from the ``Character`` interface, +but also bring in extra fields, ``home_planet``, ``starships`` and +``primary_function``, that are specific to that particular type of character. + +The full GraphQL schema definition will look like this: + +.. code:: + + interface Character { + id: ID! + name: String! + friends: [Character] + } + + type Human implements Character { + id: ID! + name: String! + friends: [Character] + starships: [Starship] + homePlanet: String + } + + type Droid implements Character { + id: ID! + name: String! + friends: [Character] + primaryFunction: String + } + +Interfaces are useful when you want to return an object or set of objects, +which might be of several different types. + +For example, you can define a field ``hero`` that resolves to any +``Character``, depending on the episode, like this: + +.. code:: python + + class Query(graphene.ObjectType): + hero = graphene.Field( + Character, + required=True, + episode=graphene.Int(required=True) + ) + + def resolve_hero(root, info, episode): + # Luke is the hero of Episode V + if episode == 5: + return get_human(name='Luke Skywalker') + return get_droid(name='R2-D2') + + schema = graphene.Schema(query=Query, types=[Human, Droid]) + +This allows you to directly query for fields that exist on the Character interface +as well as selecting specific fields on any type that implements the interface +using `inline fragments `_. + +For example, the following query: + +.. code:: + + query HeroForEpisode($episode: Int!) { + hero(episode: $episode) { + __typename + name + ... on Droid { + primaryFunction + } + ... 
on Human { + homePlanet + } + } + } + +Will return the following data with variables ``{ "episode": 4 }``: + +.. code:: json + + { + "data": { + "hero": { + "__typename": "Droid", + "name": "R2-D2", + "primaryFunction": "Astromech" + } + } + } + +And different data with the variables ``{ "episode": 5 }``: + +.. code:: json + + { + "data": { + "hero": { + "__typename": "Human", + "name": "Luke Skywalker", + "homePlanet": "Tatooine" + } + } + } + +Resolving data objects to types +------------------------------- + +As you build out your schema in Graphene it's common for your resolvers to +return objects that represent the data backing your GraphQL types rather than +instances of the Graphene types (e.g. Django or SQLAlchemy models). This works +well with ``ObjectType`` and ``Scalar`` fields, however when you start using +Interfaces you might come across this error: + +.. code:: + + "Abstract type Character must resolve to an Object type at runtime for field Query.hero ..." + +This happens because Graphene doesn't have enough information to convert the +data object into a Graphene type needed to resolve the ``Interface``. To solve +this you can define a ``resolve_type`` class method on the ``Interface`` which +maps a data object to a Graphene type: + +.. code:: python + + class Character(graphene.Interface): + id = graphene.ID(required=True) + name = graphene.String(required=True) + + @classmethod + def resolve_type(cls, instance, info): + if instance.type == 'DROID': + return Droid + return Human diff --git a/testbed/graphql-python__graphene/docs/types/list-and-nonnull.rst b/testbed/graphql-python__graphene/docs/types/list-and-nonnull.rst new file mode 100644 index 0000000000000000000000000000000000000000..a127a9d2452fcb6134c96336e9504743d23fd947 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/types/list-and-nonnull.rst @@ -0,0 +1,71 @@ +Lists and Non-Null +================== + +Object types, scalars, and enums are the only kinds of types you can +define in Graphene. But when you use the types in other parts of the +schema, or in your query variable declarations, you can apply additional +type modifiers that affect validation of those values. + +NonNull +------- + +.. code:: python + + import graphene + + class Character(graphene.ObjectType): + name = graphene.NonNull(graphene.String) + + +Here, we're using a ``String`` type and marking it as Non-Null by wrapping +it using the ``NonNull`` class. This means that our server always expects +to return a non-null value for this field, and if it ends up getting a +null value that will actually trigger a GraphQL execution error, +letting the client know that something has gone wrong. + + +The previous ``NonNull`` code snippet is also equivalent to: + +.. code:: python + + import graphene + + class Character(graphene.ObjectType): + name = graphene.String(required=True) + + +List +---- + +.. code:: python + + import graphene + + class Character(graphene.ObjectType): + appears_in = graphene.List(graphene.String) + +Lists work in a similar way: We can use a type modifier to mark a type as a +``List``, which indicates that this field will return a list of that type. +It works the same for arguments, where the validation step will expect a list +for that value. + +NonNull Lists +------------- + +By default items in a list will be considered nullable. To define a list without +any nullable items the type needs to be marked as ``NonNull``. For example: + +.. 
code:: python + + import graphene + + class Character(graphene.ObjectType): + appears_in = graphene.List(graphene.NonNull(graphene.String)) + +The above results in the type definition: + +.. code:: + + type Character { + appearsIn: [String!] + } diff --git a/testbed/graphql-python__graphene/docs/types/mutations.rst b/testbed/graphql-python__graphene/docs/types/mutations.rst new file mode 100644 index 0000000000000000000000000000000000000000..738660632e427ef9f6186b891eb379329c95e8ec --- /dev/null +++ b/testbed/graphql-python__graphene/docs/types/mutations.rst @@ -0,0 +1,183 @@ +Mutations +========= + +A Mutation is a special ObjectType that also defines an Input. + +Quick example +------------- + +This example defines a Mutation: + +.. code:: python + + import graphene + + class CreatePerson(graphene.Mutation): + class Arguments: + name = graphene.String() + + ok = graphene.Boolean() + person = graphene.Field(lambda: Person) + + def mutate(root, info, name): + person = Person(name=name) + ok = True + return CreatePerson(person=person, ok=ok) + +**person** and **ok** are the output fields of the Mutation when it is +resolved. + +**Arguments** attributes are the arguments that the Mutation +``CreatePerson`` needs for resolving, in this case **name** will be the +only argument for the mutation. + +**mutate** is the function that will be applied once the mutation is +called. This method is just a special resolver that we can change +data within. It takes the same arguments as the standard query :ref:`ResolverArguments`. + +So, we can finish our schema like this: + +.. code:: python + + # ... the Mutation Class + + class Person(graphene.ObjectType): + name = graphene.String() + age = graphene.Int() + + class MyMutations(graphene.ObjectType): + create_person = CreatePerson.Field() + + # We must define a query for our schema + class Query(graphene.ObjectType): + person = graphene.Field(Person) + + schema = graphene.Schema(query=Query, mutation=MyMutations) + +Executing the Mutation +---------------------- + +Then, if we query (``schema.execute(query_str)``) the following: + +.. code:: + + mutation myFirstMutation { + createPerson(name:"Peter") { + person { + name + } + ok + } + } + +We should receive: + +.. code:: json + + { + "createPerson": { + "person" : { + "name": "Peter" + }, + "ok": true + } + } + +InputFields and InputObjectTypes +---------------------------------- +InputFields are used in mutations to allow nested input data for mutations. + +To use an InputField you define an InputObjectType that specifies the structure of your input data: + + +.. code:: python + + import graphene + + class PersonInput(graphene.InputObjectType): + name = graphene.String(required=True) + age = graphene.Int(required=True) + + class CreatePerson(graphene.Mutation): + class Arguments: + person_data = PersonInput(required=True) + + person = graphene.Field(Person) + + def mutate(root, info, person_data=None): + person = Person( + name=person_data.name, + age=person_data.age + ) + return CreatePerson(person=person) + + +Note that **name** and **age** are part of **person_data** now. + +Using the above mutation your new query would look like this: + +.. code:: + + mutation myFirstMutation { + createPerson(personData: {name:"Peter", age: 24}) { + person { + name, + age + } + } + } + +InputObjectTypes can also be fields of InputObjectTypes allowing you to have +as complex of input data as you need: + +.. 
code:: python + + import graphene + + class LatLngInput(graphene.InputObjectType): + lat = graphene.Float() + lng = graphene.Float() + + #A location has a latlng associated to it + class LocationInput(graphene.InputObjectType): + name = graphene.String() + latlng = graphene.InputField(LatLngInput) + +Output type example +------------------- +To return an existing ObjectType instead of a mutation-specific type, set the **Output** attribute to the desired ObjectType: + +.. code:: python + + import graphene + + class CreatePerson(graphene.Mutation): + class Arguments: + name = graphene.String() + + Output = Person + + def mutate(root, info, name): + return Person(name=name) + +Then, if we query (``schema.execute(query_str)``) with the following: + +.. code:: + + mutation myFirstMutation { + createPerson(name:"Peter") { + name + __typename + } + } + +We should receive: + +.. code:: json + + { + "createPerson": { + "name": "Peter", + "__typename": "Person" + } + } diff --git a/testbed/graphql-python__graphene/docs/types/objecttypes.rst b/testbed/graphql-python__graphene/docs/types/objecttypes.rst new file mode 100644 index 0000000000000000000000000000000000000000..3cc8d8302ee377b2a2c3410d726e582c0bc9bb30 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/types/objecttypes.rst @@ -0,0 +1,431 @@ +.. _ObjectType: + +ObjectType +========== + +A Graphene *ObjectType* is the building block used to define the relationship between **Fields** in your **Schema** and how their data is retrieved. + +The basics: + +- Each ObjectType is a Python class that inherits from ``graphene.ObjectType``. +- Each attribute of the ObjectType represents a ``Field``. +- Each ``Field`` has a :ref:`resolver method` to fetch data (or :ref:`DefaultResolver`). + +Quick example +------------- + +This example model defines a Person, with a first and a last name: + +.. code:: python + + from graphene import ObjectType, String + + class Person(ObjectType): + first_name = String() + last_name = String() + full_name = String() + + def resolve_full_name(parent, info): + return f"{parent.first_name} {parent.last_name}" + +This *ObjectType* defines the field **first\_name**, **last\_name**, and **full\_name**. Each field is specified as a class attribute, and each attribute maps to a Field. Data is fetched by our ``resolve_full_name`` :ref:`resolver method` for ``full_name`` field and the :ref:`DefaultResolver` for other fields. + +The above ``Person`` ObjectType has the following schema representation: + +.. code:: + + type Person { + firstName: String + lastName: String + fullName: String + } + +.. _Resolvers: + +Resolvers +--------- + +A **Resolver** is a method that helps us answer **Queries** by fetching data for a **Field** in our **Schema**. + +Resolvers are lazily executed, so if a field is not included in a query, its resolver will not be executed. + +Each field on an *ObjectType* in Graphene should have a corresponding resolver method to fetch data. This resolver method should match the field name. For example, in the ``Person`` type above, the ``full_name`` field is resolved by the method ``resolve_full_name``. + +Each resolver method takes the parameters: + +* :ref:`ResolverParamParent` for the value object use to resolve most fields +* :ref:`ResolverParamInfo` for query and schema meta information and per-request context +* :ref:`ResolverParamGraphQLArguments` as defined on the **Field**. + +.. _ResolverArguments: + +Resolver Parameters +~~~~~~~~~~~~~~~~~~~ + +.. 
_ResolverParamParent: + +Parent Value Object (*parent*) +****************************** + +This parameter is typically used to derive the values for most fields on an *ObjectType*. + +The first parameter of a resolver method (*parent*) is the value object returned from the resolver of the parent field. If there is no parent field, such as a root Query field, then the value for *parent* is set to the ``root_value`` configured while executing the query (default ``None``). See :ref:`SchemaExecute` for more details on executing queries. + +Resolver example +^^^^^^^^^^^^^^^^ + +If we have a schema with Person type and one field on the root query. + +.. code:: python + + from graphene import ObjectType, String, Field + + class Person(ObjectType): + full_name = String() + + def resolve_full_name(parent, info): + return f"{parent.first_name} {parent.last_name}" + + class Query(ObjectType): + me = Field(Person) + + def resolve_me(parent, info): + # returns an object that represents a Person + return get_human(name="Luke Skywalker") + +When we execute a query against that schema. + +.. code:: python + + schema = Schema(query=Query) + + query_string = "{ me { fullName } }" + result = schema.execute(query_string) + + assert result.data["me"] == {"fullName": "Luke Skywalker"} + +Then we go through the following steps to resolve this query: + +* ``parent`` is set with the root_value from query execution (None). +* ``Query.resolve_me`` called with ``parent`` None which returns a value object ``Person("Luke", "Skywalker")``. +* This value object is then used as ``parent`` while calling ``Person.resolve_full_name`` to resolve the scalar String value "Luke Skywalker". +* The scalar value is serialized and sent back in the query response. + +Each resolver returns the next :ref:`ResolverParamParent` to be used in executing the following resolver in the chain. If the Field is a Scalar type, that value will be serialized and sent in the **Response**. Otherwise, while resolving Compound types like *ObjectType*, the value be passed forward as the next :ref:`ResolverParamParent`. + +Naming convention +^^^^^^^^^^^^^^^^^ + +This :ref:`ResolverParamParent` is sometimes named ``obj``, ``parent``, or ``source`` in other GraphQL documentation. It can also be named after the value object being resolved (ex. ``root`` for a root Query or Mutation, and ``person`` for a Person value object). Sometimes this argument will be named ``self`` in Graphene code, but this can be misleading due to :ref:`ResolverImplicitStaticMethod` while executing queries in Graphene. + +.. _ResolverParamInfo: + +GraphQL Execution Info (*info*) +******************************* + +The second parameter provides two things: + +* reference to meta information about the execution of the current GraphQL Query (fields, schema, parsed query, etc.) +* access to per-request ``context`` which can be used to store user authentication, data loader instances or anything else useful for resolving the query. + +Only context will be required for most applications. See :ref:`SchemaExecuteContext` for more information about setting context. + +.. _ResolverParamGraphQLArguments: + +GraphQL Arguments (*\*\*kwargs*) +******************************** + +Any arguments that a field defines gets passed to the resolver function as +keyword arguments. For example: + +.. 
code:: python + + from graphene import ObjectType, Field, String + + class Query(ObjectType): + human_by_name = Field(Human, name=String(required=True)) + + def resolve_human_by_name(parent, info, name): + return get_human(name=name) + +You can then execute the following query: + +.. code:: + + query { + humanByName(name: "Luke Skywalker") { + firstName + lastName + } + } + +*Note:* There are several arguments to a field that are "reserved" by Graphene +(see :ref:`fields-mounted-types`). +You can still define an argument that clashes with one of these fields by using +the ``args`` parameter like so: + +.. code:: python + + from graphene import ObjectType, Field, String + + class Query(ObjectType): + answer = String(args={'description': String()}) + + def resolve_answer(parent, info, description): + return description + + +Convenience Features of Graphene Resolvers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _ResolverImplicitStaticMethod: + +Implicit staticmethod +********************* + +One surprising feature of Graphene is that all resolver methods are treated implicitly as staticmethods. This means that, unlike other methods in Python, the first argument of a resolver is *never* ``self`` while it is being executed by Graphene. Instead, the first argument is always :ref:`ResolverParamParent`. In practice, this is very convenient as, in GraphQL, we are almost always more concerned with the using the parent value object to resolve queries than attributes on the Python object itself. + +The two resolvers in this example are effectively the same. + +.. code:: python + + from graphene import ObjectType, String + + class Person(ObjectType): + first_name = String() + last_name = String() + + @staticmethod + def resolve_first_name(parent, info): + ''' + Decorating a Python method with `staticmethod` ensures that `self` will not be provided as an + argument. However, Graphene does not need this decorator for this behavior. + ''' + return parent.first_name + + def resolve_last_name(parent, info): + ''' + Normally the first argument for this method would be `self`, but Graphene executes this as + a staticmethod implicitly. + ''' + return parent.last_name + + # ... + +If you prefer your code to be more explicit, feel free to use ``@staticmethod`` decorators. Otherwise, your code may be cleaner without them! + +.. _DefaultResolver: + +Default Resolver +**************** + +If a resolver method is not defined for a **Field** attribute on our *ObjectType*, Graphene supplies a default resolver. + +If the :ref:`ResolverParamParent` is a dictionary, the resolver will look for a dictionary key matching the field name. Otherwise, the resolver will get the attribute from the parent value object matching the field name. + +.. 
code:: python + + from collections import namedtuple + + from graphene import ObjectType, String, Field, Schema + + PersonValueObject = namedtuple("Person", ["first_name", "last_name"]) + + class Person(ObjectType): + first_name = String() + last_name = String() + + class Query(ObjectType): + me = Field(Person) + my_best_friend = Field(Person) + + def resolve_me(parent, info): + # always pass an object for `me` field + return PersonValueObject(first_name="Luke", last_name="Skywalker") + + def resolve_my_best_friend(parent, info): + # always pass a dictionary for `my_best_fiend_field` + return {"first_name": "R2", "last_name": "D2"} + + schema = Schema(query=Query) + result = schema.execute(''' + { + me { firstName lastName } + myBestFriend { firstName lastName } + } + ''') + # With default resolvers we can resolve attributes from an object.. + assert result.data["me"] == {"firstName": "Luke", "lastName": "Skywalker"} + + # With default resolvers, we can also resolve keys from a dictionary.. + assert result.data["myBestFriend"] == {"firstName": "R2", "lastName": "D2"} + +Advanced +~~~~~~~~ + +GraphQL Argument defaults +************************* + +If you define an argument for a field that is not required (and in a query +execution it is not provided as an argument) it will not be passed to the +resolver function at all. This is so that the developer can differentiate +between a ``undefined`` value for an argument and an explicit ``null`` value. + +For example, given this schema: + +.. code:: python + + from graphene import ObjectType, String + + class Query(ObjectType): + hello = String(required=True, name=String()) + + def resolve_hello(parent, info, name): + return name if name else 'World' + +And this query: + +.. code:: + + query { + hello + } + +An error will be thrown: + +.. code:: + + TypeError: resolve_hello() missing 1 required positional argument: 'name' + +You can fix this error in several ways. Either by combining all keyword arguments +into a dict: + +.. code:: python + + from graphene import ObjectType, String + + class Query(ObjectType): + hello = String(required=True, name=String()) + + def resolve_hello(parent, info, **kwargs): + name = kwargs.get('name', 'World') + return f'Hello, {name}!' + +Or by setting a default value for the keyword argument: + +.. code:: python + + from graphene import ObjectType, String + + class Query(ObjectType): + hello = String(required=True, name=String()) + + def resolve_hello(parent, info, name='World'): + return f'Hello, {name}!' + +One can also set a default value for an Argument in the GraphQL schema itself using Graphene! + +.. code:: python + + from graphene import ObjectType, String + + class Query(ObjectType): + hello = String( + required=True, + name=String(default_value='World') + ) + + def resolve_hello(parent, info, name): + return f'Hello, {name}!' + +Resolvers outside the class +*************************** + +A field can use a custom resolver from outside the class: + +.. code:: python + + from graphene import ObjectType, String + + def resolve_full_name(person, info): + return f"{person.first_name} {person.last_name}" + + class Person(ObjectType): + first_name = String() + last_name = String() + full_name = String(resolver=resolve_full_name) + + +Instances as value objects +************************** + +Graphene ``ObjectType``\ s can act as value objects too. So with the +previous example you could use ``Person`` to capture data for each of the *ObjectType*'s fields. + +.. 
code:: python
+
+    peter = Person(first_name='Peter', last_name='Griffin')
+
+    peter.first_name  # "Peter"
+    peter.last_name   # "Griffin"
+
+Field camelcasing
+*****************
+
+Graphene automatically camelcases fields on *ObjectType* from ``field_name`` to ``fieldName`` to conform with GraphQL standards. See :ref:`SchemaAutoCamelCase` for more information.
+
+*ObjectType* Configuration - Meta class
+---------------------------------------
+
+Graphene uses a Meta inner class on *ObjectType* to set different options.
+
+GraphQL type name
+~~~~~~~~~~~~~~~~~
+
+By default the type name in the GraphQL schema will be the same as the class name
+that defines the ``ObjectType``. This can be changed by setting the ``name``
+property on the ``Meta`` class:
+
+.. code:: python
+
+    from graphene import ObjectType
+
+    class MyGraphQlSong(ObjectType):
+        class Meta:
+            name = 'Song'
+
+GraphQL Description
+~~~~~~~~~~~~~~~~~~~
+
+The schema description of an *ObjectType* can be set as a docstring on the Python object or on the Meta inner class.
+
+.. code:: python
+
+    from graphene import ObjectType
+
+    class MyGraphQlSong(ObjectType):
+        ''' We can set the schema description for an Object Type here on a docstring '''
+        class Meta:
+            description = 'But if we set the description in Meta, this value is used instead'
+
+Interfaces & Possible Types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Setting ``interfaces`` in the Meta inner class specifies the GraphQL Interfaces that this Object implements.
+
+Providing ``possible_types`` helps Graphene resolve ambiguous types such as interfaces or Unions.
+
+See :ref:`Interfaces` for more information.
+
+.. code:: python
+
+    from collections import namedtuple
+
+    from graphene import ObjectType, Node
+
+    Song = namedtuple('Song', ('title', 'artist'))
+
+    class MyGraphQlSong(ObjectType):
+        class Meta:
+            interfaces = (Node, )
+            possible_types = (Song, )
+
+.. _Interface: /docs/interfaces/
diff --git a/testbed/graphql-python__graphene/docs/types/scalars.rst b/testbed/graphql-python__graphene/docs/types/scalars.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2a55245d2f735826503606909e722bcca029495e
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/types/scalars.rst
@@ -0,0 +1,309 @@
+.. _Scalars:
+
+Scalars
+=======
+
+Scalar types represent concrete values at the leaves of a query. There are
+several built-in types that Graphene provides out of the box which represent common
+values in Python. You can also create your own Scalar types to better express
+values that you might have in your data model.
+
+All Scalar types accept the following arguments. All are optional:
+
+``name``: *string*
+
+    Override the name of the Field.
+
+``description``: *string*
+
+    A description of the type to show in the GraphiQL browser.
+
+``required``: *boolean*
+
+    If ``True``, the server will enforce a value for this field. See `NonNull <../list-and-nonnull.html#nonnull>`_. Default is ``False``.
+
+``deprecation_reason``: *string*
+
+    Provide a deprecation reason for the Field.
+
+``default_value``: *any*
+
+    Provide a default value for the Field.
+
+
+
+Built in scalars
+----------------
+
+Graphene defines the following base Scalar Types that match the default `GraphQL types <https://graphql.org/learn/schema/#scalar-types>`_:
+
+``graphene.String``
+^^^^^^^^^^^^^^^^^^^
+
+    Represents textual data, represented as UTF-8
+    character sequences. The String type is most often used by GraphQL to
+    represent free-form human-readable text.
+
+``graphene.Int``
+^^^^^^^^^^^^^^^^
+
+    Represents non-fractional signed whole numeric
+    values. 
Int is a signed 32‐bit integer per the + `GraphQL spec `_ + +``graphene.Float`` +^^^^^^^^^^^^^^^^^^ + + Represents signed double-precision fractional + values as specified by + `IEEE 754 `_. + +``graphene.Boolean`` +^^^^^^^^^^^^^^^^^^^^ + + Represents `true` or `false`. + +``graphene.ID`` +^^^^^^^^^^^^^^^ + + Represents a unique identifier, often used to + refetch an object or as key for a cache. The ID type appears in a JSON + response as a String; however, it is not intended to be human-readable. + When expected as an input type, any string (such as `"4"`) or integer + (such as `4`) input value will be accepted as an ID. + +---- + +Graphene also provides custom scalars for common values: + +``graphene.Date`` +^^^^^^^^^^^^^^^^^ + + Represents a Date value as specified by `iso8601 `_. + +.. code:: python + + import datetime + from graphene import Schema, ObjectType, Date + + class Query(ObjectType): + one_week_from = Date(required=True, date_input=Date(required=True)) + + def resolve_one_week_from(root, info, date_input): + assert date_input == datetime.date(2006, 1, 2) + return date_input + datetime.timedelta(weeks=1) + + schema = Schema(query=Query) + + results = schema.execute(""" + query { + oneWeekFrom(dateInput: "2006-01-02") + } + """) + + assert results.data == {"oneWeekFrom": "2006-01-09"} + + +``graphene.DateTime`` +^^^^^^^^^^^^^^^^^^^^^ + + Represents a DateTime value as specified by `iso8601 `_. + +.. code:: python + + import datetime + from graphene import Schema, ObjectType, DateTime + + class Query(ObjectType): + one_hour_from = DateTime(required=True, datetime_input=DateTime(required=True)) + + def resolve_one_hour_from(root, info, datetime_input): + assert datetime_input == datetime.datetime(2006, 1, 2, 15, 4, 5) + return datetime_input + datetime.timedelta(hours=1) + + schema = Schema(query=Query) + + results = schema.execute(""" + query { + oneHourFrom(datetimeInput: "2006-01-02T15:04:05") + } + """) + + assert results.data == {"oneHourFrom": "2006-01-02T16:04:05"} + +``graphene.Time`` +^^^^^^^^^^^^^^^^^ + + Represents a Time value as specified by `iso8601 `_. + +.. code:: python + + import datetime + from graphene import Schema, ObjectType, Time + + class Query(ObjectType): + one_hour_from = Time(required=True, time_input=Time(required=True)) + + def resolve_one_hour_from(root, info, time_input): + assert time_input == datetime.time(15, 4, 5) + tmp_time_input = datetime.datetime.combine(datetime.date(1, 1, 1), time_input) + return (tmp_time_input + datetime.timedelta(hours=1)).time() + + schema = Schema(query=Query) + + results = schema.execute(""" + query { + oneHourFrom(timeInput: "15:04:05") + } + """) + + assert results.data == {"oneHourFrom": "16:04:05"} + +``graphene.Decimal`` +^^^^^^^^^^^^^^^^^^^^ + + Represents a Python Decimal value. + +.. code:: python + + import decimal + from graphene import Schema, ObjectType, Decimal + + class Query(ObjectType): + add_one_to = Decimal(required=True, decimal_input=Decimal(required=True)) + + def resolve_add_one_to(root, info, decimal_input): + assert decimal_input == decimal.Decimal("10.50") + return decimal_input + decimal.Decimal("1") + + schema = Schema(query=Query) + + results = schema.execute(""" + query { + addOneTo(decimalInput: "10.50") + } + """) + + assert results.data == {"addOneTo": "11.50"} + +``graphene.JSONString`` +^^^^^^^^^^^^^^^^^^^^^^^ + + Represents a JSON string. + +.. 
code:: python + + from graphene import Schema, ObjectType, JSONString, String + + class Query(ObjectType): + update_json_key = JSONString( + required=True, + json_input=JSONString(required=True), + key=String(required=True), + value=String(required=True) + ) + + def resolve_update_json_key(root, info, json_input, key, value): + assert json_input == {"name": "Jane"} + json_input[key] = value + return json_input + + schema = Schema(query=Query) + + results = schema.execute(""" + query { + updateJsonKey(jsonInput: "{\\"name\\": \\"Jane\\"}", key: "name", value: "Beth") + } + """) + + assert results.data == {"updateJsonKey": "{\"name\": \"Beth\"}"} + + +``graphene.Base64`` +^^^^^^^^^^^^^^^^^^^ + + Represents a Base64 encoded string. + +.. code:: python + + from graphene import Schema, ObjectType, Base64 + + class Query(ObjectType): + increment_encoded_id = Base64( + required=True, + base64_input=Base64(required=True), + ) + + def resolve_increment_encoded_id(root, info, base64_input): + assert base64_input == "4" + return int(base64_input) + 1 + + schema = Schema(query=Query) + + results = schema.execute(""" + query { + incrementEncodedId(base64Input: "NA==") + } + """) + + assert results.data == {"incrementEncodedId": "NQ=="} + + + +Custom scalars +-------------- + +You can create custom scalars for your schema. +The following is an example for creating a DateTime scalar: + +.. code:: python + + import datetime + from graphene.types import Scalar + from graphql.language import ast + + class DateTime(Scalar): + '''DateTime Scalar Description''' + + @staticmethod + def serialize(dt): + return dt.isoformat() + + @staticmethod + def parse_literal(node, _variables=None): + if isinstance(node, ast.StringValueNode): + return datetime.datetime.strptime( + node.value, "%Y-%m-%dT%H:%M:%S.%f") + + @staticmethod + def parse_value(value): + return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f") + +Mounting Scalars +---------------- + +Scalars mounted in a ``ObjectType``, ``Interface`` or ``Mutation`` act as +``Field``\ s. + +.. code:: python + + class Person(graphene.ObjectType): + name = graphene.String() + + # Is equivalent to: + class Person(graphene.ObjectType): + name = graphene.Field(graphene.String) + + +**Note:** when using the ``Field`` constructor directly, pass the type and +not an instance. + +Types mounted in a ``Field`` act as ``Argument``\ s. + + +.. code:: python + + graphene.Field(graphene.String, to=graphene.String()) + + # Is equivalent to: + graphene.Field(graphene.String, to=graphene.Argument(graphene.String)) diff --git a/testbed/graphql-python__graphene/docs/types/schema.rst b/testbed/graphql-python__graphene/docs/types/schema.rst new file mode 100644 index 0000000000000000000000000000000000000000..a82addc9a3bf0ddb1cd2005dfea1ba1e4f022502 --- /dev/null +++ b/testbed/graphql-python__graphene/docs/types/schema.rst @@ -0,0 +1,94 @@ +Schema +====== + +A GraphQL **Schema** defines the types and relationships between **Fields** in your API. + +A Schema is created by supplying the root :ref:`ObjectType` of each operation, query (mandatory), mutation and subscription. + +Schema will collect all type definitions related to the root operations and then supply them to the validator and executor. + +.. code:: python + + my_schema = Schema( + query=MyRootQuery, + mutation=MyRootMutation, + subscription=MyRootSubscription + ) + +A Root Query is just a special :ref:`ObjectType` that defines the fields that are the entrypoint for your API. 
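+
+For example, a minimal root Query might look like the following sketch (our
+own illustration; the ``hello`` field is hypothetical and not part of these
+docs):
+
+.. code:: python
+
+    import graphene
+
+    class MyRootQuery(graphene.ObjectType):
+        # Every field defined here becomes a top-level entrypoint of the API.
+        hello = graphene.String()
+
+        def resolve_hello(root, info):
+            return "world"
+
+    my_schema = graphene.Schema(query=MyRootQuery)
+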
Root Mutation and Root Subscription are similar to Root Query, but for different operation types:
+
+* Query fetches data
+* Mutation changes data and retrieves the changes
+* Subscription sends changes to clients in real-time
+
+Review the `GraphQL documentation on Schema`_ for a brief overview of fields, schema and operations.
+
+.. _GraphQL documentation on Schema: https://graphql.org/learn/schema/
+
+
+Querying
+--------
+
+To query a schema, call the ``execute`` method on it. See :ref:`SchemaExecute` for more details.
+
+
+.. code:: python
+
+    query_string = 'query whoIsMyBestFriend { myBestFriend { lastName } }'
+    my_schema.execute(query_string)
+
+Types
+-----
+
+There are some cases where the schema cannot access all of the types that we plan to have.
+For example, when a field returns an ``Interface``, the schema doesn't know about any of the
+implementations.
+
+In this case, we need to use the ``types`` argument when creating the Schema:
+
+
+.. code:: python
+
+    my_schema = Schema(
+        query=MyRootQuery,
+        types=[SomeExtraObjectType, ]
+    )
+
+.. _SchemaAutoCamelCase:
+
+Auto camelCase field names
+--------------------------
+
+By default, all field and argument names (that are not
+explicitly set with the ``name`` arg) will be converted from
+``snake_case`` to ``camelCase``, as the API is usually being consumed by a js/mobile client.
+
+For example, with the ObjectType below, the ``last_name`` field name is converted to ``lastName``:
+
+.. code:: python
+
+    class Person(graphene.ObjectType):
+        last_name = graphene.String()
+        other_name = graphene.String(name='_other_Name')
+
+In case you don't want to apply this transformation, provide a ``name`` argument to the field constructor.
+``other_name`` converts to ``_other_Name`` (without further transformations).
+
+Your query should look like:
+
+.. code::
+
+    {
+        lastName
+        _other_Name
+    }
+
+
+To disable this behavior, set ``auto_camelcase`` to ``False`` upon schema instantiation:
+
+.. code:: python
+
+    my_schema = Schema(
+        query=MyRootQuery,
+        auto_camelcase=False,
+    )
diff --git a/testbed/graphql-python__graphene/docs/types/unions.rst b/testbed/graphql-python__graphene/docs/types/unions.rst
new file mode 100644
index 0000000000000000000000000000000000000000..16ac24e8751a341cf06ac306ee6c382d4eff4e46
--- /dev/null
+++ b/testbed/graphql-python__graphene/docs/types/unions.rst
@@ -0,0 +1,63 @@
+Unions
+======
+
+Union types are very similar to interfaces, but they don't get
+to specify any common fields between the types.
+
+The basics:
+
+- Each Union is a Python class that inherits from ``graphene.Union``.
+- Unions don't have any fields on them, just links to the possible ObjectTypes.
+
+Quick example
+-------------
+
+This example model defines several ObjectTypes with their own fields.
+``SearchResult`` is a ``Union`` of these object types.
+
+.. code:: python
+
+    import graphene
+
+    class Human(graphene.ObjectType):
+        name = graphene.String()
+        born_in = graphene.String()
+
+    class Droid(graphene.ObjectType):
+        name = graphene.String()
+        primary_function = graphene.String()
+
+    class Starship(graphene.ObjectType):
+        name = graphene.String()
+        length = graphene.Int()
+
+    class SearchResult(graphene.Union):
+        class Meta:
+            types = (Human, Droid, Starship)
+
+
+Wherever we return a SearchResult type in our schema, we might get a Human, a Droid, or a Starship.
+Note that members of a union type need to be concrete object types;
+you can't create a union type out of interfaces or other unions.
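+
+For example, a hypothetical ``search`` field (our own illustration, not part
+of the docs above) returning ``SearchResult`` items would be queried with
+``__typename`` and inline fragments, just as with interfaces:
+
+.. code:: python
+
+    class Query(graphene.ObjectType):
+        search = graphene.List(SearchResult, q=graphene.String(required=True))
+
+        def resolve_search(root, info, q):
+            # A stand-in resolver; a real one would query a data source.
+            # Returning instances of the member ObjectTypes lets Graphene
+            # determine the concrete type of each result.
+            return [
+                Human(name="Luke Skywalker", born_in="Tatooine"),
+                Droid(name="R2-D2", primary_function="Astromech"),
+            ]
+
+    schema = graphene.Schema(query=Query)
+    result = schema.execute('''
+        {
+          search(q: "star") {
+            __typename
+            ... on Human { name bornIn }
+            ... on Droid { name primaryFunction }
+          }
+        }
+    ''')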
+
+The object types above have the following representation in a schema:
+
+.. code::
+
+    type Droid {
+      name: String
+      primaryFunction: String
+    }
+
+    type Human {
+      name: String
+      bornIn: String
+    }
+
+    type Starship {
+      name: String
+      length: Int
+    }
+
+    union SearchResult = Human | Droid | Starship
+
diff --git a/testbed/graphql-python__graphene/examples/__init__.py b/testbed/graphql-python__graphene/examples/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/graphql-python__graphene/examples/complex_example.py b/testbed/graphql-python__graphene/examples/complex_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..73a8ac1bc545d5eea8a99666e988c3cf5042dbd2
--- /dev/null
+++ b/testbed/graphql-python__graphene/examples/complex_example.py
@@ -0,0 +1,69 @@
+import graphene
+
+
+class GeoInput(graphene.InputObjectType):
+    lat = graphene.Float(required=True)
+    lng = graphene.Float(required=True)
+
+    @property
+    def latlng(self):
+        return f"({self.lat},{self.lng})"
+
+
+class Address(graphene.ObjectType):
+    latlng = graphene.String()
+
+
+class Query(graphene.ObjectType):
+    address = graphene.Field(Address, geo=GeoInput(required=True))
+
+    def resolve_address(root, info, geo):
+        return Address(latlng=geo.latlng)
+
+
+class CreateAddress(graphene.Mutation):
+    class Arguments:
+        geo = GeoInput(required=True)
+
+    Output = Address
+
+    def mutate(root, info, geo):
+        return Address(latlng=geo.latlng)
+
+
+class Mutation(graphene.ObjectType):
+    create_address = CreateAddress.Field()
+
+
+schema = graphene.Schema(query=Query, mutation=Mutation)
+query = """
+    query something{
+      address(geo: {lat:32.2, lng:12}) {
+        latlng
+      }
+    }
+"""
+mutation = """
+    mutation addAddress{
+      createAddress(geo: {lat:32.2, lng:12}) {
+        latlng
+      }
+    }
+"""
+
+
+def test_query():
+    result = schema.execute(query)
+    assert not result.errors
+    assert result.data == {"address": {"latlng": "(32.2,12.0)"}}
+
+
+def test_mutation():
+    result = schema.execute(mutation)
+    assert not result.errors
+    assert result.data == {"createAddress": {"latlng": "(32.2,12.0)"}}
+
+
+if __name__ == "__main__":
+    result = schema.execute(query)
+    print(result.data["address"]["latlng"])
diff --git a/testbed/graphql-python__graphene/examples/context_example.py b/testbed/graphql-python__graphene/examples/context_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..235ae535c46de542122b5ddcdfa59119dae7e29d
--- /dev/null
+++ b/testbed/graphql-python__graphene/examples/context_example.py
@@ -0,0 +1,35 @@
+import graphene
+
+
+class User(graphene.ObjectType):
+    id = graphene.ID()
+    name = graphene.String()
+
+
+class Query(graphene.ObjectType):
+    me = graphene.Field(User)
+
+    def resolve_me(root, info):
+        return info.context["user"]
+
+
+schema = graphene.Schema(query=Query)
+query = """
+    query something{
+      me {
+        id
+        name
+      }
+    }
+"""
+
+
+def test_query():
+    result = schema.execute(query, context={"user": User(id="1", name="Syrus")})
+    assert not result.errors
+    assert result.data == {"me": {"id": "1", "name": "Syrus"}}
+
+
+if __name__ == "__main__":
+    result = schema.execute(query, context={"user": User(id="X", name="Console")})
+    print(result.data["me"])
diff --git a/testbed/graphql-python__graphene/examples/simple_example.py b/testbed/graphql-python__graphene/examples/simple_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bee8d1f4df76bf53ee8d0d4b80ac8f0a4922e1d
--- /dev/null
+++ 
b/testbed/graphql-python__graphene/examples/simple_example.py @@ -0,0 +1,38 @@ +import graphene + + +class Patron(graphene.ObjectType): + id = graphene.ID() + name = graphene.String() + age = graphene.Int() + + +class Query(graphene.ObjectType): + + patron = graphene.Field(Patron) + + def resolve_patron(root, info): + return Patron(id=1, name="Syrus", age=27) + + +schema = graphene.Schema(query=Query) +query = """ + query something{ + patron { + id + name + age + } + } +""" + + +def test_query(): + result = schema.execute(query) + assert not result.errors + assert result.data == {"patron": {"id": "1", "name": "Syrus", "age": 27}} + + +if __name__ == "__main__": + result = schema.execute(query) + print(result.data["patron"]) diff --git a/testbed/graphql-python__graphene/examples/starwars/__init__.py b/testbed/graphql-python__graphene/examples/starwars/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars/data.py b/testbed/graphql-python__graphene/examples/starwars/data.py new file mode 100644 index 0000000000000000000000000000000000000000..6c68b85c1449f9b9adabd38de64dabf76295015d --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars/data.py @@ -0,0 +1,95 @@ +human_data = {} +droid_data = {} + + +def setup(): + from .schema import Human, Droid + + global human_data, droid_data + luke = Human( + id="1000", + name="Luke Skywalker", + friends=["1002", "1003", "2000", "2001"], + appears_in=[4, 5, 6], + home_planet="Tatooine", + ) + + vader = Human( + id="1001", + name="Darth Vader", + friends=["1004"], + appears_in=[4, 5, 6], + home_planet="Tatooine", + ) + + han = Human( + id="1002", + name="Han Solo", + friends=["1000", "1003", "2001"], + appears_in=[4, 5, 6], + home_planet=None, + ) + + leia = Human( + id="1003", + name="Leia Organa", + friends=["1000", "1002", "2000", "2001"], + appears_in=[4, 5, 6], + home_planet="Alderaan", + ) + + tarkin = Human( + id="1004", + name="Wilhuff Tarkin", + friends=["1001"], + appears_in=[4], + home_planet=None, + ) + + human_data = { + "1000": luke, + "1001": vader, + "1002": han, + "1003": leia, + "1004": tarkin, + } + + c3po = Droid( + id="2000", + name="C-3PO", + friends=["1000", "1002", "1003", "2001"], + appears_in=[4, 5, 6], + primary_function="Protocol", + ) + + r2d2 = Droid( + id="2001", + name="R2-D2", + friends=["1000", "1002", "1003"], + appears_in=[4, 5, 6], + primary_function="Astromech", + ) + + droid_data = {"2000": c3po, "2001": r2d2} + + +def get_character(id): + return human_data.get(id) or droid_data.get(id) + + +def get_friends(character): + return map(get_character, character.friends) + + +def get_hero(episode): + if episode == 5: + return human_data["1000"] + return droid_data["2001"] + + +def get_human(id): + return human_data.get(id) + + +def get_droid(id): + return droid_data.get(id) diff --git a/testbed/graphql-python__graphene/examples/starwars/schema.py b/testbed/graphql-python__graphene/examples/starwars/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..25642c343a4b5c030985a93c5f37b7285a7b5d81 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars/schema.py @@ -0,0 +1,52 @@ +import graphene + +from .data import get_character, get_droid, get_hero, get_human + + +class Episode(graphene.Enum): + NEWHOPE = 4 + EMPIRE = 5 + JEDI = 6 + + +class Character(graphene.Interface): + id = graphene.ID() + name = graphene.String() + friends = 
graphene.List(lambda: Character) + appears_in = graphene.List(Episode) + + def resolve_friends(self, info): + # The character friends is a list of strings + return [get_character(f) for f in self.friends] + + +class Human(graphene.ObjectType): + class Meta: + interfaces = (Character,) + + home_planet = graphene.String() + + +class Droid(graphene.ObjectType): + class Meta: + interfaces = (Character,) + + primary_function = graphene.String() + + +class Query(graphene.ObjectType): + hero = graphene.Field(Character, episode=Episode()) + human = graphene.Field(Human, id=graphene.String()) + droid = graphene.Field(Droid, id=graphene.String()) + + def resolve_hero(root, info, episode=None): + return get_hero(episode) + + def resolve_human(root, info, id): + return get_human(id) + + def resolve_droid(root, info, id): + return get_droid(id) + + +schema = graphene.Schema(query=Query) diff --git a/testbed/graphql-python__graphene/examples/starwars/tests/__init__.py b/testbed/graphql-python__graphene/examples/starwars/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars/tests/snapshots/__init__.py b/testbed/graphql-python__graphene/examples/starwars/tests/snapshots/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars/tests/snapshots/snap_test_query.py b/testbed/graphql-python__graphene/examples/starwars/tests/snapshots/snap_test_query.py new file mode 100644 index 0000000000000000000000000000000000000000..b4f05bdb8e1c272dfac3d705c0a814626e05d2bd --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars/tests/snapshots/snap_test_query.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +from __future__ import unicode_literals + +from snapshottest import Snapshot + +snapshots = Snapshot() + +snapshots["test_hero_name_query 1"] = {"data": {"hero": {"name": "R2-D2"}}} + +snapshots["test_hero_name_and_friends_query 1"] = { + "data": { + "hero": { + "id": "2001", + "name": "R2-D2", + "friends": [ + {"name": "Luke Skywalker"}, + {"name": "Han Solo"}, + {"name": "Leia Organa"}, + ], + } + } +} + +snapshots["test_nested_query 1"] = { + "data": { + "hero": { + "name": "R2-D2", + "friends": [ + { + "name": "Luke Skywalker", + "appearsIn": ["NEWHOPE", "EMPIRE", "JEDI"], + "friends": [ + {"name": "Han Solo"}, + {"name": "Leia Organa"}, + {"name": "C-3PO"}, + {"name": "R2-D2"}, + ], + }, + { + "name": "Han Solo", + "appearsIn": ["NEWHOPE", "EMPIRE", "JEDI"], + "friends": [ + {"name": "Luke Skywalker"}, + {"name": "Leia Organa"}, + {"name": "R2-D2"}, + ], + }, + { + "name": "Leia Organa", + "appearsIn": ["NEWHOPE", "EMPIRE", "JEDI"], + "friends": [ + {"name": "Luke Skywalker"}, + {"name": "Han Solo"}, + {"name": "C-3PO"}, + {"name": "R2-D2"}, + ], + }, + ], + } + } +} + +snapshots["test_fetch_luke_query 1"] = {"data": {"human": {"name": "Luke Skywalker"}}} + +snapshots["test_fetch_some_id_query 1"] = { + "data": {"human": {"name": "Luke Skywalker"}} +} + +snapshots["test_fetch_some_id_query2 1"] = {"data": {"human": {"name": "Han Solo"}}} + +snapshots["test_invalid_id_query 1"] = {"data": {"human": None}} + +snapshots["test_fetch_luke_aliased 1"] = {"data": {"luke": {"name": "Luke Skywalker"}}} + +snapshots["test_fetch_luke_and_leia_aliased 1"] = { + "data": {"luke": {"name": "Luke Skywalker"}, 
"leia": {"name": "Leia Organa"}} +} + +snapshots["test_duplicate_fields 1"] = { + "data": { + "luke": {"name": "Luke Skywalker", "homePlanet": "Tatooine"}, + "leia": {"name": "Leia Organa", "homePlanet": "Alderaan"}, + } +} + +snapshots["test_use_fragment 1"] = { + "data": { + "luke": {"name": "Luke Skywalker", "homePlanet": "Tatooine"}, + "leia": {"name": "Leia Organa", "homePlanet": "Alderaan"}, + } +} + +snapshots["test_check_type_of_r2 1"] = { + "data": {"hero": {"__typename": "Droid", "name": "R2-D2"}} +} + +snapshots["test_check_type_of_luke 1"] = { + "data": {"hero": {"__typename": "Human", "name": "Luke Skywalker"}} +} diff --git a/testbed/graphql-python__graphene/examples/starwars/tests/test_query.py b/testbed/graphql-python__graphene/examples/starwars/tests/test_query.py new file mode 100644 index 0000000000000000000000000000000000000000..88934b0ed02079c1c8fba1f186412519d19ee819 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars/tests/test_query.py @@ -0,0 +1,182 @@ +from graphene.test import Client + +from ..data import setup +from ..schema import schema + +setup() + +client = Client(schema) + + +def test_hero_name_query(snapshot): + query = """ + query HeroNameQuery { + hero { + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_hero_name_and_friends_query(snapshot): + query = """ + query HeroNameAndFriendsQuery { + hero { + id + name + friends { + name + } + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_nested_query(snapshot): + query = """ + query NestedQuery { + hero { + name + friends { + name + appearsIn + friends { + name + } + } + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_fetch_luke_query(snapshot): + query = """ + query FetchLukeQuery { + human(id: "1000") { + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_fetch_some_id_query(snapshot): + query = """ + query FetchSomeIDQuery($someId: String!) { + human(id: $someId) { + name + } + } + """ + params = {"someId": "1000"} + snapshot.assert_match(client.execute(query, variables=params)) + + +def test_fetch_some_id_query2(snapshot): + query = """ + query FetchSomeIDQuery($someId: String!) { + human(id: $someId) { + name + } + } + """ + params = {"someId": "1002"} + snapshot.assert_match(client.execute(query, variables=params)) + + +def test_invalid_id_query(snapshot): + query = """ + query humanQuery($id: String!) 
{ + human(id: $id) { + name + } + } + """ + params = {"id": "not a valid id"} + snapshot.assert_match(client.execute(query, variables=params)) + + +def test_fetch_luke_aliased(snapshot): + query = """ + query FetchLukeAliased { + luke: human(id: "1000") { + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_fetch_luke_and_leia_aliased(snapshot): + query = """ + query FetchLukeAndLeiaAliased { + luke: human(id: "1000") { + name + } + leia: human(id: "1003") { + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_duplicate_fields(snapshot): + query = """ + query DuplicateFields { + luke: human(id: "1000") { + name + homePlanet + } + leia: human(id: "1003") { + name + homePlanet + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_use_fragment(snapshot): + query = """ + query UseFragment { + luke: human(id: "1000") { + ...HumanFragment + } + leia: human(id: "1003") { + ...HumanFragment + } + } + fragment HumanFragment on Human { + name + homePlanet + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_check_type_of_r2(snapshot): + query = """ + query CheckTypeOfR2 { + hero { + __typename + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_check_type_of_luke(snapshot): + query = """ + query CheckTypeOfLuke { + hero(episode: EMPIRE) { + __typename + name + } + } + """ + snapshot.assert_match(client.execute(query)) diff --git a/testbed/graphql-python__graphene/examples/starwars/tests/test_schema.py b/testbed/graphql-python__graphene/examples/starwars/tests/test_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/__init__.py b/testbed/graphql-python__graphene/examples/starwars_relay/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/data.py b/testbed/graphql-python__graphene/examples/starwars_relay/data.py new file mode 100644 index 0000000000000000000000000000000000000000..0ed654c2ac3888866e88891bf54ef34ac8d1c373 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/data.py @@ -0,0 +1,71 @@ +data = {} + + +def setup(): + global data + + from .schema import Ship, Faction + + xwing = Ship(id="1", name="X-Wing") + + ywing = Ship(id="2", name="Y-Wing") + + awing = Ship(id="3", name="A-Wing") + + # Yeah, technically it's Corellian. But it flew in the service of the rebels, + # so for the purposes of this demo it's a rebel ship. 
+ falcon = Ship(id="4", name="Millennium Falcon") + + homeOne = Ship(id="5", name="Home One") + + tieFighter = Ship(id="6", name="TIE Fighter") + + tieInterceptor = Ship(id="7", name="TIE Interceptor") + + executor = Ship(id="8", name="Executor") + + rebels = Faction( + id="1", name="Alliance to Restore the Republic", ships=["1", "2", "3", "4", "5"] + ) + + empire = Faction(id="2", name="Galactic Empire", ships=["6", "7", "8"]) + + data = { + "Faction": {"1": rebels, "2": empire}, + "Ship": { + "1": xwing, + "2": ywing, + "3": awing, + "4": falcon, + "5": homeOne, + "6": tieFighter, + "7": tieInterceptor, + "8": executor, + }, + } + + +def create_ship(ship_name, faction_id): + from .schema import Ship + + next_ship = len(data["Ship"].keys()) + 1 + new_ship = Ship(id=str(next_ship), name=ship_name) + data["Ship"][new_ship.id] = new_ship + data["Faction"][faction_id].ships.append(new_ship.id) + return new_ship + + +def get_ship(_id): + return data["Ship"][_id] + + +def get_faction(_id): + return data["Faction"][_id] + + +def get_rebels(): + return get_faction("1") + + +def get_empire(): + return get_faction("2") diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/schema.py b/testbed/graphql-python__graphene/examples/starwars_relay/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..d1cce687f1b869154349b944bcd7d28de03c3e50 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/schema.py @@ -0,0 +1,78 @@ +import graphene +from graphene import relay + +from .data import create_ship, get_empire, get_faction, get_rebels, get_ship + + +class Ship(graphene.ObjectType): + """A ship in the Star Wars saga""" + + class Meta: + interfaces = (relay.Node,) + + name = graphene.String(description="The name of the ship.") + + @classmethod + def get_node(cls, info, id): + return get_ship(id) + + +class ShipConnection(relay.Connection): + class Meta: + node = Ship + + +class Faction(graphene.ObjectType): + """A faction in the Star Wars saga""" + + class Meta: + interfaces = (relay.Node,) + + name = graphene.String(description="The name of the faction.") + ships = relay.ConnectionField( + ShipConnection, description="The ships used by the faction." 
+ ) + + def resolve_ships(self, info, **args): + # Transform the instance ship_ids into real instances + return [get_ship(ship_id) for ship_id in self.ships] + + @classmethod + def get_node(cls, info, id): + return get_faction(id) + + +class IntroduceShip(relay.ClientIDMutation): + class Input: + ship_name = graphene.String(required=True) + faction_id = graphene.String(required=True) + + ship = graphene.Field(Ship) + faction = graphene.Field(Faction) + + @classmethod + def mutate_and_get_payload( + cls, root, info, ship_name, faction_id, client_mutation_id=None + ): + ship = create_ship(ship_name, faction_id) + faction = get_faction(faction_id) + return IntroduceShip(ship=ship, faction=faction) + + +class Query(graphene.ObjectType): + rebels = graphene.Field(Faction) + empire = graphene.Field(Faction) + node = relay.Node.Field() + + def resolve_rebels(root, info): + return get_rebels() + + def resolve_empire(root, info): + return get_empire() + + +class Mutation(graphene.ObjectType): + introduce_ship = IntroduceShip.Field() + + +schema = graphene.Schema(query=Query, mutation=Mutation) diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/__init__.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/__init__.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_connections.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_connections.py new file mode 100644 index 0000000000000000000000000000000000000000..57a7b7ea593d033c20b5e56a443021cec3adaef3 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_connections.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +from __future__ import unicode_literals + +from snapshottest import Snapshot + +snapshots = Snapshot() + +snapshots["test_correct_fetch_first_ship_rebels 1"] = { + "data": { + "rebels": { + "name": "Alliance to Restore the Republic", + "ships": { + "pageInfo": { + "startCursor": "YXJyYXljb25uZWN0aW9uOjA=", + "endCursor": "YXJyYXljb25uZWN0aW9uOjA=", + "hasNextPage": True, + "hasPreviousPage": False, + }, + "edges": [ + {"cursor": "YXJyYXljb25uZWN0aW9uOjA=", "node": {"name": "X-Wing"}} + ], + }, + } + } +} diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_mutation.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..c35b2aebac47e0b0a32b50bdf382d3164c9dbeb0 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_mutation.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +from __future__ import unicode_literals + +from snapshottest import Snapshot + +snapshots = Snapshot() + +snapshots["test_mutations 1"] = { + "data": { + "introduceShip": { + "ship": {"id": "U2hpcDo5", "name": "Peter"}, + "faction": { + "name": "Alliance to Restore the Republic", + "ships": { + "edges": [ + {"node": {"id": 
"U2hpcDox", "name": "X-Wing"}}, + {"node": {"id": "U2hpcDoy", "name": "Y-Wing"}}, + {"node": {"id": "U2hpcDoz", "name": "A-Wing"}}, + {"node": {"id": "U2hpcDo0", "name": "Millennium Falcon"}}, + {"node": {"id": "U2hpcDo1", "name": "Home One"}}, + {"node": {"id": "U2hpcDo5", "name": "Peter"}}, + ] + }, + }, + } + } +} diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_objectidentification.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_objectidentification.py new file mode 100644 index 0000000000000000000000000000000000000000..b02a420c568e78efee2cad17cefb72635ec560cd --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/tests/snapshots/snap_test_objectidentification.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +from __future__ import unicode_literals + +from snapshottest import Snapshot + + +snapshots = Snapshot() + +snapshots["test_correctly_fetches_id_name_rebels 1"] = { + "data": { + "rebels": {"id": "RmFjdGlvbjox", "name": "Alliance to Restore the Republic"} + } +} + +snapshots["test_correctly_refetches_rebels 1"] = { + "data": {"node": {"id": "RmFjdGlvbjox", "name": "Alliance to Restore the Republic"}} +} + +snapshots["test_correctly_fetches_id_name_empire 1"] = { + "data": {"empire": {"id": "RmFjdGlvbjoy", "name": "Galactic Empire"}} +} + +snapshots["test_correctly_refetches_empire 1"] = { + "data": {"node": {"id": "RmFjdGlvbjoy", "name": "Galactic Empire"}} +} + +snapshots["test_correctly_refetches_xwing 1"] = { + "data": {"node": {"id": "U2hpcDox", "name": "X-Wing"}} +} + +snapshots[ + "test_str_schema 1" +] = '''type Query { + rebels: Faction + empire: Faction + node( + """The ID of the object""" + id: ID! + ): Node +} + +"""A faction in the Star Wars saga""" +type Faction implements Node { + """The ID of the object""" + id: ID! + + """The name of the faction.""" + name: String + + """The ships used by the faction.""" + ships(before: String, after: String, first: Int, last: Int): ShipConnection +} + +"""An object with an ID""" +interface Node { + """The ID of the object""" + id: ID! +} + +type ShipConnection { + """Pagination data for this connection.""" + pageInfo: PageInfo! + + """Contains the nodes in this connection.""" + edges: [ShipEdge]! +} + +""" +The Relay compliant `PageInfo` type, containing data necessary to paginate this connection. +""" +type PageInfo { + """When paginating forwards, are there more items?""" + hasNextPage: Boolean! + + """When paginating backwards, are there more items?""" + hasPreviousPage: Boolean! + + """When paginating backwards, the cursor to continue.""" + startCursor: String + + """When paginating forwards, the cursor to continue.""" + endCursor: String +} + +"""A Relay edge containing a `Ship` and its cursor.""" +type ShipEdge { + """The item at the end of the edge""" + node: Ship + + """A cursor for use in pagination""" + cursor: String! +} + +"""A ship in the Star Wars saga""" +type Ship implements Node { + """The ID of the object""" + id: ID! + + """The name of the ship.""" + name: String +} + +type Mutation { + introduceShip(input: IntroduceShipInput!): IntroduceShipPayload +} + +type IntroduceShipPayload { + ship: Ship + faction: Faction + clientMutationId: String +} + +input IntroduceShipInput { + shipName: String! + factionId: String! 
+ clientMutationId: String +}''' diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_connections.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_connections.py new file mode 100644 index 0000000000000000000000000000000000000000..697796d132d0f5cab9d79fc305798be042ad1502 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_connections.py @@ -0,0 +1,33 @@ +from graphene.test import Client + +from ..data import setup +from ..schema import schema + +setup() + +client = Client(schema) + + +def test_correct_fetch_first_ship_rebels(snapshot): + query = """ + query RebelsShipsQuery { + rebels { + name, + ships(first: 1) { + pageInfo { + startCursor + endCursor + hasNextPage + hasPreviousPage + } + edges { + cursor + node { + name + } + } + } + } + } + """ + snapshot.assert_match(client.execute(query)) diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_mutation.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ba7fe6dbbdd328bb1d826a2da6ca9397539de1 --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_mutation.py @@ -0,0 +1,33 @@ +from graphene.test import Client + +from ..data import setup +from ..schema import schema + +setup() + +client = Client(schema) + + +def test_mutations(snapshot): + query = """ + mutation MyMutation { + introduceShip(input:{clientMutationId:"abc", shipName: "Peter", factionId: "1"}) { + ship { + id + name + } + faction { + name + ships { + edges { + node { + id + name + } + } + } + } + } + } + """ + snapshot.assert_match(client.execute(query)) diff --git a/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_objectidentification.py b/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_objectidentification.py new file mode 100644 index 0000000000000000000000000000000000000000..c024f432a6f131f8fae2fb18f4cfac0ee5300f7b --- /dev/null +++ b/testbed/graphql-python__graphene/examples/starwars_relay/tests/test_objectidentification.py @@ -0,0 +1,78 @@ +from graphene.test import Client + +from ..data import setup +from ..schema import schema + +setup() + +client = Client(schema) + + +def test_str_schema(snapshot): + snapshot.assert_match(str(schema).strip()) + + +def test_correctly_fetches_id_name_rebels(snapshot): + query = """ + query RebelsQuery { + rebels { + id + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_correctly_refetches_rebels(snapshot): + query = """ + query RebelsRefetchQuery { + node(id: "RmFjdGlvbjox") { + id + ... on Faction { + name + } + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_correctly_fetches_id_name_empire(snapshot): + query = """ + query EmpireQuery { + empire { + id + name + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_correctly_refetches_empire(snapshot): + query = """ + query EmpireRefetchQuery { + node(id: "RmFjdGlvbjoy") { + id + ... on Faction { + name + } + } + } + """ + snapshot.assert_match(client.execute(query)) + + +def test_correctly_refetches_xwing(snapshot): + query = """ + query XWingRefetchQuery { + node(id: "U2hpcDox") { + id + ... 
on Ship { + name + } + } + } + """ + snapshot.assert_match(client.execute(query)) diff --git a/testbed/graphql-python__graphene/graphene/__init__.py b/testbed/graphql-python__graphene/graphene/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73e13a365f896740f2addfde7ce2a02a9d491765 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/__init__.py @@ -0,0 +1,98 @@ +from .pyutils.version import get_version +from .relay import ( + BaseGlobalIDType, + ClientIDMutation, + Connection, + ConnectionField, + DefaultGlobalIDType, + GlobalID, + Node, + PageInfo, + SimpleGlobalIDType, + UUIDGlobalIDType, + is_node, +) +from .types import ( + ID, + UUID, + Argument, + Base64, + BigInt, + Boolean, + Context, + Date, + DateTime, + Decimal, + Dynamic, + Enum, + Field, + Float, + InputField, + InputObjectType, + Int, + Interface, + JSONString, + List, + Mutation, + NonNull, + ObjectType, + ResolveInfo, + Scalar, + Schema, + String, + Time, + Union, +) +from .utils.module_loading import lazy_import +from .utils.resolve_only_args import resolve_only_args + +VERSION = (3, 2, 2, "final", 0) + + +__version__ = get_version(VERSION) + +__all__ = [ + "__version__", + "Argument", + "Base64", + "BigInt", + "BaseGlobalIDType", + "Boolean", + "ClientIDMutation", + "Connection", + "ConnectionField", + "Context", + "Date", + "DateTime", + "Decimal", + "DefaultGlobalIDType", + "Dynamic", + "Enum", + "Field", + "Float", + "GlobalID", + "ID", + "InputField", + "InputObjectType", + "Int", + "Interface", + "JSONString", + "List", + "Mutation", + "Node", + "NonNull", + "ObjectType", + "PageInfo", + "ResolveInfo", + "Scalar", + "Schema", + "SimpleGlobalIDType", + "String", + "Time", + "Union", + "UUID", + "UUIDGlobalIDType", + "is_node", + "lazy_import", + "resolve_only_args", +] diff --git a/testbed/graphql-python__graphene/graphene/pyutils/__init__.py b/testbed/graphql-python__graphene/graphene/pyutils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/pyutils/dataclasses.py b/testbed/graphql-python__graphene/graphene/pyutils/dataclasses.py new file mode 100644 index 0000000000000000000000000000000000000000..1a474526d667610da2500e99aaffad28bc8c2aae --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/pyutils/dataclasses.py @@ -0,0 +1,1222 @@ +# This is a polyfill for dataclasses +# https://docs.python.org/3/library/dataclasses.html +# Original PEP proposal: PEP 557 +# https://www.python.org/dev/peps/pep-0557/ +import re +import sys +import copy +import types +import inspect +import keyword + +__all__ = [ + "dataclass", + "field", + "Field", + "FrozenInstanceError", + "InitVar", + "MISSING", + # Helper functions. + "fields", + "asdict", + "astuple", + "make_dataclass", + "replace", + "is_dataclass", +] + +# Conditions for adding methods. The boxes indicate what action the +# dataclass decorator takes. For all of these tables, when I talk +# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm +# referring to the arguments to the @dataclass decorator. When +# checking if a dunder method already exists, I mean check for an +# entry in the class's __dict__. I never check to see if an attribute +# is defined in a base class. + +# Key: +# +=========+=========================================+ +# + Value | Meaning | +# +=========+=========================================+ +# | | No action: no method is added. 
| +# +---------+-----------------------------------------+ +# | add | Generated method is added. | +# +---------+-----------------------------------------+ +# | raise | TypeError is raised. | +# +---------+-----------------------------------------+ +# | None | Attribute is set to None. | +# +=========+=========================================+ + +# __init__ +# +# +--- init= parameter +# | +# v | | | +# | no | yes | <--- class has __init__ in __dict__? +# +=======+=======+=======+ +# | False | | | +# +-------+-------+-------+ +# | True | add | | <- the default +# +=======+=======+=======+ + +# __repr__ +# +# +--- repr= parameter +# | +# v | | | +# | no | yes | <--- class has __repr__ in __dict__? +# +=======+=======+=======+ +# | False | | | +# +-------+-------+-------+ +# | True | add | | <- the default +# +=======+=======+=======+ + + +# __setattr__ +# __delattr__ +# +# +--- frozen= parameter +# | +# v | | | +# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__? +# +=======+=======+=======+ +# | False | | | <- the default +# +-------+-------+-------+ +# | True | add | raise | +# +=======+=======+=======+ +# Raise because not adding these methods would break the "frozen-ness" +# of the class. + +# __eq__ +# +# +--- eq= parameter +# | +# v | | | +# | no | yes | <--- class has __eq__ in __dict__? +# +=======+=======+=======+ +# | False | | | +# +-------+-------+-------+ +# | True | add | | <- the default +# +=======+=======+=======+ + +# __lt__ +# __le__ +# __gt__ +# __ge__ +# +# +--- order= parameter +# | +# v | | | +# | no | yes | <--- class has any comparison method in __dict__? +# +=======+=======+=======+ +# | False | | | <- the default +# +-------+-------+-------+ +# | True | add | raise | +# +=======+=======+=======+ +# Raise because to allow this case would interfere with using +# functools.total_ordering. + +# __hash__ + +# +------------------- unsafe_hash= parameter +# | +----------- eq= parameter +# | | +--- frozen= parameter +# | | | +# v v v | | | +# | no | yes | <--- class has explicitly defined __hash__ +# +=======+=======+=======+========+========+ +# | False | False | False | | | No __eq__, use the base class __hash__ +# +-------+-------+-------+--------+--------+ +# | False | False | True | | | No __eq__, use the base class __hash__ +# +-------+-------+-------+--------+--------+ +# | False | True | False | None | | <-- the default, not hashable +# +-------+-------+-------+--------+--------+ +# | False | True | True | add | | Frozen, so hashable, allows override +# +-------+-------+-------+--------+--------+ +# | True | False | False | add | raise | Has no __eq__, but hashable +# +-------+-------+-------+--------+--------+ +# | True | False | True | add | raise | Has no __eq__, but hashable +# +-------+-------+-------+--------+--------+ +# | True | True | False | add | raise | Not frozen, but hashable +# +-------+-------+-------+--------+--------+ +# | True | True | True | add | raise | Frozen, so hashable +# +=======+=======+=======+========+========+ +# For boxes that are blank, __hash__ is untouched and therefore +# inherited from the base class. If the base is object, then +# id-based hashing is used. +# +# Note that a class may already have __hash__=None if it specified an +# __eq__ method in the class body (not one that was created by +# @dataclass). +# +# See _hash_action (below) for a coded version of this table. + + +# Raised when an attempt is made to modify a frozen class. 
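+# A hedged usage sketch (illustrative names, not part of this module): with
+# @dataclass(frozen=True), both assignment and deletion raise this error:
+#
+#     @dataclass(frozen=True)
+#     class Point:
+#         x: int
+#
+#     p = Point(1)
+#     p.x = 2  # raises FrozenInstanceError
+#     del p.x  # raises FrozenInstanceError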
+class FrozenInstanceError(AttributeError):
+    pass
+
+
+# A sentinel object for default values to signal that a default
+# factory will be used. This is given a nice repr() which will appear
+# in the function signature of dataclasses' constructors.
+class _HAS_DEFAULT_FACTORY_CLASS:
+    def __repr__(self):
+        return "<factory>"
+
+
+_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
+
+# A sentinel object to detect if a parameter is supplied or not. Use
+# a class to give it a better repr.
+class _MISSING_TYPE:
+    pass
+
+
+MISSING = _MISSING_TYPE()
+
+# Since most per-field metadata will be unused, create an empty
+# read-only proxy that can be shared among all fields.
+_EMPTY_METADATA = types.MappingProxyType({})
+
+# Markers for the various kinds of fields and pseudo-fields.
+class _FIELD_BASE:
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return self.name
+
+
+_FIELD = _FIELD_BASE("_FIELD")
+_FIELD_CLASSVAR = _FIELD_BASE("_FIELD_CLASSVAR")
+_FIELD_INITVAR = _FIELD_BASE("_FIELD_INITVAR")
+
+# The name of an attribute on the class where we store the Field
+# objects. Also used to check if a class is a Data Class.
+_FIELDS = "__dataclass_fields__"
+
+# The name of an attribute on the class that stores the parameters to
+# @dataclass.
+_PARAMS = "__dataclass_params__"
+
+# The name of the function, that if it exists, is called at the end of
+# __init__.
+_POST_INIT_NAME = "__post_init__"
+
+# String regex that string annotations for ClassVar or InitVar must match.
+# Allows "identifier.identifier[" or "identifier[".
+# https://bugs.python.org/issue33453 for details.
+_MODULE_IDENTIFIER_RE = re.compile(r"^(?:\s*(\w+)\s*\.)?\s*(\w+)")
+
+
+class _InitVarMeta(type):
+    def __getitem__(self, params):
+        return self
+
+
+class InitVar(metaclass=_InitVarMeta):
+    pass
+
+
+# Instances of Field are only ever created from within this module,
+# and only from the field() function, although Field instances are
+# exposed externally as (conceptually) read-only objects.
+#
+# name and type are filled in after the fact, not in __init__.
+# They're not known at the time this class is instantiated, but it's
+# convenient if they're available later.
+#
+# When cls._FIELDS is filled in with a list of Field objects, the name
+# and type fields will have been populated.
+class Field:
+    __slots__ = (
+        "name",
+        "type",
+        "default",
+        "default_factory",
+        "repr",
+        "hash",
+        "init",
+        "compare",
+        "metadata",
+        "_field_type",  # Private: not to be used by user code.
+    )
+
+    def __init__(self, default, default_factory, init, repr, hash, compare, metadata):
+        self.name = None
+        self.type = None
+        self.default = default
+        self.default_factory = default_factory
+        self.init = init
+        self.repr = repr
+        self.hash = hash
+        self.compare = compare
+        self.metadata = (
+            _EMPTY_METADATA
+            if metadata is None or len(metadata) == 0
+            else types.MappingProxyType(metadata)
+        )
+        self._field_type = None
+
+    def __repr__(self):
+        return (
+            "Field("
+            f"name={self.name!r},"
+            f"type={self.type!r},"
+            f"default={self.default!r},"
+            f"default_factory={self.default_factory!r},"
+            f"init={self.init!r},"
+            f"repr={self.repr!r},"
+            f"hash={self.hash!r},"
+            f"compare={self.compare!r},"
+            f"metadata={self.metadata!r},"
+            f"_field_type={self._field_type}"
+            ")"
+        )
+
+    # This is used to support the PEP 487 __set_name__ protocol in the
+    # case where we're using a field that contains a descriptor as a
+    # default value. For details on __set_name__, see
+    # https://www.python.org/dev/peps/pep-0487/#implementation-details.
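+    # A hedged sketch of the descriptor-as-default case (hypothetical names):
+    #
+    #     class Descriptor:
+    #         def __set_name__(self, owner, name):
+    #             self.public_name = name
+    #
+    #     @dataclass
+    #     class C:
+    #         x: int = field(default=Descriptor())
+    #
+    # __set_name__ below then forwards (owner=C, name="x") to the
+    # Descriptor instance, as a plain class body would.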
+ # + # Note that in _process_class, this Field object is overwritten + # with the default value, so the end result is a descriptor that + # had __set_name__ called on it at the right time. + def __set_name__(self, owner, name): + func = getattr(type(self.default), "__set_name__", None) + if func: + # There is a __set_name__ method on the descriptor, call + # it. + func(self.default, owner, name) + + +class _DataclassParams: + __slots__ = ("init", "repr", "eq", "order", "unsafe_hash", "frozen") + + def __init__(self, init, repr, eq, order, unsafe_hash, frozen): + self.init = init + self.repr = repr + self.eq = eq + self.order = order + self.unsafe_hash = unsafe_hash + self.frozen = frozen + + def __repr__(self): + return ( + "_DataclassParams(" + f"init={self.init!r}," + f"repr={self.repr!r}," + f"eq={self.eq!r}," + f"order={self.order!r}," + f"unsafe_hash={self.unsafe_hash!r}," + f"frozen={self.frozen!r}" + ")" + ) + + +# This function is used instead of exposing Field creation directly, +# so that a type checker can be told (via overloads) that this is a +# function whose type depends on its parameters. +def field( + *, + default=MISSING, + default_factory=MISSING, + init=True, + repr=True, + hash=None, + compare=True, + metadata=None, +): + """Return an object to identify dataclass fields. + + default is the default value of the field. default_factory is a + 0-argument function called to initialize a field's value. If init + is True, the field will be a parameter to the class's __init__() + function. If repr is True, the field will be included in the + object's repr(). If hash is True, the field will be included in + the object's hash(). If compare is True, the field will be used + in comparison functions. metadata, if specified, must be a + mapping which is stored but not otherwise examined by dataclass. + + It is an error to specify both default and default_factory. + """ + + if default is not MISSING and default_factory is not MISSING: + raise ValueError("cannot specify both default and default_factory") + return Field(default, default_factory, init, repr, hash, compare, metadata) + + +def _tuple_str(obj_name, fields): + # Return a string representing each field of obj_name as a tuple + # member. So, if fields is ['x', 'y'] and obj_name is "self", + # return "(self.x,self.y)". + + # Special case for the 0-tuple. + if not fields: + return "()" + # Note the trailing comma, needed if this turns out to be a 1-tuple. + return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)' + + +def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING): + # Note that we mutate locals when exec() is called. Caller + # beware! The only callers are internal to this module, so no + # worries about external callers. + if locals is None: + locals = {} + return_annotation = "" + if return_type is not MISSING: + locals["_return_type"] = return_type + return_annotation = "->_return_type" + args = ",".join(args) + body = "\n".join(f" {b}" for b in body) + + # Compute the text of the entire function. + txt = f"def {name}({args}){return_annotation}:\n{body}" + + exec(txt, globals, locals) + return locals[name] + + +def _field_assign(frozen, name, value, self_name): + # If we're a frozen class, then assign to our fields in __init__ + # via object.__setattr__. Otherwise, just use a simple + # assignment. + # + # self_name is what "self" is called in this function: don't + # hard-code "self", since that might be a field name. 
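+    # For example (sketch): for a frozen class and a field "x", this emits
+    # "object.__setattr__(self,'x',x)" rather than "self.x=x", bypassing
+    # the generated __setattr__ that raises FrozenInstanceError.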
+ if frozen: + return f"object.__setattr__({self_name},{name!r},{value})" + return f"{self_name}.{name}={value}" + + +def _field_init(f, frozen, globals, self_name): + # Return the text of the line in the body of __init__ that will + # initialize this field. + + default_name = f"_dflt_{f.name}" + if f.default_factory is not MISSING: + if f.init: + # This field has a default factory. If a parameter is + # given, use it. If not, call the factory. + globals[default_name] = f.default_factory + value = ( + f"{default_name}() " + f"if {f.name} is _HAS_DEFAULT_FACTORY " + f"else {f.name}" + ) + else: + # This is a field that's not in the __init__ params, but + # has a default factory function. It needs to be + # initialized here by calling the factory function, + # because there's no other way to initialize it. + + # For a field initialized with a default=defaultvalue, the + # class dict just has the default value + # (cls.fieldname=defaultvalue). But that won't work for a + # default factory, the factory must be called in __init__ + # and we must assign that to self.fieldname. We can't + # fall back to the class dict's value, both because it's + # not set, and because it might be different per-class + # (which, after all, is why we have a factory function!). + + globals[default_name] = f.default_factory + value = f"{default_name}()" + else: + # No default factory. + if f.init: + if f.default is MISSING: + # There's no default, just do an assignment. + value = f.name + elif f.default is not MISSING: + globals[default_name] = f.default + value = f.name + else: + # This field does not need initialization. Signify that + # to the caller by returning None. + return None + # Only test this now, so that we can create variables for the + # default. However, return None to signify that we're not going + # to actually do the assignment statement for InitVars. + if f._field_type == _FIELD_INITVAR: + return None + # Now, actually generate the field assignment. + return _field_assign(frozen, f.name, value, self_name) + + +def _init_param(f): + # Return the __init__ parameter string for this field. For + # example, the equivalent of 'x:int=3' (except instead of 'int', + # reference a variable set to int, and instead of '3', reference a + # variable set to 3). + if f.default is MISSING and f.default_factory is MISSING: + # There's no default, and no default_factory, just output the + # variable name and type. + default = "" + elif f.default is not MISSING: + # There's a default, this will be the name that's used to look + # it up. + default = f"=_dflt_{f.name}" + elif f.default_factory is not MISSING: + # There's a factory function. Set a marker. + default = "=_HAS_DEFAULT_FACTORY" + return f"{f.name}:_type_{f.name}{default}" + + +def _init_fn(fields, frozen, has_post_init, self_name): + # fields contains both real fields and InitVar pseudo-fields. + + # Make sure we don't have fields without defaults following fields + # with defaults. This actually would be caught when exec-ing the + # function source code, but catching it here gives a better error + # message, and future-proofs us in case we build up the function + # using ast. + seen_default = False + for f in fields: + # Only consider fields in the __init__ call. 
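+        # e.g. declaring "x: int = 3" followed by "y: int" is rejected here
+        # with a TypeError naming "y", instead of surfacing later as an
+        # opaque error from the generated source.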
+ if f.init: + if not (f.default is MISSING and f.default_factory is MISSING): + seen_default = True + elif seen_default: + raise TypeError( + f"non-default argument {f.name!r} " "follows default argument" + ) + globals = {"MISSING": MISSING, "_HAS_DEFAULT_FACTORY": _HAS_DEFAULT_FACTORY} + + body_lines = [] + for f in fields: + line = _field_init(f, frozen, globals, self_name) + # line is None means that this field doesn't require + # initialization (it's a pseudo-field). Just skip it. + if line: + body_lines.append(line) + # Does this class have a post-init function? + if has_post_init: + params_str = ",".join(f.name for f in fields if f._field_type is _FIELD_INITVAR) + body_lines.append(f"{self_name}.{_POST_INIT_NAME}({params_str})") + # If no body lines, use 'pass'. + if not body_lines: + body_lines = ["pass"] + locals = {f"_type_{f.name}": f.type for f in fields} + return _create_fn( + "__init__", + [self_name] + [_init_param(f) for f in fields if f.init], + body_lines, + locals=locals, + globals=globals, + return_type=None, + ) + + +def _repr_fn(fields): + return _create_fn( + "__repr__", + ("self",), + [ + 'return self.__class__.__qualname__ + f"(' + + ", ".join([f"{f.name}={{self.{f.name}!r}}" for f in fields]) + + ')"' + ], + ) + + +def _frozen_get_del_attr(cls, fields): + # XXX: globals is modified on the first call to _create_fn, then + # the modified version is used in the second call. Is this okay? + globals = {"cls": cls, "FrozenInstanceError": FrozenInstanceError} + if fields: + fields_str = "(" + ",".join(repr(f.name) for f in fields) + ",)" + else: + # Special case for the zero-length tuple. + fields_str = "()" + return ( + _create_fn( + "__setattr__", + ("self", "name", "value"), + ( + f"if type(self) is cls or name in {fields_str}:", + ' raise FrozenInstanceError(f"cannot assign to field {name!r}")', + f"super(cls, self).__setattr__(name, value)", + ), + globals=globals, + ), + _create_fn( + "__delattr__", + ("self", "name"), + ( + f"if type(self) is cls or name in {fields_str}:", + ' raise FrozenInstanceError(f"cannot delete field {name!r}")', + f"super(cls, self).__delattr__(name)", + ), + globals=globals, + ), + ) + + +def _cmp_fn(name, op, self_tuple, other_tuple): + # Create a comparison function. If the fields in the object are + # named 'x' and 'y', then self_tuple is the string + # '(self.x,self.y)' and other_tuple is the string + # '(other.x,other.y)'. + + return _create_fn( + name, + ("self", "other"), + [ + "if other.__class__ is self.__class__:", + f" return {self_tuple}{op}{other_tuple}", + "return NotImplemented", + ], + ) + + +def _hash_fn(fields): + self_tuple = _tuple_str("self", fields) + return _create_fn("__hash__", ("self",), [f"return hash({self_tuple})"]) + + +def _is_classvar(a_type, typing): + # This test uses a typing internal class, but it's the best way to + # test if this is a ClassVar. + return type(a_type) is typing._ClassVar + + +def _is_initvar(a_type, dataclasses): + # The module we're checking against is the module we're + # currently in (dataclasses.py). + return a_type is dataclasses.InitVar + + +def _is_type(annotation, cls, a_module, a_type, is_type_predicate): + # Given a type annotation string, does it refer to a_type in + # a_module? For example, when checking that annotation denotes a + # ClassVar, then a_module is typing, and a_type is + # typing.ClassVar. + + # It's possible to look up a_module given a_type, but it involves + # looking in sys.modules (again!), and seems like a waste since + # the caller already knows a_module. 
+
+    # - annotation is a string type annotation
+    # - cls is the class that this annotation was found in
+    # - a_module is the module we want to match
+    # - a_type is the type in that module we want to match
+    # - is_type_predicate is a function called with (obj, a_module)
+    #   that determines if obj is of the desired type.
+
+    # Since this test does not do a local namespace lookup (and
+    # instead only a module (global) lookup), there are some things it
+    # gets wrong.
+
+    # With string annotations, cv0 will be detected as a ClassVar:
+    #   CV = ClassVar
+    #   @dataclass
+    #   class C0:
+    #     cv0: CV
+
+    # But in this example cv1 will not be detected as a ClassVar:
+    #   @dataclass
+    #   class C1:
+    #     CV = ClassVar
+    #     cv1: CV
+
+    # In C1, the code in this function (_is_type) will look up "CV" in
+    # the module and not find it, so it will not consider cv1 as a
+    # ClassVar. This is a fairly obscure corner case, and the best
+    # way to fix it would be to eval() the string "CV" with the
+    # correct global and local namespaces. However that would involve
+    # an eval() penalty for every single field of every dataclass
+    # that's defined. It was judged not worth it.
+
+    match = _MODULE_IDENTIFIER_RE.match(annotation)
+    if match:
+        ns = None
+        module_name = match.group(1)
+        if not module_name:
+            # No module name, assume the class's module did
+            # "from dataclasses import InitVar".
+            ns = sys.modules.get(cls.__module__).__dict__
+        else:
+            # Look up module_name in the class's module.
+            module = sys.modules.get(cls.__module__)
+            if module and module.__dict__.get(module_name) is a_module:
+                ns = sys.modules.get(a_type.__module__).__dict__
+        if ns and is_type_predicate(ns.get(match.group(2)), a_module):
+            return True
+    return False
+
+
+def _get_field(cls, a_name, a_type):
+    # Return a Field object for this field name and type. ClassVars
+    # and InitVars are also returned, but marked as such (see
+    # f._field_type).
+
+    # If the default value isn't derived from Field, then it's only a
+    # normal default value. Convert it to a Field().
+    default = getattr(cls, a_name, MISSING)
+    if isinstance(default, Field):
+        f = default
+    else:
+        if isinstance(default, types.MemberDescriptorType):
+            # This is a field in __slots__, so it has no default value.
+            default = MISSING
+        f = field(default=default)
+    # Only at this point do we know the name and the type. Set them.
+    f.name = a_name
+    f.type = a_type
+
+    # Assume it's a normal field until proven otherwise. We're next
+    # going to decide if it's a ClassVar or InitVar, everything else
+    # is just a normal field.
+    f._field_type = _FIELD
+
+    # In addition to checking for actual types here, also check for
+    # string annotations. get_type_hints() won't always work for us
+    # (see https://github.com/python/typing/issues/508 for example),
+    # plus it's expensive and would require an eval for every string
+    # annotation. So, make a best effort to see if this is a ClassVar
+    # or InitVar using regexes and checking that the thing referenced
+    # is actually of the correct type.
+
+    # For the complete discussion, see https://bugs.python.org/issue33453
+
+    # If typing has not been imported, then it's impossible for any
+    # annotation to be a ClassVar. So, only look for ClassVar if
+    # typing has been imported by any module (not necessarily cls's
+    # module).
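+    # So both "x: ClassVar[int]" and its string form are meant to end up
+    # marked _FIELD_CLASSVAR below and excluded from __init__, __repr__
+    # and comparisons; per the notes above this is only best-effort.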
+ typing = sys.modules.get("typing") + if typing: + if _is_classvar(a_type, typing) or ( + isinstance(f.type, str) + and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar) + ): + f._field_type = _FIELD_CLASSVAR + # If the type is InitVar, or if it's a matching string annotation, + # then it's an InitVar. + if f._field_type is _FIELD: + # The module we're checking against is the module we're + # currently in (dataclasses.py). + dataclasses = sys.modules[__name__] + if _is_initvar(a_type, dataclasses) or ( + isinstance(f.type, str) + and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar) + ): + f._field_type = _FIELD_INITVAR + # Validations for individual fields. This is delayed until now, + # instead of in the Field() constructor, since only here do we + # know the field name, which allows for better error reporting. + + # Special restrictions for ClassVar and InitVar. + if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR): + if f.default_factory is not MISSING: + raise TypeError(f"field {f.name} cannot have a " "default factory") + # Should I check for other field settings? default_factory + # seems the most serious to check for. Maybe add others. For + # example, how about init=False (or really, + # init=)? It makes no sense for + # ClassVar and InitVar to specify init=. + # For real fields, disallow mutable defaults for known types. + if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)): + raise ValueError( + f"mutable default {type(f.default)} for field " + f"{f.name} is not allowed: use default_factory" + ) + return f + + +def _set_new_attribute(cls, name, value): + # Never overwrites an existing attribute. Returns True if the + # attribute already exists. + if name in cls.__dict__: + return True + setattr(cls, name, value) + return False + + +# Decide if/how we're going to create a hash function. Key is +# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to +# take. The common case is to do nothing, so instead of providing a +# function that is a no-op, use None to signify that. + + +def _hash_set_none(cls, fields): + return None + + +def _hash_add(cls, fields): + flds = [f for f in fields if (f.compare if f.hash is None else f.hash)] + return _hash_fn(flds) + + +def _hash_exception(cls, fields): + # Raise an exception. + raise TypeError(f"Cannot overwrite attribute __hash__ " f"in class {cls.__name__}") + + +# +# +-------------------------------------- unsafe_hash? +# | +------------------------------- eq? +# | | +------------------------ frozen? +# | | | +---------------- has-explicit-hash? +# | | | | +# | | | | +------- action +# | | | | | +# v v v v v +_hash_action = { + (False, False, False, False): None, + (False, False, False, True): None, + (False, False, True, False): None, + (False, False, True, True): None, + (False, True, False, False): _hash_set_none, + (False, True, False, True): None, + (False, True, True, False): _hash_add, + (False, True, True, True): None, + (True, False, False, False): _hash_add, + (True, False, False, True): _hash_exception, + (True, False, True, False): _hash_add, + (True, False, True, True): _hash_exception, + (True, True, False, False): _hash_add, + (True, True, False, True): _hash_exception, + (True, True, True, False): _hash_add, + (True, True, True, True): _hash_exception, +} +# See https://bugs.python.org/issue32929#msg312829 for an if-statement +# version of this table. 
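+# For example, a plain @dataclass uses the defaults (unsafe_hash=False,
+# eq=True, frozen=False); with no explicit __hash__ that is the
+# (False, True, False, False) row, so _hash_set_none runs and instances
+# are unhashable, whereas @dataclass(frozen=True) hits an "add" row.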
+ + +def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen): + # Now that dicts retain insertion order, there's no reason to use + # an ordered dict. I am leveraging that ordering here, because + # derived class fields overwrite base class fields, but the order + # is defined by the base class, which is found first. + fields = {} + + setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen)) + + # Find our base classes in reverse MRO order, and exclude + # ourselves. In reversed order so that more derived classes + # override earlier field definitions in base classes. As long as + # we're iterating over them, see if any are frozen. + any_frozen_base = False + has_dataclass_bases = False + for b in cls.__mro__[-1:0:-1]: + # Only process classes that have been processed by our + # decorator. That is, they have a _FIELDS attribute. + base_fields = getattr(b, _FIELDS, None) + if base_fields: + has_dataclass_bases = True + for f in base_fields.values(): + fields[f.name] = f + if getattr(b, _PARAMS).frozen: + any_frozen_base = True + # Annotations that are defined in this class (not in base + # classes). If __annotations__ isn't present, then this class + # adds no new annotations. We use this to compute fields that are + # added by this class. + # + # Fields are found from cls_annotations, which is guaranteed to be + # ordered. Default values are from class attributes, if a field + # has a default. If the default value is a Field(), then it + # contains additional info beyond (and possibly including) the + # actual default value. Pseudo-fields ClassVars and InitVars are + # included, despite the fact that they're not real fields. That's + # dealt with later. + cls_annotations = cls.__dict__.get("__annotations__", {}) + + # Now find fields in our class. While doing so, validate some + # things, and set the default values (as class attributes) where + # we can. + cls_fields = [ + _get_field(cls, name, type_) for name, type_ in cls_annotations.items() + ] + for f in cls_fields: + fields[f.name] = f + + # If the class attribute (which is the default value for this + # field) exists and is of type 'Field', replace it with the + # real default. This is so that normal class introspection + # sees a real default value, not a Field. + if isinstance(getattr(cls, f.name, None), Field): + if f.default is MISSING: + # If there's no default, delete the class attribute. + # This happens if we specify field(repr=False), for + # example (that is, we specified a field object, but + # no default value). Also if we're using a default + # factory. The class attribute should not be set at + # all in the post-processed class. + delattr(cls, f.name) + else: + setattr(cls, f.name, f.default) + # Do we have any Field members that don't also have annotations? + for name, value in cls.__dict__.items(): + if isinstance(value, Field) and not name in cls_annotations: + raise TypeError(f"{name!r} is a field but has no type annotation") + # Check rules that apply if we are derived from any dataclasses. + if has_dataclass_bases: + # Raise an exception if any of our bases are frozen, but we're not. + if any_frozen_base and not frozen: + raise TypeError("cannot inherit non-frozen dataclass from a " "frozen one") + # Raise an exception if we're frozen, but none of our bases are. + if not any_frozen_base and frozen: + raise TypeError("cannot inherit frozen dataclass from a " "non-frozen one") + # Remember all of the fields on our class (including bases). 
This + # also marks this class as being a dataclass. + setattr(cls, _FIELDS, fields) + + # Was this class defined with an explicit __hash__? Note that if + # __eq__ is defined in this class, then python will automatically + # set __hash__ to None. This is a heuristic, as it's possible + # that such a __hash__ == None was not auto-generated, but it + # close enough. + class_hash = cls.__dict__.get("__hash__", MISSING) + has_explicit_hash = not ( + class_hash is MISSING or (class_hash is None and "__eq__" in cls.__dict__) + ) + + # If we're generating ordering methods, we must be generating the + # eq methods. + if order and not eq: + raise ValueError("eq must be true if order is true") + if init: + # Does this class have a post-init function? + has_post_init = hasattr(cls, _POST_INIT_NAME) + + # Include InitVars and regular fields (so, not ClassVars). + flds = [f for f in fields.values() if f._field_type in (_FIELD, _FIELD_INITVAR)] + _set_new_attribute( + cls, + "__init__", + _init_fn( + flds, + frozen, + has_post_init, + # The name to use for the "self" + # param in __init__. Use "self" + # if possible. + "__dataclass_self__" if "self" in fields else "self", + ), + ) + # Get the fields as a list, and include only real fields. This is + # used in all of the following methods. + field_list = [f for f in fields.values() if f._field_type is _FIELD] + + if repr: + flds = [f for f in field_list if f.repr] + _set_new_attribute(cls, "__repr__", _repr_fn(flds)) + if eq: + # Create _eq__ method. There's no need for a __ne__ method, + # since python will call __eq__ and negate it. + flds = [f for f in field_list if f.compare] + self_tuple = _tuple_str("self", flds) + other_tuple = _tuple_str("other", flds) + _set_new_attribute( + cls, "__eq__", _cmp_fn("__eq__", "==", self_tuple, other_tuple) + ) + if order: + # Create and set the ordering methods. + flds = [f for f in field_list if f.compare] + self_tuple = _tuple_str("self", flds) + other_tuple = _tuple_str("other", flds) + for name, op in [ + ("__lt__", "<"), + ("__le__", "<="), + ("__gt__", ">"), + ("__ge__", ">="), + ]: + if _set_new_attribute( + cls, name, _cmp_fn(name, op, self_tuple, other_tuple) + ): + raise TypeError( + f"Cannot overwrite attribute {name} " + f"in class {cls.__name__}. Consider using " + "functools.total_ordering" + ) + if frozen: + for fn in _frozen_get_del_attr(cls, field_list): + if _set_new_attribute(cls, fn.__name__, fn): + raise TypeError( + f"Cannot overwrite attribute {fn.__name__} " + f"in class {cls.__name__}" + ) + # Decide if/how we're going to create a hash function. + hash_action = _hash_action[ + bool(unsafe_hash), bool(eq), bool(frozen), has_explicit_hash + ] + if hash_action: + # No need to call _set_new_attribute here, since by the time + # we're here the overwriting is unconditional. + cls.__hash__ = hash_action(cls, field_list) + if not getattr(cls, "__doc__"): + # Create a class doc-string. + cls.__doc__ = cls.__name__ + str(inspect.signature(cls)).replace(" -> None", "") + return cls + + +# _cls should never be specified by keyword, so start it with an +# underscore. The presence of _cls is used to detect if this +# decorator is being called with parameters or not. +def dataclass( + _cls=None, + *, + init=True, + repr=True, + eq=True, + order=False, + unsafe_hash=False, + frozen=False, +): + """Returns the same class as was passed in, with dunder methods + added based on the fields defined in the class. + + Examines PEP 526 __annotations__ to determine fields. 
+ + If init is true, an __init__() method is added to the class. If + repr is true, a __repr__() method is added. If order is true, rich + comparison dunder methods are added. If unsafe_hash is true, a + __hash__() method function is added. If frozen is true, fields may + not be assigned to after instance creation. + """ + + def wrap(cls): + return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen) + + # See if we're being called as @dataclass or @dataclass(). + if _cls is None: + # We're called with parens. + return wrap + # We're called as @dataclass without parens. + return wrap(_cls) + + +def fields(class_or_instance): + """Return a tuple describing the fields of this dataclass. + + Accepts a dataclass or an instance of one. Tuple elements are of + type Field. + """ + + # Might it be worth caching this, per class? + try: + fields = getattr(class_or_instance, _FIELDS) + except AttributeError: + raise TypeError("must be called with a dataclass type or instance") + # Exclude pseudo-fields. Note that fields is sorted by insertion + # order, so the order of the tuple is as the fields were defined. + return tuple(f for f in fields.values() if f._field_type is _FIELD) + + +def _is_dataclass_instance(obj): + """Returns True if obj is an instance of a dataclass.""" + return not isinstance(obj, type) and hasattr(obj, _FIELDS) + + +def is_dataclass(obj): + """Returns True if obj is a dataclass or an instance of a + dataclass.""" + return hasattr(obj, _FIELDS) + + +def asdict(obj, *, dict_factory=dict): + """Return the fields of a dataclass instance as a new dictionary mapping + field names to field values. + + Example usage: + + @dataclass + class C: + x: int + y: int + + c = C(1, 2) + assert asdict(c) == {'x': 1, 'y': 2} + + If given, 'dict_factory' will be used instead of built-in dict. + The function applies recursively to field values that are + dataclass instances. This will also look into built-in containers: + tuples, lists, and dicts. + """ + if not _is_dataclass_instance(obj): + raise TypeError("asdict() should be called on dataclass instances") + return _asdict_inner(obj, dict_factory) + + +def _asdict_inner(obj, dict_factory): + if _is_dataclass_instance(obj): + result = [] + for f in fields(obj): + value = _asdict_inner(getattr(obj, f.name), dict_factory) + result.append((f.name, value)) + return dict_factory(result) + elif isinstance(obj, (list, tuple)): + return type(obj)(_asdict_inner(v, dict_factory) for v in obj) + elif isinstance(obj, dict): + return type(obj)( + (_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory)) + for k, v in obj.items() + ) + else: + return copy.deepcopy(obj) + + +def astuple(obj, *, tuple_factory=tuple): + """Return the fields of a dataclass instance as a new tuple of field values. + + Example usage:: + + @dataclass + class C: + x: int + y: int + + c = C(1, 2) + assert astuple(c) == (1, 2) + + If given, 'tuple_factory' will be used instead of built-in tuple. + The function applies recursively to field values that are + dataclass instances. This will also look into built-in containers: + tuples, lists, and dicts. 
+ """ + + if not _is_dataclass_instance(obj): + raise TypeError("astuple() should be called on dataclass instances") + return _astuple_inner(obj, tuple_factory) + + +def _astuple_inner(obj, tuple_factory): + if _is_dataclass_instance(obj): + result = [] + for f in fields(obj): + value = _astuple_inner(getattr(obj, f.name), tuple_factory) + result.append(value) + return tuple_factory(result) + elif isinstance(obj, (list, tuple)): + return type(obj)(_astuple_inner(v, tuple_factory) for v in obj) + elif isinstance(obj, dict): + return type(obj)( + (_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory)) + for k, v in obj.items() + ) + else: + return copy.deepcopy(obj) + + +def make_dataclass( + cls_name, + fields, + *, + bases=(), + namespace=None, + init=True, + repr=True, + eq=True, + order=False, + unsafe_hash=False, + frozen=False, +): + """Return a new dynamically created dataclass. + + The dataclass name will be 'cls_name'. 'fields' is an iterable + of either (name), (name, type) or (name, type, Field) objects. If type is + omitted, use the string 'typing.Any'. Field objects are created by + the equivalent of calling 'field(name, type [, Field-info])'. + + C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,)) + + is equivalent to: + + @dataclass + class C(Base): + x: 'typing.Any' + y: int + z: int = field(init=False) + + For the bases and namespace parameters, see the builtin type() function. + + The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to + dataclass(). + """ + + if namespace is None: + namespace = {} + else: + # Copy namespace since we're going to mutate it. + namespace = namespace.copy() + # While we're looking through the field names, validate that they + # are identifiers, are not keywords, and not duplicates. + seen = set() + anns = {} + for item in fields: + if isinstance(item, str): + name = item + tp = "typing.Any" + elif len(item) == 2: + (name, tp) = item + elif len(item) == 3: + name, tp, spec = item + namespace[name] = spec + else: + raise TypeError(f"Invalid field: {item!r}") + if not isinstance(name, str) or not name.isidentifier(): + raise TypeError(f"Field names must be valid identifers: {name!r}") + if keyword.iskeyword(name): + raise TypeError(f"Field names must not be keywords: {name!r}") + if name in seen: + raise TypeError(f"Field name duplicated: {name!r}") + seen.add(name) + anns[name] = tp + namespace["__annotations__"] = anns + # We use `types.new_class()` instead of simply `type()` to allow dynamic creation + # of generic dataclassses. + cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace)) + return dataclass( + cls, + init=init, + repr=repr, + eq=eq, + order=order, + unsafe_hash=unsafe_hash, + frozen=frozen, + ) + + +def replace(obj, **changes): + """Return a new object replacing specified fields with new values. + + This is especially useful for frozen classes. Example usage: + + @dataclass(frozen=True) + class C: + x: int + y: int + + c = C(1, 2) + c1 = replace(c, x=3) + assert c1.x == 3 and c1.y == 2 + """ + + # We're going to mutate 'changes', but that's okay because it's a + # new dict, even if called with 'replace(obj, **my_changes)'. + + if not _is_dataclass_instance(obj): + raise TypeError("replace() should be called on dataclass instances") + # It's an error to have init=False fields in 'changes'. + # If a field is not in 'changes', read its value from the provided obj. 
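+    # Sketch (hypothetical fields): given "x: int" and
+    # "y: int = field(init=False, default=0)", replace(c, y=1) raises
+    # ValueError, while replace(c, x=3) re-runs __init__, so y is
+    # re-initialized from its default rather than copied from c.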
+ + for f in getattr(obj, _FIELDS).values(): + if not f.init: + # Error if this field is specified in changes. + if f.name in changes: + raise ValueError( + f"field {f.name} is declared with " + "init=False, it cannot be specified with " + "replace()" + ) + continue + if f.name not in changes: + changes[f.name] = getattr(obj, f.name) + # Create the new object, which calls __init__() and + # __post_init__() (if defined), using all of the init fields we've + # added and/or left in 'changes'. If there are values supplied in + # changes that aren't fields, this will correctly raise a + # TypeError. + return obj.__class__(**changes) diff --git a/testbed/graphql-python__graphene/graphene/pyutils/version.py b/testbed/graphql-python__graphene/graphene/pyutils/version.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3be07a91cddf5298a4df47d6409b3335ae4f2f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/pyutils/version.py @@ -0,0 +1,78 @@ +from __future__ import unicode_literals + +import datetime +import os +import subprocess + + +def get_version(version=None): + "Returns a PEP 440-compliant version number from VERSION." + version = get_complete_version(version) + + # Now build the two parts of the version number: + # main = X.Y[.Z] + # sub = .devN - for pre-alpha releases + # | {a|b|rc}N - for alpha, beta, and rc releases + + main = get_main_version(version) + + sub = "" + if version[3] == "alpha" and version[4] == 0: + git_changeset = get_git_changeset() + sub = ".dev%s" % git_changeset if git_changeset else ".dev" + elif version[3] != "final": + mapping = {"alpha": "a", "beta": "b", "rc": "rc"} + sub = mapping[version[3]] + str(version[4]) + + return str(main + sub) + + +def get_main_version(version=None): + "Returns main version (X.Y[.Z]) from VERSION." + version = get_complete_version(version) + parts = 2 if version[2] == 0 else 3 + return ".".join(str(x) for x in version[:parts]) + + +def get_complete_version(version=None): + """Returns a tuple of the graphene version. If version argument is non-empty, + then checks for correctness of the tuple provided. + """ + if version is None: + from graphene import VERSION as version + else: + assert len(version) == 5 + assert version[3] in ("alpha", "beta", "rc", "final") + + return version + + +def get_docs_version(version=None): + version = get_complete_version(version) + if version[3] != "final": + return "dev" + else: + return "%d.%d" % version[:2] + + +def get_git_changeset(): + """Returns a numeric identifier of the latest git changeset. + The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format. + This value isn't guaranteed to be unique, but collisions are very unlikely, + so it's sufficient for generating the development version numbers. 
+ """ + repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + try: + git_log = subprocess.Popen( + "git log --pretty=format:%ct --quiet -1 HEAD", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + cwd=repo_dir, + universal_newlines=True, + ) + timestamp = git_log.communicate()[0] + timestamp = datetime.datetime.utcfromtimestamp(int(timestamp)) + except: + return None + return timestamp.strftime("%Y%m%d%H%M%S") diff --git a/testbed/graphql-python__graphene/graphene/relay/__init__.py b/testbed/graphql-python__graphene/graphene/relay/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3b842cf56fc6bfa373d2e57fcf75d4dae335fe81 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/__init__.py @@ -0,0 +1,23 @@ +from .node import Node, is_node, GlobalID +from .mutation import ClientIDMutation +from .connection import Connection, ConnectionField, PageInfo +from .id_type import ( + BaseGlobalIDType, + DefaultGlobalIDType, + SimpleGlobalIDType, + UUIDGlobalIDType, +) + +__all__ = [ + "BaseGlobalIDType", + "ClientIDMutation", + "Connection", + "ConnectionField", + "DefaultGlobalIDType", + "GlobalID", + "Node", + "PageInfo", + "SimpleGlobalIDType", + "UUIDGlobalIDType", + "is_node", +] diff --git a/testbed/graphql-python__graphene/graphene/relay/connection.py b/testbed/graphql-python__graphene/graphene/relay/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4973676d82fa2c00e217a26c59704a1ed55442 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/connection.py @@ -0,0 +1,192 @@ +import re +from collections.abc import Iterable +from functools import partial +from typing import Type + +from graphql_relay import connection_from_array + +from ..types import Boolean, Enum, Int, Interface, List, NonNull, Scalar, String, Union +from ..types.field import Field +from ..types.objecttype import ObjectType, ObjectTypeOptions +from ..utils.thenables import maybe_thenable +from .node import is_node, AbstractNode + + +def get_edge_class( + connection_class: Type["Connection"], _node: Type[AbstractNode], base_name: str +): + edge_class = getattr(connection_class, "Edge", None) + + class EdgeBase: + node = Field(_node, description="The item at the end of the edge") + cursor = String(required=True, description="A cursor for use in pagination") + + class EdgeMeta: + description = f"A Relay edge containing a `{base_name}` and its cursor." + + edge_name = f"{base_name}Edge" + + edge_bases = [edge_class, EdgeBase] if edge_class else [EdgeBase] + if not isinstance(edge_class, ObjectType): + edge_bases = [*edge_bases, ObjectType] + + return type(edge_name, tuple(edge_bases), {"Meta": EdgeMeta}) + + +class PageInfo(ObjectType): + class Meta: + description = ( + "The Relay compliant `PageInfo` type, containing data necessary to" + " paginate this connection." 
+ ) + + has_next_page = Boolean( + required=True, + name="hasNextPage", + description="When paginating forwards, are there more items?", + ) + + has_previous_page = Boolean( + required=True, + name="hasPreviousPage", + description="When paginating backwards, are there more items?", + ) + + start_cursor = String( + name="startCursor", + description="When paginating backwards, the cursor to continue.", + ) + + end_cursor = String( + name="endCursor", + description="When paginating forwards, the cursor to continue.", + ) + + +# noinspection PyPep8Naming +def page_info_adapter(startCursor, endCursor, hasPreviousPage, hasNextPage): + """Adapter for creating PageInfo instances""" + return PageInfo( + start_cursor=startCursor, + end_cursor=endCursor, + has_previous_page=hasPreviousPage, + has_next_page=hasNextPage, + ) + + +class ConnectionOptions(ObjectTypeOptions): + node = None + + +class Connection(ObjectType): + class Meta: + abstract = True + + @classmethod + def __init_subclass_with_meta__(cls, node=None, name=None, _meta=None, **options): + if not _meta: + _meta = ConnectionOptions(cls) + assert node, f"You have to provide a node in {cls.__name__}.Meta" + assert isinstance(node, NonNull) or issubclass( + node, (Scalar, Enum, ObjectType, Interface, Union, NonNull) + ), f'Received incompatible node "{node}" for Connection {cls.__name__}.' + + base_name = re.sub("Connection$", "", name or cls.__name__) or node._meta.name + if not name: + name = f"{base_name}Connection" + + options["name"] = name + + _meta.node = node + + if not _meta.fields: + _meta.fields = {} + + if "page_info" not in _meta.fields: + _meta.fields["page_info"] = Field( + PageInfo, + name="pageInfo", + required=True, + description="Pagination data for this connection.", + ) + + if "edges" not in _meta.fields: + edge_class = get_edge_class(cls, node, base_name) # type: ignore + cls.Edge = edge_class + _meta.fields["edges"] = Field( + NonNull(List(edge_class)), + description="Contains the nodes in this connection.", + ) + + return super(Connection, cls).__init_subclass_with_meta__( + _meta=_meta, **options + ) + + +# noinspection PyPep8Naming +def connection_adapter(cls, edges, pageInfo): + """Adapter for creating Connection instances""" + return cls(edges=edges, page_info=pageInfo) + + +class IterableConnectionField(Field): + def __init__(self, type_, *args, **kwargs): + kwargs.setdefault("before", String()) + kwargs.setdefault("after", String()) + kwargs.setdefault("first", Int()) + kwargs.setdefault("last", Int()) + super(IterableConnectionField, self).__init__(type_, *args, **kwargs) + + @property + def type(self): + type_ = super(IterableConnectionField, self).type + connection_type = type_ + if isinstance(type_, NonNull): + connection_type = type_.of_type + + if is_node(connection_type): + raise Exception( + "ConnectionFields now need a explicit ConnectionType for Nodes.\n" + "Read more: https://github.com/graphql-python/graphene/blob/v2.0.0/UPGRADE-v2.0.md#node-connections" + ) + + assert issubclass( + connection_type, Connection + ), f'{self.__class__.__name__} type has to be a subclass of Connection. Received "{connection_type}".' + return type_ + + @classmethod + def resolve_connection(cls, connection_type, args, resolved): + if isinstance(resolved, connection_type): + return resolved + + assert isinstance(resolved, Iterable), ( + f"Resolved value from the connection field has to be an iterable or instance of {connection_type}. 
" + f'Received "{resolved}"' + ) + connection = connection_from_array( + resolved, + args, + connection_type=partial(connection_adapter, connection_type), + edge_type=connection_type.Edge, + page_info_type=page_info_adapter, + ) + connection.iterable = resolved + return connection + + @classmethod + def connection_resolver(cls, resolver, connection_type, root, info, **args): + resolved = resolver(root, info, **args) + + if isinstance(connection_type, NonNull): + connection_type = connection_type.of_type + + on_resolve = partial(cls.resolve_connection, connection_type, args) + return maybe_thenable(resolved, on_resolve) + + def wrap_resolve(self, parent_resolver): + resolver = super(IterableConnectionField, self).wrap_resolve(parent_resolver) + return partial(self.connection_resolver, resolver, self.type) + + +ConnectionField = IterableConnectionField diff --git a/testbed/graphql-python__graphene/graphene/relay/id_type.py b/testbed/graphql-python__graphene/graphene/relay/id_type.py new file mode 100644 index 0000000000000000000000000000000000000000..fb5c30e721e6c16a69dcf3a99c9d58d4174076bc --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/id_type.py @@ -0,0 +1,87 @@ +from graphql_relay import from_global_id, to_global_id + +from ..types import ID, UUID +from ..types.base import BaseType + +from typing import Type + + +class BaseGlobalIDType: + """ + Base class that define the required attributes/method for a type. + """ + + graphene_type = ID # type: Type[BaseType] + + @classmethod + def resolve_global_id(cls, info, global_id): + # return _type, _id + raise NotImplementedError + + @classmethod + def to_global_id(cls, _type, _id): + # return _id + raise NotImplementedError + + +class DefaultGlobalIDType(BaseGlobalIDType): + """ + Default global ID type: base64 encoded version of ": ". + """ + + graphene_type = ID + + @classmethod + def resolve_global_id(cls, info, global_id): + try: + _type, _id = from_global_id(global_id) + if not _type: + raise ValueError("Invalid Global ID") + return _type, _id + except Exception as e: + raise Exception( + f'Unable to parse global ID "{global_id}". ' + 'Make sure it is a base64 encoded string in the format: "TypeName:id". ' + f"Exception message: {e}" + ) + + @classmethod + def to_global_id(cls, _type, _id): + return to_global_id(_type, _id) + + +class SimpleGlobalIDType(BaseGlobalIDType): + """ + Simple global ID type: simply the id of the object. + To be used carefully as the user is responsible for ensuring that the IDs are indeed global + (otherwise it could cause request caching issues). + """ + + graphene_type = ID + + @classmethod + def resolve_global_id(cls, info, global_id): + _type = info.return_type.graphene_type._meta.name + return _type, global_id + + @classmethod + def to_global_id(cls, _type, _id): + return _id + + +class UUIDGlobalIDType(BaseGlobalIDType): + """ + UUID global ID type. + By definition UUID are global so they are used as they are. 
+ """ + + graphene_type = UUID + + @classmethod + def resolve_global_id(cls, info, global_id): + _type = info.return_type.graphene_type._meta.name + return _type, global_id + + @classmethod + def to_global_id(cls, _type, _id): + return _id diff --git a/testbed/graphql-python__graphene/graphene/relay/mutation.py b/testbed/graphql-python__graphene/graphene/relay/mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..2f4a4b7386576598daf1c218e7e7208e3171f81f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/mutation.py @@ -0,0 +1,66 @@ +import re + +from ..types import Field, InputObjectType, String +from ..types.mutation import Mutation +from ..utils.thenables import maybe_thenable + + +class ClientIDMutation(Mutation): + class Meta: + abstract = True + + @classmethod + def __init_subclass_with_meta__( + cls, output=None, input_fields=None, arguments=None, name=None, **options + ): + input_class = getattr(cls, "Input", None) + base_name = re.sub("Payload$", "", name or cls.__name__) + + assert not output, "Can't specify any output" + assert not arguments, "Can't specify any arguments" + + bases = (InputObjectType,) + if input_class: + bases += (input_class,) + + if not input_fields: + input_fields = {} + + cls.Input = type( + f"{base_name}Input", + bases, + dict(input_fields, client_mutation_id=String(name="clientMutationId")), + ) + + arguments = dict( + input=cls.Input(required=True) + # 'client_mutation_id': String(name='clientMutationId') + ) + mutate_and_get_payload = getattr(cls, "mutate_and_get_payload", None) + if cls.mutate and cls.mutate.__func__ == ClientIDMutation.mutate.__func__: + assert mutate_and_get_payload, ( + f"{name or cls.__name__}.mutate_and_get_payload method is required" + " in a ClientIDMutation." 
+ ) + + if not name: + name = f"{base_name}Payload" + + super(ClientIDMutation, cls).__init_subclass_with_meta__( + output=None, arguments=arguments, name=name, **options + ) + cls._meta.fields["client_mutation_id"] = Field(String, name="clientMutationId") + + @classmethod + def mutate(cls, root, info, input): + def on_resolve(payload): + try: + payload.client_mutation_id = input.get("client_mutation_id") + except Exception: + raise Exception( + f"Cannot set client_mutation_id in the payload object {repr(payload)}" + ) + return payload + + result = cls.mutate_and_get_payload(root, info, **input) + return maybe_thenable(result, on_resolve) diff --git a/testbed/graphql-python__graphene/graphene/relay/node.py b/testbed/graphql-python__graphene/graphene/relay/node.py new file mode 100644 index 0000000000000000000000000000000000000000..5443828133090d0a498fd173bfe6319ca10dfcdd --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/node.py @@ -0,0 +1,135 @@ +from functools import partial +from inspect import isclass + +from ..types import Field, Interface, ObjectType +from ..types.interface import InterfaceOptions +from ..types.utils import get_type +from .id_type import BaseGlobalIDType, DefaultGlobalIDType + + +def is_node(objecttype): + """ + Check if the given objecttype has Node as an interface + """ + if not isclass(objecttype): + return False + + if not issubclass(objecttype, ObjectType): + return False + + return any(issubclass(i, Node) for i in objecttype._meta.interfaces) + + +class GlobalID(Field): + def __init__( + self, + node=None, + parent_type=None, + required=True, + global_id_type=DefaultGlobalIDType, + *args, + **kwargs, + ): + super(GlobalID, self).__init__( + global_id_type.graphene_type, required=required, *args, **kwargs + ) + self.node = node or Node + self.parent_type_name = parent_type._meta.name if parent_type else None + + @staticmethod + def id_resolver(parent_resolver, node, root, info, parent_type_name=None, **args): + type_id = parent_resolver(root, info, **args) + parent_type_name = parent_type_name or info.parent_type.name + return node.to_global_id(parent_type_name, type_id) # root._meta.name + + def wrap_resolve(self, parent_resolver): + return partial( + self.id_resolver, + parent_resolver, + self.node, + parent_type_name=self.parent_type_name, + ) + + +class NodeField(Field): + def __init__(self, node, type_=False, **kwargs): + assert issubclass(node, Node), "NodeField can only operate in Nodes" + self.node_type = node + self.field_type = type_ + global_id_type = node._meta.global_id_type + + super(NodeField, self).__init__( + # If we don't specify a type, the field type will be the node interface + type_ or node, + id=global_id_type.graphene_type( + required=True, description="The ID of the object" + ), + **kwargs, + ) + + def wrap_resolve(self, parent_resolver): + return partial(self.node_type.node_resolver, get_type(self.field_type)) + + +class AbstractNode(Interface): + class Meta: + abstract = True + + @classmethod + def __init_subclass_with_meta__(cls, global_id_type=DefaultGlobalIDType, **options): + assert issubclass( + global_id_type, BaseGlobalIDType + ), "Custom ID type need to be implemented as a subclass of BaseGlobalIDType." 
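+ # The options object assembled below records the chosen global ID type on the
+ # interface metadata; NodeField reads it back later via node._meta.global_id_type.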
+ _meta = InterfaceOptions(cls) + _meta.global_id_type = global_id_type + _meta.fields = { + "id": GlobalID( + cls, global_id_type=global_id_type, description="The ID of the object" + ) + } + super(AbstractNode, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + @classmethod + def resolve_global_id(cls, info, global_id): + return cls._meta.global_id_type.resolve_global_id(info, global_id) + + +class Node(AbstractNode): + """An object with an ID""" + + @classmethod + def Field(cls, *args, **kwargs): # noqa: N802 + return NodeField(cls, *args, **kwargs) + + @classmethod + def node_resolver(cls, only_type, root, info, id): + return cls.get_node_from_global_id(info, id, only_type=only_type) + + @classmethod + def get_node_from_global_id(cls, info, global_id, only_type=None): + _type, _id = cls.resolve_global_id(info, global_id) + + graphene_type = info.schema.get_type(_type) + if graphene_type is None: + raise Exception(f'Relay Node "{_type}" not found in schema') + + graphene_type = graphene_type.graphene_type + + if only_type: + assert ( + graphene_type == only_type + ), f"Must receive a {only_type._meta.name} id." + + # We make sure the ObjectType implements the "Node" interface + if cls not in graphene_type._meta.interfaces: + raise Exception( + f'ObjectType "{_type}" does not implement the "{cls}" interface.' + ) + + get_node = getattr(graphene_type, "get_node", None) + if get_node: + return get_node(info, _id) + + @classmethod + def to_global_id(cls, type_, id): + return cls._meta.global_id_type.to_global_id(type_, id) diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/__init__.py b/testbed/graphql-python__graphene/graphene/relay/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_connection.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_connection.py new file mode 100644 index 0000000000000000000000000000000000000000..d45eea960168051c5aa579e3d96f35a6bc4e7abc --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_connection.py @@ -0,0 +1,301 @@ +import re + +from pytest import raises + +from ...types import Argument, Field, Int, List, NonNull, ObjectType, Schema, String +from ..connection import ( + Connection, + ConnectionField, + PageInfo, + ConnectionOptions, + get_edge_class, +) +from ..node import Node + + +class MyObject(ObjectType): + class Meta: + interfaces = [Node] + + field = String() + + +def test_connection(): + class MyObjectConnection(Connection): + extra = String() + + class Meta: + node = MyObject + + class Edge: + other = String() + + assert MyObjectConnection._meta.name == "MyObjectConnection" + fields = MyObjectConnection._meta.fields + assert list(fields) == ["page_info", "edges", "extra"] + edge_field = fields["edges"] + pageinfo_field = fields["page_info"] + + assert isinstance(edge_field, Field) + assert isinstance(edge_field.type, NonNull) + assert isinstance(edge_field.type.of_type, List) + assert edge_field.type.of_type.of_type == MyObjectConnection.Edge + + assert isinstance(pageinfo_field, Field) + assert isinstance(pageinfo_field.type, NonNull) + assert pageinfo_field.type.of_type == PageInfo + + +def test_connection_inherit_abstracttype(): + class BaseConnection: + extra = String() + + class MyObjectConnection(BaseConnection, Connection): + class Meta: + node = MyObject + + assert MyObjectConnection._meta.name == "MyObjectConnection" + fields = 
MyObjectConnection._meta.fields + assert list(fields) == ["page_info", "edges", "extra"] + + +def test_connection_extra_abstract_fields(): + class ConnectionWithNodes(Connection): + class Meta: + abstract = True + + @classmethod + def __init_subclass_with_meta__(cls, node=None, name=None, **options): + _meta = ConnectionOptions(cls) + + _meta.fields = { + "nodes": Field( + NonNull(List(node)), + description="Contains all the nodes in this connection.", + ), + } + + return super(ConnectionWithNodes, cls).__init_subclass_with_meta__( + node=node, name=name, _meta=_meta, **options + ) + + class MyObjectConnection(ConnectionWithNodes): + class Meta: + node = MyObject + + class Edge: + other = String() + + assert MyObjectConnection._meta.name == "MyObjectConnection" + fields = MyObjectConnection._meta.fields + assert list(fields) == ["nodes", "page_info", "edges"] + edge_field = fields["edges"] + pageinfo_field = fields["page_info"] + nodes_field = fields["nodes"] + + assert isinstance(edge_field, Field) + assert isinstance(edge_field.type, NonNull) + assert isinstance(edge_field.type.of_type, List) + assert edge_field.type.of_type.of_type == MyObjectConnection.Edge + + assert isinstance(pageinfo_field, Field) + assert isinstance(pageinfo_field.type, NonNull) + assert pageinfo_field.type.of_type == PageInfo + + assert isinstance(nodes_field, Field) + assert isinstance(nodes_field.type, NonNull) + assert isinstance(nodes_field.type.of_type, List) + assert nodes_field.type.of_type.of_type == MyObject + + +def test_connection_override_fields(): + class ConnectionWithNodes(Connection): + class Meta: + abstract = True + + @classmethod + def __init_subclass_with_meta__(cls, node=None, name=None, **options): + _meta = ConnectionOptions(cls) + base_name = ( + re.sub("Connection$", "", name or cls.__name__) or node._meta.name + ) + + edge_class = get_edge_class(cls, node, base_name) + + _meta.fields = { + "page_info": Field( + NonNull( + PageInfo, + name="pageInfo", + required=True, + description="Pagination data for this connection.", + ) + ), + "edges": Field( + NonNull(List(NonNull(edge_class))), + description="Contains the nodes in this connection.", + ), + } + + return super(ConnectionWithNodes, cls).__init_subclass_with_meta__( + node=node, name=name, _meta=_meta, **options + ) + + class MyObjectConnection(ConnectionWithNodes): + class Meta: + node = MyObject + + assert MyObjectConnection._meta.name == "MyObjectConnection" + fields = MyObjectConnection._meta.fields + assert list(fields) == ["page_info", "edges"] + edge_field = fields["edges"] + pageinfo_field = fields["page_info"] + + assert isinstance(edge_field, Field) + assert isinstance(edge_field.type, NonNull) + assert isinstance(edge_field.type.of_type, List) + assert isinstance(edge_field.type.of_type.of_type, NonNull) + + assert edge_field.type.of_type.of_type.of_type.__name__ == "MyObjectEdge" + + # This page info is NonNull + assert isinstance(pageinfo_field, Field) + assert isinstance(edge_field.type, NonNull) + assert pageinfo_field.type.of_type == PageInfo + + +def test_connection_name(): + custom_name = "MyObjectCustomNameConnection" + + class BaseConnection: + extra = String() + + class MyObjectConnection(BaseConnection, Connection): + class Meta: + node = MyObject + name = custom_name + + assert MyObjectConnection._meta.name == custom_name + + +def test_edge(): + class MyObjectConnection(Connection): + class Meta: + node = MyObject + + class Edge: + other = String() + + Edge = MyObjectConnection.Edge + assert Edge._meta.name == 
"MyObjectEdge" + edge_fields = Edge._meta.fields + assert list(edge_fields) == ["node", "cursor", "other"] + + assert isinstance(edge_fields["node"], Field) + assert edge_fields["node"].type == MyObject + + assert isinstance(edge_fields["other"], Field) + assert edge_fields["other"].type == String + + +def test_edge_with_bases(): + class BaseEdge: + extra = String() + + class MyObjectConnection(Connection): + class Meta: + node = MyObject + + class Edge(BaseEdge): + other = String() + + Edge = MyObjectConnection.Edge + assert Edge._meta.name == "MyObjectEdge" + edge_fields = Edge._meta.fields + assert list(edge_fields) == ["node", "cursor", "extra", "other"] + + assert isinstance(edge_fields["node"], Field) + assert edge_fields["node"].type == MyObject + + assert isinstance(edge_fields["other"], Field) + assert edge_fields["other"].type == String + + +def test_edge_with_nonnull_node(): + class MyObjectConnection(Connection): + class Meta: + node = NonNull(MyObject) + + edge_fields = MyObjectConnection.Edge._meta.fields + assert isinstance(edge_fields["node"], Field) + assert isinstance(edge_fields["node"].type, NonNull) + assert edge_fields["node"].type.of_type == MyObject + + +def test_pageinfo(): + assert PageInfo._meta.name == "PageInfo" + fields = PageInfo._meta.fields + assert list(fields) == [ + "has_next_page", + "has_previous_page", + "start_cursor", + "end_cursor", + ] + + +def test_connectionfield(): + class MyObjectConnection(Connection): + class Meta: + node = MyObject + + field = ConnectionField(MyObjectConnection) + assert field.args == { + "before": Argument(String), + "after": Argument(String), + "first": Argument(Int), + "last": Argument(Int), + } + + +def test_connectionfield_node_deprecated(): + field = ConnectionField(MyObject) + with raises(Exception) as exc_info: + field.type + + assert "ConnectionFields now need a explicit ConnectionType for Nodes." 
in str( + exc_info.value + ) + + +def test_connectionfield_custom_args(): + class MyObjectConnection(Connection): + class Meta: + node = MyObject + + field = ConnectionField( + MyObjectConnection, before=String(required=True), extra=String() + ) + assert field.args == { + "before": Argument(NonNull(String)), + "after": Argument(String), + "first": Argument(Int), + "last": Argument(Int), + "extra": Argument(String), + } + + +def test_connectionfield_required(): + class MyObjectConnection(Connection): + class Meta: + node = MyObject + + class Query(ObjectType): + test_connection = ConnectionField(MyObjectConnection, required=True) + + def resolve_test_connection(root, info, **args): + return [] + + schema = Schema(query=Query) + executed = schema.execute("{ testConnection { edges { cursor } } }") + assert not executed.errors + assert executed.data == {"testConnection": {"edges": []}} diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_connection_async.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_connection_async.py new file mode 100644 index 0000000000000000000000000000000000000000..ae228cf9a76c57c10b8b60a3c74fb529d733df80 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_connection_async.py @@ -0,0 +1,121 @@ +from pytest import mark + +from graphql_relay.utils import base64 + +from graphene.types import ObjectType, Schema, String +from graphene.relay.connection import Connection, ConnectionField, PageInfo +from graphene.relay.node import Node + +letter_chars = ["A", "B", "C", "D", "E"] + + +class Letter(ObjectType): + class Meta: + interfaces = (Node,) + + letter = String() + + +class LetterConnection(Connection): + class Meta: + node = Letter + + +class Query(ObjectType): + letters = ConnectionField(LetterConnection) + connection_letters = ConnectionField(LetterConnection) + async_letters = ConnectionField(LetterConnection) + + node = Node.Field() + + def resolve_letters(self, info, **args): + return list(letters.values()) + + async def resolve_async_letters(self, info, **args): + return list(letters.values()) + + def resolve_connection_letters(self, info, **args): + return LetterConnection( + page_info=PageInfo(has_next_page=True, has_previous_page=False), + edges=[ + LetterConnection.Edge(node=Letter(id=0, letter="A"), cursor="a-cursor") + ], + ) + + +schema = Schema(Query) + +letters = {letter: Letter(id=i, letter=letter) for i, letter in enumerate(letter_chars)} + + +def edges(selected_letters): + return [ + { + "node": {"id": base64("Letter:%s" % letter.id), "letter": letter.letter}, + "cursor": base64("arrayconnection:%s" % letter.id), + } + for letter in [letters[i] for i in selected_letters] + ] + + +def cursor_for(ltr): + letter = letters[ltr] + return base64("arrayconnection:%s" % letter.id) + + +def execute(args=""): + if args: + args = "(" + args + ")" + + return schema.execute( + """ + { + letters%s { + edges { + node { + id + letter + } + cursor + } + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + } + } + """ + % args + ) + + +@mark.asyncio +async def test_connection_async(): + result = await schema.execute_async( + """ + { + asyncLetters(first:1) { + edges { + node { + id + letter + } + } + pageInfo { + hasPreviousPage + hasNextPage + } + } + } + """ + ) + + assert not result.errors + assert result.data == { + "asyncLetters": { + "edges": [{"node": {"id": "TGV0dGVyOjA=", "letter": "A"}}], + "pageInfo": {"hasPreviousPage": False, "hasNextPage": True}, + } + } diff --git 
a/testbed/graphql-python__graphene/graphene/relay/tests/test_connection_query.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_connection_query.py new file mode 100644 index 0000000000000000000000000000000000000000..b697c462a2df9f6adb52e2da7b24a65db569271a --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_connection_query.py @@ -0,0 +1,286 @@ +from pytest import mark + +from graphql_relay.utils import base64 + +from ...types import ObjectType, Schema, String +from ..connection import Connection, ConnectionField, PageInfo +from ..node import Node + +letter_chars = ["A", "B", "C", "D", "E"] + + +class Letter(ObjectType): + class Meta: + interfaces = (Node,) + + letter = String() + + +class LetterConnection(Connection): + class Meta: + node = Letter + + +class Query(ObjectType): + letters = ConnectionField(LetterConnection) + connection_letters = ConnectionField(LetterConnection) + async_letters = ConnectionField(LetterConnection) + + node = Node.Field() + + def resolve_letters(self, info, **args): + return list(letters.values()) + + async def resolve_async_letters(self, info, **args): + return list(letters.values()) + + def resolve_connection_letters(self, info, **args): + return LetterConnection( + page_info=PageInfo(has_next_page=True, has_previous_page=False), + edges=[ + LetterConnection.Edge(node=Letter(id=0, letter="A"), cursor="a-cursor") + ], + ) + + +schema = Schema(Query) + +letters = {letter: Letter(id=i, letter=letter) for i, letter in enumerate(letter_chars)} + + +def edges(selected_letters): + return [ + { + "node": {"id": base64("Letter:%s" % letter.id), "letter": letter.letter}, + "cursor": base64("arrayconnection:%s" % letter.id), + } + for letter in [letters[i] for i in selected_letters] + ] + + +def cursor_for(ltr): + letter = letters[ltr] + return base64("arrayconnection:%s" % letter.id) + + +async def execute(args=""): + if args: + args = "(" + args + ")" + return await schema.execute_async( + """ + { + letters%s { + edges { + node { + id + letter + } + cursor + } + pageInfo { + hasPreviousPage + hasNextPage + startCursor + endCursor + } + } + } + """ + % args + ) + + +async def check(args, letters, has_previous_page=False, has_next_page=False): + result = await execute(args) + expected_edges = edges(letters) + expected_page_info = { + "hasPreviousPage": has_previous_page, + "hasNextPage": has_next_page, + "endCursor": expected_edges[-1]["cursor"] if expected_edges else None, + "startCursor": expected_edges[0]["cursor"] if expected_edges else None, + } + + assert not result.errors + assert result.data == { + "letters": {"edges": expected_edges, "pageInfo": expected_page_info} + } + + +@mark.asyncio +async def test_returns_all_elements_without_filters(): + await check("", "ABCDE") + + +@mark.asyncio +async def test_respects_a_smaller_first(): + await check("first: 2", "AB", has_next_page=True) + + +@mark.asyncio +async def test_respects_an_overly_large_first(): + await check("first: 10", "ABCDE") + + +@mark.asyncio +async def test_respects_a_smaller_last(): + await check("last: 2", "DE", has_previous_page=True) + + +@mark.asyncio +async def test_respects_an_overly_large_last(): + await check("last: 10", "ABCDE") + + +@mark.asyncio +async def test_respects_first_and_after(): + await check(f'first: 2, after: "{cursor_for("B")}"', "CD", has_next_page=True) + + +@mark.asyncio +async def test_respects_first_and_after_with_long_first(): + await check(f'first: 10, after: "{cursor_for("B")}"', "CDE") + + +@mark.asyncio +async def 
test_respects_last_and_before(): + await check(f'last: 2, before: "{cursor_for("D")}"', "BC", has_previous_page=True) + + +@mark.asyncio +async def test_respects_last_and_before_with_long_last(): + await check(f'last: 10, before: "{cursor_for("D")}"', "ABC") + + +@mark.asyncio +async def test_respects_first_and_after_and_before_too_few(): + await check( + f'first: 2, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', + "BC", + has_next_page=True, + ) + + +@mark.asyncio +async def test_respects_first_and_after_and_before_too_many(): + await check( + f'first: 4, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD" + ) + + +@mark.asyncio +async def test_respects_first_and_after_and_before_exactly_right(): + await check( + f'first: 3, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD" + ) + + +@mark.asyncio +async def test_respects_last_and_after_and_before_too_few(): + await check( + f'last: 2, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', + "CD", + has_previous_page=True, + ) + + +@mark.asyncio +async def test_respects_last_and_after_and_before_too_many(): + await check( + f'last: 4, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD" + ) + + +@mark.asyncio +async def test_respects_last_and_after_and_before_exactly_right(): + await check( + f'last: 3, after: "{cursor_for("A")}", before: "{cursor_for("E")}"', "BCD" + ) + + +@mark.asyncio +async def test_returns_no_elements_if_first_is_0(): + await check("first: 0", "", has_next_page=True) + + +@mark.asyncio +async def test_returns_all_elements_if_cursors_are_invalid(): + await check('before: "invalid" after: "invalid"', "ABCDE") + + +@mark.asyncio +async def test_returns_all_elements_if_cursors_are_on_the_outside(): + await check( + f'before: "{base64("arrayconnection:%s" % 6)}" after: "{base64("arrayconnection:%s" % -1)}"', + "ABCDE", + ) + + +@mark.asyncio +async def test_returns_no_elements_if_cursors_cross(): + await check( + f'before: "{base64("arrayconnection:%s" % 2)}" after: "{base64("arrayconnection:%s" % 4)}"', + "", + ) + + +@mark.asyncio +async def test_connection_type_nodes(): + result = await schema.execute_async( + """ + { + connectionLetters { + edges { + node { + id + letter + } + cursor + } + pageInfo { + hasPreviousPage + hasNextPage + } + } + } + """ + ) + + assert not result.errors + assert result.data == { + "connectionLetters": { + "edges": [ + {"node": {"id": "TGV0dGVyOjA=", "letter": "A"}, "cursor": "a-cursor"} + ], + "pageInfo": {"hasPreviousPage": False, "hasNextPage": True}, + } + } + + +@mark.asyncio +async def test_connection_async(): + result = await schema.execute_async( + """ + { + asyncLetters(first:1) { + edges { + node { + id + letter + } + } + pageInfo { + hasPreviousPage + hasNextPage + } + } + } + """ + ) + + assert not result.errors + assert result.data == { + "asyncLetters": { + "edges": [{"node": {"id": "TGV0dGVyOjA=", "letter": "A"}}], + "pageInfo": {"hasPreviousPage": False, "hasNextPage": True}, + } + } diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_custom_global_id.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_custom_global_id.py new file mode 100644 index 0000000000000000000000000000000000000000..c1bf0fb4b31917e34dccec6b5947403b08e2518a --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_custom_global_id.py @@ -0,0 +1,325 @@ +import re +from uuid import uuid4 + +from graphql import graphql_sync + +from ..id_type import BaseGlobalIDType, SimpleGlobalIDType, UUIDGlobalIDType 
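+# BaseGlobalIDType is the extension point exercised by the tests below;
+# SimpleGlobalIDType and UUIDGlobalIDType are the ready-made alternatives to
+# the default base64-encoded global IDs.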
+from ..node import Node
+from ...types import Int, ObjectType, Schema, String
+
+
+class TestUUIDGlobalID:
+ def setup(self):
+ self.user_list = [
+ {"id": uuid4(), "name": "First"},
+ {"id": uuid4(), "name": "Second"},
+ {"id": uuid4(), "name": "Third"},
+ {"id": uuid4(), "name": "Fourth"},
+ ]
+ self.users = {user["id"]: user for user in self.user_list}
+
+ class CustomNode(Node):
+ class Meta:
+ global_id_type = UUIDGlobalIDType
+
+ class User(ObjectType):
+ class Meta:
+ interfaces = [CustomNode]
+
+ name = String()
+
+ @classmethod
+ def get_node(cls, _type, _id):
+ return self.users[_id]
+
+ class RootQuery(ObjectType):
+ user = CustomNode.Field(User)
+
+ self.schema = Schema(query=RootQuery, types=[User])
+ self.graphql_schema = self.schema.graphql_schema
+
+ def test_str_schema_correct(self):
+ """
+ Check that the schema exposes the custom node interface and the user type, and that they both use UUIDs.
+ """
+ parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
+ types = [t for t, f in parsed]
+ fields = [f for t, f in parsed]
+ custom_node_interface = "interface CustomNode"
+ assert custom_node_interface in types
+ assert (
+ '"""The ID of the object"""\n id: UUID!'
+ == fields[types.index(custom_node_interface)]
+ )
+ user_type = "type User implements CustomNode"
+ assert user_type in types
+ assert (
+ '"""The ID of the object"""\n id: UUID!\n name: String'
+ == fields[types.index(user_type)]
+ )
+
+ def test_get_by_id(self):
+ query = """query userById($id: UUID!) {
+ user(id: $id) {
+ id
+ name
+ }
+ }"""
+ # UUIDs need to be converted to strings for serialization
+ result = graphql_sync(
+ self.graphql_schema,
+ query,
+ variable_values={"id": str(self.user_list[0]["id"])},
+ )
+ assert not result.errors
+ assert result.data["user"]["id"] == str(self.user_list[0]["id"])
+ assert result.data["user"]["name"] == self.user_list[0]["name"]
+
+
+class TestSimpleGlobalID:
+ def setup(self):
+ self.user_list = [
+ {"id": "my global primary key in clear 1", "name": "First"},
+ {"id": "my global primary key in clear 2", "name": "Second"},
+ {"id": "my global primary key in clear 3", "name": "Third"},
+ {"id": "my global primary key in clear 4", "name": "Fourth"},
+ ]
+ self.users = {user["id"]: user for user in self.user_list}
+
+ class CustomNode(Node):
+ class Meta:
+ global_id_type = SimpleGlobalIDType
+
+ class User(ObjectType):
+ class Meta:
+ interfaces = [CustomNode]
+
+ name = String()
+
+ @classmethod
+ def get_node(cls, _type, _id):
+ return self.users[_id]
+
+ class RootQuery(ObjectType):
+ user = CustomNode.Field(User)
+
+ self.schema = Schema(query=RootQuery, types=[User])
+ self.graphql_schema = self.schema.graphql_schema
+
+ def test_str_schema_correct(self):
+ """
+ Check that the schema exposes the custom node interface and the user type, and that they both use plain, unencoded IDs.
+ """
+ parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
+ types = [t for t, f in parsed]
+ fields = [f for t, f in parsed]
+ custom_node_interface = "interface CustomNode"
+ assert custom_node_interface in types
+ assert (
+ '"""The ID of the object"""\n id: ID!'
+ == fields[types.index(custom_node_interface)]
+ )
+ user_type = "type User implements CustomNode"
+ assert user_type in types
+ assert (
+ '"""The ID of the object"""\n id: ID!\n name: String'
+ == fields[types.index(user_type)]
+ )
+
+ def test_get_by_id(self):
+ query = """query {
+ user(id: "my global primary key in clear 3") {
+ id
+ name
+ }
+ }"""
+ result = graphql_sync(self.graphql_schema, query)
+ assert not result.errors
+ assert result.data["user"]["id"] == self.user_list[2]["id"]
+ assert result.data["user"]["name"] == self.user_list[2]["name"]
+
+
+class TestCustomGlobalID:
+ def setup(self):
+ self.user_list = [
+ {"id": 1, "name": "First"},
+ {"id": 2, "name": "Second"},
+ {"id": 3, "name": "Third"},
+ {"id": 4, "name": "Fourth"},
+ ]
+ self.users = {user["id"]: user for user in self.user_list}
+
+ class CustomGlobalIDType(BaseGlobalIDType):
+ """
+ Global ID type that is simply an integer in clear text.
+ """
+
+ graphene_type = Int
+
+ @classmethod
+ def resolve_global_id(cls, info, global_id):
+ _type = info.return_type.graphene_type._meta.name
+ return _type, global_id
+
+ @classmethod
+ def to_global_id(cls, _type, _id):
+ return _id
+
+ class CustomNode(Node):
+ class Meta:
+ global_id_type = CustomGlobalIDType
+
+ class User(ObjectType):
+ class Meta:
+ interfaces = [CustomNode]
+
+ name = String()
+
+ @classmethod
+ def get_node(cls, _type, _id):
+ return self.users[_id]
+
+ class RootQuery(ObjectType):
+ user = CustomNode.Field(User)
+
+ self.schema = Schema(query=RootQuery, types=[User])
+ self.graphql_schema = self.schema.graphql_schema
+
+ def test_str_schema_correct(self):
+ """
+ Check that the schema exposes the custom node interface and the user type, and that they both use Int IDs.
+ """
+ parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
+ types = [t for t, f in parsed]
+ fields = [f for t, f in parsed]
+ custom_node_interface = "interface CustomNode"
+ assert custom_node_interface in types
+ assert (
+ '"""The ID of the object"""\n id: Int!'
+ == fields[types.index(custom_node_interface)]
+ )
+ user_type = "type User implements CustomNode"
+ assert user_type in types
+ assert (
+ '"""The ID of the object"""\n id: Int!\n name: String'
+ == fields[types.index(user_type)]
+ )
+
+ def test_get_by_id(self):
+ query = """query {
+ user(id: 2) {
+ id
+ name
+ }
+ }"""
+ result = graphql_sync(self.graphql_schema, query)
+ assert not result.errors
+ assert result.data["user"]["id"] == self.user_list[1]["id"]
+ assert result.data["user"]["name"] == self.user_list[1]["name"]
+
+
+class TestIncompleteCustomGlobalID:
+ def setup(self):
+ self.user_list = [
+ {"id": 1, "name": "First"},
+ {"id": 2, "name": "Second"},
+ {"id": 3, "name": "Third"},
+ {"id": 4, "name": "Fourth"},
+ ]
+ self.users = {user["id"]: user for user in self.user_list}
+
+ def test_must_define_to_global_id(self):
+ """
+ Test that if the `to_global_id` method is not defined, we can query the object, but we can't request its ID.
+ """ + + class CustomGlobalIDType(BaseGlobalIDType): + graphene_type = Int + + @classmethod + def resolve_global_id(cls, info, global_id): + _type = info.return_type.graphene_type._meta.name + return _type, global_id + + class CustomNode(Node): + class Meta: + global_id_type = CustomGlobalIDType + + class User(ObjectType): + class Meta: + interfaces = [CustomNode] + + name = String() + + @classmethod + def get_node(cls, _type, _id): + return self.users[_id] + + class RootQuery(ObjectType): + user = CustomNode.Field(User) + + self.schema = Schema(query=RootQuery, types=[User]) + self.graphql_schema = self.schema.graphql_schema + + query = """query { + user(id: 2) { + name + } + }""" + result = graphql_sync(self.graphql_schema, query) + assert not result.errors + assert result.data["user"]["name"] == self.user_list[1]["name"] + + query = """query { + user(id: 2) { + id + name + } + }""" + result = graphql_sync(self.graphql_schema, query) + assert result.errors is not None + assert len(result.errors) == 1 + assert result.errors[0].path == ["user", "id"] + + def test_must_define_resolve_global_id(self): + """ + Test that if the `resolve_global_id` method is not defined, we can't query the object by ID. + """ + + class CustomGlobalIDType(BaseGlobalIDType): + graphene_type = Int + + @classmethod + def to_global_id(cls, _type, _id): + return _id + + class CustomNode(Node): + class Meta: + global_id_type = CustomGlobalIDType + + class User(ObjectType): + class Meta: + interfaces = [CustomNode] + + name = String() + + @classmethod + def get_node(cls, _type, _id): + return self.users[_id] + + class RootQuery(ObjectType): + user = CustomNode.Field(User) + + self.schema = Schema(query=RootQuery, types=[User]) + self.graphql_schema = self.schema.graphql_schema + + query = """query { + user(id: 2) { + id + name + } + }""" + result = graphql_sync(self.graphql_schema, query) + assert result.errors is not None + assert len(result.errors) == 1 + assert result.errors[0].path == ["user"] diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_global_id.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_global_id.py new file mode 100644 index 0000000000000000000000000000000000000000..81860d9dcd5d83dd4178fec3f24a1e3420a4d15c --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_global_id.py @@ -0,0 +1,58 @@ +from graphql_relay import to_global_id + +from ...types import ID, NonNull, ObjectType, String +from ...types.definitions import GrapheneObjectType +from ..node import GlobalID, Node + + +class CustomNode(Node): + class Meta: + name = "Node" + + +class User(ObjectType): + class Meta: + interfaces = [CustomNode] + + name = String() + + +class Info: + def __init__(self, parent_type): + self.parent_type = GrapheneObjectType( + graphene_type=parent_type, + name=parent_type._meta.name, + description=parent_type._meta.description, + fields=None, + is_type_of=parent_type.is_type_of, + interfaces=None, + ) + + +def test_global_id_defaults_to_required_and_node(): + gid = GlobalID() + assert isinstance(gid.type, NonNull) + assert gid.type.of_type == ID + assert gid.node == Node + + +def test_global_id_allows_overriding_of_node_and_required(): + gid = GlobalID(node=CustomNode, required=False) + assert gid.type == ID + assert gid.node == CustomNode + + +def test_global_id_defaults_to_info_parent_type(): + my_id = "1" + gid = GlobalID() + id_resolver = gid.wrap_resolve(lambda *_: my_id) + my_global_id = id_resolver(None, Info(User)) + assert my_global_id == 
to_global_id(User._meta.name, my_id) + + +def test_global_id_allows_setting_customer_parent_type(): + my_id = "1" + gid = GlobalID(parent_type=User) + id_resolver = gid.wrap_resolve(lambda *_: my_id) + my_global_id = id_resolver(None, None) + assert my_global_id == to_global_id(User._meta.name, my_id) diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_mutation.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..e079ab4ea702763692e44869be67ba73fb8d7750 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_mutation.py @@ -0,0 +1,206 @@ +from pytest import mark, raises + +from ...types import ( + ID, + Argument, + Field, + InputField, + InputObjectType, + NonNull, + ObjectType, + Schema, +) +from ...types.scalars import String +from ..mutation import ClientIDMutation + + +class SharedFields: + shared = String() + + +class MyNode(ObjectType): + # class Meta: + # interfaces = (Node, ) + id = ID() + name = String() + + +class SaySomething(ClientIDMutation): + class Input: + what = String() + + phrase = String() + + @staticmethod + def mutate_and_get_payload(self, info, what, client_mutation_id=None): + return SaySomething(phrase=str(what)) + + +class FixedSaySomething: + __slots__ = ("phrase",) + + def __init__(self, phrase): + self.phrase = phrase + + +class SaySomethingFixed(ClientIDMutation): + class Input: + what = String() + + phrase = String() + + @staticmethod + def mutate_and_get_payload(self, info, what, client_mutation_id=None): + return FixedSaySomething(phrase=str(what)) + + +class SaySomethingAsync(ClientIDMutation): + class Input: + what = String() + + phrase = String() + + @staticmethod + async def mutate_and_get_payload(self, info, what, client_mutation_id=None): + return SaySomething(phrase=str(what)) + + +# MyEdge = MyNode.Connection.Edge +class MyEdge(ObjectType): + node = Field(MyNode) + cursor = String() + + +class OtherMutation(ClientIDMutation): + class Input(SharedFields): + additional_field = String() + + name = String() + my_node_edge = Field(MyEdge) + + @staticmethod + def mutate_and_get_payload( + self, info, shared="", additional_field="", client_mutation_id=None + ): + edge_type = MyEdge + return OtherMutation( + name=shared + additional_field, + my_node_edge=edge_type(cursor="1", node=MyNode(name="name")), + ) + + +class RootQuery(ObjectType): + something = String() + + +class Mutation(ObjectType): + say = SaySomething.Field() + say_fixed = SaySomethingFixed.Field() + say_async = SaySomethingAsync.Field() + other = OtherMutation.Field() + + +schema = Schema(query=RootQuery, mutation=Mutation) + + +def test_no_mutate_and_get_payload(): + with raises(AssertionError) as excinfo: + + class MyMutation(ClientIDMutation): + pass + + assert ( + "MyMutation.mutate_and_get_payload method is required in a ClientIDMutation." 
+ == str(excinfo.value) + ) + + +def test_mutation(): + fields = SaySomething._meta.fields + assert list(fields) == ["phrase", "client_mutation_id"] + assert SaySomething._meta.name == "SaySomethingPayload" + assert isinstance(fields["phrase"], Field) + field = SaySomething.Field() + assert field.type == SaySomething + assert list(field.args) == ["input"] + assert isinstance(field.args["input"], Argument) + assert isinstance(field.args["input"].type, NonNull) + assert field.args["input"].type.of_type == SaySomething.Input + assert isinstance(fields["client_mutation_id"], Field) + assert fields["client_mutation_id"].name == "clientMutationId" + assert fields["client_mutation_id"].type == String + + +def test_mutation_input(): + Input = SaySomething.Input + assert issubclass(Input, InputObjectType) + fields = Input._meta.fields + assert list(fields) == ["what", "client_mutation_id"] + assert isinstance(fields["what"], InputField) + assert fields["what"].type == String + assert isinstance(fields["client_mutation_id"], InputField) + assert fields["client_mutation_id"].type == String + + +def test_subclassed_mutation(): + fields = OtherMutation._meta.fields + assert list(fields) == ["name", "my_node_edge", "client_mutation_id"] + assert isinstance(fields["name"], Field) + field = OtherMutation.Field() + assert field.type == OtherMutation + assert list(field.args) == ["input"] + assert isinstance(field.args["input"], Argument) + assert isinstance(field.args["input"].type, NonNull) + assert field.args["input"].type.of_type == OtherMutation.Input + + +def test_subclassed_mutation_input(): + Input = OtherMutation.Input + assert issubclass(Input, InputObjectType) + fields = Input._meta.fields + assert list(fields) == ["shared", "additional_field", "client_mutation_id"] + assert isinstance(fields["shared"], InputField) + assert fields["shared"].type == String + assert isinstance(fields["additional_field"], InputField) + assert fields["additional_field"].type == String + assert isinstance(fields["client_mutation_id"], InputField) + assert fields["client_mutation_id"].type == String + + +def test_node_query(): + executed = schema.execute( + 'mutation a { say(input: {what:"hello", clientMutationId:"1"}) { phrase } }' + ) + assert not executed.errors + assert executed.data == {"say": {"phrase": "hello"}} + + +def test_node_query_fixed(): + executed = schema.execute( + 'mutation a { sayFixed(input: {what:"hello", clientMutationId:"1"}) { phrase } }' + ) + assert "Cannot set client_mutation_id in the payload object" in str( + executed.errors[0] + ) + + +@mark.asyncio +async def test_node_query_async(): + executed = await schema.execute_async( + 'mutation a { sayAsync(input: {what:"hello", clientMutationId:"1"}) { phrase } }' + ) + assert not executed.errors + assert executed.data == {"sayAsync": {"phrase": "hello"}} + + +def test_edge_query(): + executed = schema.execute( + 'mutation a { other(input: {clientMutationId:"1"}) { clientMutationId, myNodeEdge { cursor node { name }} } }' + ) + assert not executed.errors + assert dict(executed.data) == { + "other": { + "clientMutationId": "1", + "myNodeEdge": {"cursor": "1", "node": {"name": "name"}}, + } + } diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_mutation_async.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_mutation_async.py new file mode 100644 index 0000000000000000000000000000000000000000..bf61555de96f5c33467b0a7c7f71aa0900b8cb36 --- /dev/null +++ 
b/testbed/graphql-python__graphene/graphene/relay/tests/test_mutation_async.py @@ -0,0 +1,90 @@ +from pytest import mark + +from graphene.types import ID, Field, ObjectType, Schema +from graphene.types.scalars import String +from graphene.relay.mutation import ClientIDMutation +from graphene.test import Client + + +class SharedFields(object): + shared = String() + + +class MyNode(ObjectType): + # class Meta: + # interfaces = (Node, ) + id = ID() + name = String() + + +class SaySomethingAsync(ClientIDMutation): + class Input: + what = String() + + phrase = String() + + @staticmethod + async def mutate_and_get_payload(self, info, what, client_mutation_id=None): + return SaySomethingAsync(phrase=str(what)) + + +# MyEdge = MyNode.Connection.Edge +class MyEdge(ObjectType): + node = Field(MyNode) + cursor = String() + + +class OtherMutation(ClientIDMutation): + class Input(SharedFields): + additional_field = String() + + name = String() + my_node_edge = Field(MyEdge) + + @staticmethod + def mutate_and_get_payload( + self, info, shared="", additional_field="", client_mutation_id=None + ): + edge_type = MyEdge + return OtherMutation( + name=shared + additional_field, + my_node_edge=edge_type(cursor="1", node=MyNode(name="name")), + ) + + +class RootQuery(ObjectType): + something = String() + + +class Mutation(ObjectType): + say_promise = SaySomethingAsync.Field() + other = OtherMutation.Field() + + +schema = Schema(query=RootQuery, mutation=Mutation) +client = Client(schema) + + +@mark.asyncio +async def test_node_query_promise(): + executed = await client.execute_async( + 'mutation a { sayPromise(input: {what:"hello", clientMutationId:"1"}) { phrase } }' + ) + assert isinstance(executed, dict) + assert "errors" not in executed + assert executed["data"] == {"sayPromise": {"phrase": "hello"}} + + +@mark.asyncio +async def test_edge_query(): + executed = await client.execute_async( + 'mutation a { other(input: {clientMutationId:"1"}) { clientMutationId, myNodeEdge { cursor node { name }} } }' + ) + assert isinstance(executed, dict) + assert "errors" not in executed + assert executed["data"] == { + "other": { + "clientMutationId": "1", + "myNodeEdge": {"cursor": "1", "node": {"name": "name"}}, + } + } diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_node.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_node.py new file mode 100644 index 0000000000000000000000000000000000000000..e75645664b2d77eb83dbf652bec373ef34d1a047 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_node.py @@ -0,0 +1,220 @@ +import re +from textwrap import dedent + +from graphql_relay import to_global_id + +from ...types import ObjectType, Schema, String +from ..node import Node, is_node + + +class SharedNodeFields: + + shared = String() + something_else = String() + + def resolve_something_else(*_): + return "----" + + +class MyNode(ObjectType): + class Meta: + interfaces = (Node,) + + name = String() + + @staticmethod + def get_node(info, id): + return MyNode(name=str(id)) + + +class MyOtherNode(SharedNodeFields, ObjectType): + extra_field = String() + + class Meta: + interfaces = (Node,) + + def resolve_extra_field(self, *_): + return "extra field info." 
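+
+ # get_node is the hook that Node.get_node_from_global_id invokes once the
+ # global ID has been decoded into a (type, id) pair.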
+ + @staticmethod + def get_node(info, id): + return MyOtherNode(shared=str(id)) + + +class RootQuery(ObjectType): + first = String() + node = Node.Field() + only_node = Node.Field(MyNode) + only_node_lazy = Node.Field(lambda: MyNode) + + +schema = Schema(query=RootQuery, types=[MyNode, MyOtherNode]) + + +def test_node_good(): + assert "id" in MyNode._meta.fields + assert is_node(MyNode) + assert not is_node(object) + assert not is_node("node") + + +def test_node_query(): + executed = schema.execute( + '{ node(id:"%s") { ... on MyNode { name } } }' % Node.to_global_id("MyNode", 1) + ) + assert not executed.errors + assert executed.data == {"node": {"name": "1"}} + + +def test_subclassed_node_query(): + executed = schema.execute( + '{ node(id:"%s") { ... on MyOtherNode { shared, extraField, somethingElse } } }' + % to_global_id("MyOtherNode", 1) + ) + assert not executed.errors + assert executed.data == { + "node": { + "shared": "1", + "extraField": "extra field info.", + "somethingElse": "----", + } + } + + +def test_node_requesting_non_node(): + executed = schema.execute( + '{ node(id:"%s") { __typename } } ' % Node.to_global_id("RootQuery", 1) + ) + assert executed.errors + assert re.match( + r"ObjectType .* does not implement the .* interface.", + executed.errors[0].message, + ) + assert executed.data == {"node": None} + + +def test_node_requesting_unknown_type(): + executed = schema.execute( + '{ node(id:"%s") { __typename } } ' % Node.to_global_id("UnknownType", 1) + ) + assert executed.errors + assert re.match(r"Relay Node .* not found in schema", executed.errors[0].message) + assert executed.data == {"node": None} + + +def test_node_query_incorrect_id(): + executed = schema.execute( + '{ node(id:"%s") { ... on MyNode { name } } }' % "something:2" + ) + assert executed.errors + assert re.match(r"Unable to parse global ID .*", executed.errors[0].message) + assert executed.data == {"node": None} + + +def test_node_field(): + node_field = Node.Field() + assert node_field.type == Node + assert node_field.node_type == Node + + +def test_node_field_custom(): + node_field = Node.Field(MyNode) + assert node_field.type == MyNode + assert node_field.node_type == Node + + +def test_node_field_args(): + field_args = { + "name": "my_custom_name", + "description": "my_custom_description", + "deprecation_reason": "my_custom_deprecation_reason", + } + node_field = Node.Field(**field_args) + for field_arg, value in field_args.items(): + assert getattr(node_field, field_arg) == value + + +def test_node_field_only_type(): + executed = schema.execute( + '{ onlyNode(id:"%s") { __typename, name } } ' % Node.to_global_id("MyNode", 1) + ) + assert not executed.errors + assert executed.data == {"onlyNode": {"__typename": "MyNode", "name": "1"}} + + +def test_node_field_only_type_wrong(): + executed = schema.execute( + '{ onlyNode(id:"%s") { __typename, name } } ' + % Node.to_global_id("MyOtherNode", 1) + ) + assert len(executed.errors) == 1 + assert str(executed.errors[0]).startswith("Must receive a MyNode id.") + assert executed.data == {"onlyNode": None} + + +def test_node_field_only_lazy_type(): + executed = schema.execute( + '{ onlyNodeLazy(id:"%s") { __typename, name } } ' + % Node.to_global_id("MyNode", 1) + ) + assert not executed.errors + assert executed.data == {"onlyNodeLazy": {"__typename": "MyNode", "name": "1"}} + + +def test_node_field_only_lazy_type_wrong(): + executed = schema.execute( + '{ onlyNodeLazy(id:"%s") { __typename, name } } ' + % Node.to_global_id("MyOtherNode", 1) + ) + assert 
len(executed.errors) == 1 + assert str(executed.errors[0]).startswith("Must receive a MyNode id.") + assert executed.data == {"onlyNodeLazy": None} + + +def test_str_schema(): + assert ( + str(schema).strip() + == dedent( + ''' + schema { + query: RootQuery + } + + type MyNode implements Node { + """The ID of the object""" + id: ID! + name: String + } + + """An object with an ID""" + interface Node { + """The ID of the object""" + id: ID! + } + + type MyOtherNode implements Node { + """The ID of the object""" + id: ID! + shared: String + somethingElse: String + extraField: String + } + + type RootQuery { + first: String + node( + """The ID of the object""" + id: ID! + ): Node + onlyNode( + """The ID of the object""" + id: ID! + ): MyNode + onlyNodeLazy( + """The ID of the object""" + id: ID! + ): MyNode + } + ''' + ).strip() + ) diff --git a/testbed/graphql-python__graphene/graphene/relay/tests/test_node_custom.py b/testbed/graphql-python__graphene/graphene/relay/tests/test_node_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..762e342464e46252141a3317ef6fb8483af944e8 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/relay/tests/test_node_custom.py @@ -0,0 +1,313 @@ +from textwrap import dedent + +from graphql import graphql_sync + +from ...types import Interface, ObjectType, Schema +from ...types.scalars import Int, String +from ..node import Node + + +class CustomNode(Node): + class Meta: + name = "Node" + + @staticmethod + def to_global_id(type_, id): + return id + + @staticmethod + def get_node_from_global_id(info, id, only_type=None): + assert info.schema is graphql_schema + if id in user_data: + return user_data.get(id) + else: + return photo_data.get(id) + + +class BasePhoto(Interface): + width = Int(description="The width of the photo in pixels") + + +class User(ObjectType): + class Meta: + interfaces = [CustomNode] + + name = String(description="The full name of the user") + + +class Photo(ObjectType): + class Meta: + interfaces = [CustomNode, BasePhoto] + + +user_data = {"1": User(id="1", name="John Doe"), "2": User(id="2", name="Jane Smith")} + +photo_data = {"3": Photo(id="3", width=300), "4": Photo(id="4", width=400)} + + +class RootQuery(ObjectType): + node = CustomNode.Field() + + +schema = Schema(query=RootQuery, types=[User, Photo]) +graphql_schema = schema.graphql_schema + + +def test_str_schema_correct(): + assert ( + str(schema).strip() + == dedent( + ''' + schema { + query: RootQuery + } + + type User implements Node { + """The ID of the object""" + id: ID! + + """The full name of the user""" + name: String + } + + interface Node { + """The ID of the object""" + id: ID! + } + + type Photo implements Node & BasePhoto { + """The ID of the object""" + id: ID! + + """The width of the photo in pixels""" + width: Int + } + + interface BasePhoto { + """The width of the photo in pixels""" + width: Int + } + + type RootQuery { + node( + """The ID of the object""" + id: ID! 
+ ): Node + } + ''' + ).strip() + ) + + +def test_gets_the_correct_id_for_users(): + query = """ + { + node(id: "1") { + id + } + } + """ + expected = {"node": {"id": "1"}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_gets_the_correct_id_for_photos(): + query = """ + { + node(id: "4") { + id + } + } + """ + expected = {"node": {"id": "4"}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_gets_the_correct_name_for_users(): + query = """ + { + node(id: "1") { + id + ... on User { + name + } + } + } + """ + expected = {"node": {"id": "1", "name": "John Doe"}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_gets_the_correct_width_for_photos(): + query = """ + { + node(id: "4") { + id + ... on Photo { + width + } + } + } + """ + expected = {"node": {"id": "4", "width": 400}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_gets_the_correct_typename_for_users(): + query = """ + { + node(id: "1") { + id + __typename + } + } + """ + expected = {"node": {"id": "1", "__typename": "User"}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_gets_the_correct_typename_for_photos(): + query = """ + { + node(id: "4") { + id + __typename + } + } + """ + expected = {"node": {"id": "4", "__typename": "Photo"}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_ignores_photo_fragments_on_user(): + query = """ + { + node(id: "1") { + id + ... on Photo { + width + } + } + } + """ + expected = {"node": {"id": "1"}} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_returns_null_for_bad_ids(): + query = """ + { + node(id: "5") { + id + } + } + """ + expected = {"node": None} + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_have_correct_node_interface(): + query = """ + { + __type(name: "Node") { + name + kind + fields { + name + type { + kind + ofType { + name + kind + } + } + } + } + } + """ + expected = { + "__type": { + "name": "Node", + "kind": "INTERFACE", + "fields": [ + { + "name": "id", + "type": { + "kind": "NON_NULL", + "ofType": {"name": "ID", "kind": "SCALAR"}, + }, + } + ], + } + } + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected + + +def test_has_correct_node_root_field(): + query = """ + { + __schema { + queryType { + fields { + name + type { + name + kind + } + args { + name + type { + kind + ofType { + name + kind + } + } + } + } + } + } + } + """ + expected = { + "__schema": { + "queryType": { + "fields": [ + { + "name": "node", + "type": {"name": "Node", "kind": "INTERFACE"}, + "args": [ + { + "name": "id", + "type": { + "kind": "NON_NULL", + "ofType": {"name": "ID", "kind": "SCALAR"}, + }, + } + ], + } + ] + } + } + } + result = graphql_sync(graphql_schema, query) + assert not result.errors + assert result.data == expected diff --git a/testbed/graphql-python__graphene/graphene/test/__init__.py b/testbed/graphql-python__graphene/graphene/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1813d9284008ae6cf23b1836531e8e65ed4533a9 --- 
/dev/null
+++ b/testbed/graphql-python__graphene/graphene/test/__init__.py
@@ -0,0 +1,39 @@
+from graphql.error import GraphQLError
+
+from graphene.types.schema import Schema
+
+
+def default_format_error(error):
+ if isinstance(error, GraphQLError):
+ return error.formatted
+ return {"message": str(error)}
+
+
+def format_execution_result(execution_result, format_error):
+ if execution_result:
+ response = {}
+ if execution_result.errors:
+ response["errors"] = [format_error(e) for e in execution_result.errors]
+ response["data"] = execution_result.data
+ return response
+
+
+class Client:
+ def __init__(self, schema, format_error=None, **execute_options):
+ assert isinstance(schema, Schema)
+ self.schema = schema
+ self.execute_options = execute_options
+ self.format_error = format_error or default_format_error
+
+ def format_result(self, result):
+ return format_execution_result(result, self.format_error)
+
+ def execute(self, *args, **kwargs):
+ executed = self.schema.execute(*args, **dict(self.execute_options, **kwargs))
+ return self.format_result(executed)
+
+ async def execute_async(self, *args, **kwargs):
+ executed = await self.schema.execute_async(
+ *args, **dict(self.execute_options, **kwargs)
+ )
+ return self.format_result(executed)
diff --git a/testbed/graphql-python__graphene/graphene/tests/__init__.py b/testbed/graphql-python__graphene/graphene/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/__init__.py b/testbed/graphql-python__graphene/graphene/tests/issues/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_1394.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_1394.py
new file mode 100644
index 0000000000000000000000000000000000000000..39374381996bfc665becda97026ba781a21fd060
--- /dev/null
+++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_1394.py
@@ -0,0 +1,36 @@
+from ...types import ObjectType, Schema, String, NonNull
+
+
+class Query(ObjectType):
+ hello = String(input=NonNull(String))
+
+ def resolve_hello(self, info, input):
+ if input == "nothing":
+ return None
+ return f"Hello {input}!"
+
+
+schema = Schema(query=Query)
+
+
+def test_required_input_provided():
+ """
+ Test that a required argument works when provided.
+ """
+ input_value = "Potato"
+ result = schema.execute('{ hello(input: "%s") }' % input_value)
+ assert not result.errors
+ assert result.data == {"hello": "Hello Potato!"}
+
+
+def test_required_input_missing():
+ """
+ Test that a required argument raises an error if not provided.
+ """
+ result = schema.execute("{ hello }")
+ assert result.errors
+ assert len(result.errors) == 1
+ assert (
+ result.errors[0].message
+ == "Field 'hello' argument 'input' of type 'String!' is required, but it was not provided."
+ ) diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_1419.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_1419.py new file mode 100644 index 0000000000000000000000000000000000000000..a08374daa57eb3182f9134114521e9b4c4c4efff --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_1419.py @@ -0,0 +1,53 @@ +import pytest + +from ...types.base64 import Base64 +from ...types.datetime import Date, DateTime +from ...types.decimal import Decimal +from ...types.generic import GenericScalar +from ...types.json import JSONString +from ...types.objecttype import ObjectType +from ...types.scalars import ID, BigInt, Boolean, Float, Int, String +from ...types.schema import Schema +from ...types.uuid import UUID + + +@pytest.mark.parametrize( + "input_type,input_value", + [ + (Date, '"2022-02-02"'), + (GenericScalar, '"foo"'), + (Int, "1"), + (BigInt, "12345678901234567890"), + (Float, "1.1"), + (String, '"foo"'), + (Boolean, "true"), + (ID, "1"), + (DateTime, '"2022-02-02T11:11:11"'), + (UUID, '"cbebbc62-758e-4f75-a890-bc73b5017d81"'), + (Decimal, '"1.1"'), + (JSONString, '"{\\"key\\":\\"foo\\",\\"value\\":\\"bar\\"}"'), + (Base64, '"Q2hlbG8gd29ycmxkCg=="'), + ], +) +def test_parse_literal_with_variables(input_type, input_value): + # input_b needs to be evaluated as literal while the variable dict for + # input_a is passed along. + + class Query(ObjectType): + generic = GenericScalar(input_a=GenericScalar(), input_b=input_type()) + + def resolve_generic(self, info, input_a=None, input_b=None): + return input + + schema = Schema(query=Query) + + query = f""" + query Test($a: GenericScalar){{ + generic(inputA: $a, inputB: {input_value}) + }} + """ + result = schema.execute( + query, + variables={"a": "bar"}, + ) + assert not result.errors diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_313.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_313.py new file mode 100644 index 0000000000000000000000000000000000000000..8082677a11aeda7a562834330d2c9ed3238a9c34 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_313.py @@ -0,0 +1,57 @@ +# https://github.com/graphql-python/graphene/issues/313 + +import graphene + + +class Query(graphene.ObjectType): + rand = graphene.String() + + +class Success(graphene.ObjectType): + yeah = graphene.String() + + +class Error(graphene.ObjectType): + message = graphene.String() + + +class CreatePostResult(graphene.Union): + class Meta: + types = [Success, Error] + + +class CreatePost(graphene.Mutation): + class Arguments: + text = graphene.String(required=True) + + result = graphene.Field(CreatePostResult) + + def mutate(self, info, text): + result = Success(yeah="yeah") + + return CreatePost(result=result) + + +class Mutations(graphene.ObjectType): + create_post = CreatePost.Field() + + +# tests.py + + +def test_create_post(): + query_string = """ + mutation { + createPost(text: "Try this out") { + result { + __typename + } + } + } + """ + + schema = graphene.Schema(query=Query, mutation=Mutations) + result = schema.execute(query_string) + + assert not result.errors + assert result.data["createPost"]["result"]["__typename"] == "Success" diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_356.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_356.py new file mode 100644 index 0000000000000000000000000000000000000000..480c5cd16c5a5b2f66492c58809bbc8fe1393c69 --- /dev/null +++ 
b/testbed/graphql-python__graphene/graphene/tests/issues/test_356.py @@ -0,0 +1,33 @@ +# https://github.com/graphql-python/graphene/issues/356 + +from pytest import raises + +import graphene +from graphene import relay + + +class SomeTypeOne(graphene.ObjectType): + pass + + +class SomeTypeTwo(graphene.ObjectType): + pass + + +class MyUnion(graphene.Union): + class Meta: + types = (SomeTypeOne, SomeTypeTwo) + + +def test_issue(): + class Query(graphene.ObjectType): + things = relay.ConnectionField(MyUnion) + + with raises(Exception) as exc_info: + graphene.Schema(query=Query) + + assert str(exc_info.value) == ( + "Query fields cannot be resolved." + " IterableConnectionField type has to be a subclass of Connection." + ' Received "MyUnion".' + ) diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_425.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_425.py new file mode 100644 index 0000000000000000000000000000000000000000..50c187585d7d95b6438ab1cd0da30809784d33c2 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_425.py @@ -0,0 +1,117 @@ +# https://github.com/graphql-python/graphene/issues/425 +# Adapted for Graphene 2.0 + +from graphene.types.enum import Enum, EnumOptions +from graphene.types.inputobjecttype import InputObjectType +from graphene.types.objecttype import ObjectType, ObjectTypeOptions + + +# ObjectType +class SpecialOptions(ObjectTypeOptions): + other_attr = None + + +class SpecialObjectType(ObjectType): + @classmethod + def __init_subclass_with_meta__(cls, other_attr="default", **options): + _meta = SpecialOptions(cls) + _meta.other_attr = other_attr + super(SpecialObjectType, cls).__init_subclass_with_meta__( + _meta=_meta, **options + ) + + +def test_special_objecttype_could_be_subclassed(): + class MyType(SpecialObjectType): + class Meta: + other_attr = "yeah!" + + assert MyType._meta.other_attr == "yeah!" + + +def test_special_objecttype_could_be_subclassed_default(): + class MyType(SpecialObjectType): + pass + + assert MyType._meta.other_attr == "default" + + +def test_special_objecttype_inherit_meta_options(): + class MyType(SpecialObjectType): + pass + + assert MyType._meta.name == "MyType" + assert MyType._meta.default_resolver is None + assert MyType._meta.interfaces == () + + +# InputObjectType +class SpecialInputObjectTypeOptions(ObjectTypeOptions): + other_attr = None + + +class SpecialInputObjectType(InputObjectType): + @classmethod + def __init_subclass_with_meta__(cls, other_attr="default", **options): + _meta = SpecialInputObjectTypeOptions(cls) + _meta.other_attr = other_attr + super(SpecialInputObjectType, cls).__init_subclass_with_meta__( + _meta=_meta, **options + ) + + +def test_special_inputobjecttype_could_be_subclassed(): + class MyInputObjectType(SpecialInputObjectType): + class Meta: + other_attr = "yeah!" + + assert MyInputObjectType._meta.other_attr == "yeah!" 
+ + +def test_special_inputobjecttype_could_be_subclassed_default(): + class MyInputObjectType(SpecialInputObjectType): + pass + + assert MyInputObjectType._meta.other_attr == "default" + + +def test_special_inputobjecttype_inherit_meta_options(): + class MyInputObjectType(SpecialInputObjectType): + pass + + assert MyInputObjectType._meta.name == "MyInputObjectType" + + +# Enum +class SpecialEnumOptions(EnumOptions): + other_attr = None + + +class SpecialEnum(Enum): + @classmethod + def __init_subclass_with_meta__(cls, other_attr="default", **options): + _meta = SpecialEnumOptions(cls) + _meta.other_attr = other_attr + super(SpecialEnum, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + +def test_special_enum_could_be_subclassed(): + class MyEnum(SpecialEnum): + class Meta: + other_attr = "yeah!" + + assert MyEnum._meta.other_attr == "yeah!" + + +def test_special_enum_could_be_subclassed_default(): + class MyEnum(SpecialEnum): + pass + + assert MyEnum._meta.other_attr == "default" + + +def test_special_enum_inherit_meta_options(): + class MyEnum(SpecialEnum): + pass + + assert MyEnum._meta.name == "MyEnum" diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_490.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_490.py new file mode 100644 index 0000000000000000000000000000000000000000..4453e2a66679ea9c3b30fad83ea09d0d9697295f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_490.py @@ -0,0 +1,24 @@ +# https://github.com/graphql-python/graphene/issues/313 + +import graphene + + +class Query(graphene.ObjectType): + some_field = graphene.String(from_=graphene.String(name="from")) + + def resolve_some_field(self, info, from_=None): + return from_ + + +def test_issue(): + query_string = """ + query myQuery { + someField(from: "Oh") + } + """ + + schema = graphene.Schema(query=Query) + result = schema.execute(query_string) + + assert not result.errors + assert result.data["someField"] == "Oh" diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_720.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_720.py new file mode 100644 index 0000000000000000000000000000000000000000..ea961b25d75958e7af2bc0b4359961aca25e1bb9 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_720.py @@ -0,0 +1,44 @@ +# https://github.com/graphql-python/graphene/issues/720 +# InputObjectTypes overwrite the "fields" attribute of the provided +# _meta object, so even if dynamic fields are provided with a standard +# InputObjectTypeOptions, they are ignored. 
+ +import graphene + + +class MyInputClass(graphene.InputObjectType): + @classmethod + def __init_subclass_with_meta__( + cls, container=None, _meta=None, fields=None, **options + ): + if _meta is None: + _meta = graphene.types.inputobjecttype.InputObjectTypeOptions(cls) + _meta.fields = fields + super(MyInputClass, cls).__init_subclass_with_meta__( + container=container, _meta=_meta, **options + ) + + +class MyInput(MyInputClass): + class Meta: + fields = dict(x=graphene.Field(graphene.Int)) + + +class Query(graphene.ObjectType): + myField = graphene.Field(graphene.String, input=graphene.Argument(MyInput)) + + def resolve_myField(parent, info, input): + return "ok" + + +def test_issue(): + query_string = """ + query myQuery { + myField(input: {x: 1}) + } + """ + + schema = graphene.Schema(query=Query) + result = schema.execute(query_string) + + assert not result.errors diff --git a/testbed/graphql-python__graphene/graphene/tests/issues/test_956.py b/testbed/graphql-python__graphene/graphene/tests/issues/test_956.py new file mode 100644 index 0000000000000000000000000000000000000000..72ff9713350c3ee2cddc7201876faa31a96c6f1c --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/tests/issues/test_956.py @@ -0,0 +1,8 @@ +import graphene + + +def test_issue(): + options = {"description": "This my enum", "deprecation_reason": "For the funs"} + new_enum = graphene.Enum("MyEnum", [("some", "data")], **options) + assert new_enum._meta.description == options["description"] + assert new_enum._meta.deprecation_reason == options["deprecation_reason"] diff --git a/testbed/graphql-python__graphene/graphene/types/__init__.py b/testbed/graphql-python__graphene/graphene/types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70478a05873912622dcd282b6487b11b9741fb8d --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/__init__.py @@ -0,0 +1,54 @@ +# flake8: noqa +from graphql import GraphQLResolveInfo as ResolveInfo + +from .argument import Argument +from .base64 import Base64 +from .context import Context +from .datetime import Date, DateTime, Time +from .decimal import Decimal +from .dynamic import Dynamic +from .enum import Enum +from .field import Field +from .inputfield import InputField +from .inputobjecttype import InputObjectType +from .interface import Interface +from .json import JSONString +from .mutation import Mutation +from .objecttype import ObjectType +from .scalars import ID, BigInt, Boolean, Float, Int, Scalar, String +from .schema import Schema +from .structures import List, NonNull +from .union import Union +from .uuid import UUID + +__all__ = [ + "Argument", + "Base64", + "BigInt", + "Boolean", + "Context", + "Date", + "DateTime", + "Decimal", + "Dynamic", + "Enum", + "Field", + "Float", + "ID", + "InputField", + "InputObjectType", + "Int", + "Interface", + "JSONString", + "List", + "Mutation", + "NonNull", + "ObjectType", + "ResolveInfo", + "Scalar", + "Schema", + "String", + "Time", + "UUID", + "Union", +] diff --git a/testbed/graphql-python__graphene/graphene/types/argument.py b/testbed/graphql-python__graphene/graphene/types/argument.py new file mode 100644 index 0000000000000000000000000000000000000000..d9283c4164e39c20d34a75946a326a3912621b2d --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/argument.py @@ -0,0 +1,120 @@ +from itertools import chain +from graphql import Undefined + +from .dynamic import Dynamic +from .mountedtype import MountedType +from .structures import NonNull +from .utils import get_type 
+
+
+class Argument(MountedType):
+    """
+    Makes an Argument available on a Field in the GraphQL schema.
+
+    Arguments will be parsed and provided to resolver methods for fields as keyword arguments.
+
+    All ``arg`` and ``**extra_args`` for a ``graphene.Field`` are implicitly mounted as Argument
+    using the below parameters.
+
+    .. code:: python
+
+        from graphene import String, Boolean, Argument
+
+        age = String(
+            # Boolean implicitly mounted as Argument
+            dog_years=Boolean(description="convert to dog years"),
+            # Boolean explicitly mounted as Argument
+            decades=Argument(Boolean, default_value=False),
+        )
+
+    args:
+        type (class for a graphene.UnmountedType): must be a class (not an instance) of an
+            unmounted graphene type (ex. scalar or object) which is used for the type of this
+            argument in the GraphQL schema.
+        required (optional, bool): indicates that this argument is non-null in the GraphQL schema.
+            Same behavior as graphene.NonNull. Default False.
+        name (optional, str): the name of the GraphQL argument. Defaults to parameter name.
+        description (optional, str): the description of the GraphQL argument in the schema.
+        default_value (optional, Any): The value to be provided if the user does not set this argument in
+            the operation.
+        deprecation_reason (optional, str): Setting this value indicates that the argument is
+            deprecated and may provide instructions or a reason for clients on how to proceed.
+            Cannot be set if the argument is required (see spec).
+    """
+
+    def __init__(
+        self,
+        type_,
+        default_value=Undefined,
+        deprecation_reason=None,
+        description=None,
+        name=None,
+        required=False,
+        _creation_counter=None,
+    ):
+        super(Argument, self).__init__(_creation_counter=_creation_counter)
+
+        if required:
+            assert (
+                deprecation_reason is None
+            ), f"Argument {name} is required, cannot deprecate it."
+            type_ = NonNull(type_)
+
+        self.name = name
+        self._type = type_
+        self.default_value = default_value
+        self.description = description
+        self.deprecation_reason = deprecation_reason
+
+    @property
+    def type(self):
+        return get_type(self._type)
+
+    def __eq__(self, other):
+        return isinstance(other, Argument) and (
+            self.name == other.name
+            and self.type == other.type
+            and self.default_value == other.default_value
+            and self.description == other.description
+            and self.deprecation_reason == other.deprecation_reason
+        )
+
+
+def to_arguments(args, extra_args=None):
+    # Normalize the declared args plus any extra args into a dict of mounted
+    # Argument instances, preserving declaration order.
+    from .unmountedtype import UnmountedType
+    from .field import Field
+    from .inputfield import InputField
+
+    if extra_args:
+        extra_args = sorted(extra_args.items(), key=lambda f: f[1])
+    else:
+        extra_args = []
+    iter_arguments = chain(args.items(), extra_args)
+    arguments = {}
+    for default_name, arg in iter_arguments:
+        if isinstance(arg, Dynamic):
+            arg = arg.get_type()
+            if arg is None:
+                # If the Dynamic type returned None
+                # then we skip the Argument
+                continue
+
+        if isinstance(arg, UnmountedType):
+            arg = Argument.mounted(arg)
+
+        if isinstance(arg, (InputField, Field)):
+            raise ValueError(
+                f"Expected {default_name} to be Argument, "
+                f"but received {type(arg).__name__}. Try using Argument({arg.type})."
+            )
+
+        if not isinstance(arg, Argument):
+            raise ValueError(f'Unknown argument "{default_name}".')
+
+        arg_name = default_name or arg.name
+        assert (
+            arg_name not in arguments
+        ), f'More than one Argument has the same name "{arg_name}".'
+ arguments[arg_name] = arg + + return arguments diff --git a/testbed/graphql-python__graphene/graphene/types/base.py b/testbed/graphql-python__graphene/graphene/types/base.py new file mode 100644 index 0000000000000000000000000000000000000000..84cb377a2f02a43860d9dbc9b8a1541a43048bb1 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/base.py @@ -0,0 +1,48 @@ +from typing import Type + +from ..utils.subclass_with_meta import SubclassWithMeta, SubclassWithMeta_Meta +from ..utils.trim_docstring import trim_docstring + + +class BaseOptions: + name = None # type: str + description = None # type: str + + _frozen = False # type: bool + + def __init__(self, class_type): + self.class_type = class_type # type: Type + + def freeze(self): + self._frozen = True + + def __setattr__(self, name, value): + if not self._frozen: + super(BaseOptions, self).__setattr__(name, value) + else: + raise Exception(f"Can't modify frozen Options {self}") + + def __repr__(self): + return f"<{self.__class__.__name__} name={repr(self.name)}>" + + +BaseTypeMeta = SubclassWithMeta_Meta + + +class BaseType(SubclassWithMeta): + @classmethod + def create_type(cls, class_name, **options): + return type(class_name, (cls,), {"Meta": options}) + + @classmethod + def __init_subclass_with_meta__( + cls, name=None, description=None, _meta=None, **_kwargs + ): + assert "_meta" not in cls.__dict__, "Can't assign meta directly" + if not _meta: + return + _meta.name = name or cls.__name__ + _meta.description = description or trim_docstring(cls.__doc__) + _meta.freeze() + cls._meta = _meta + super(BaseType, cls).__init_subclass_with_meta__() diff --git a/testbed/graphql-python__graphene/graphene/types/base64.py b/testbed/graphql-python__graphene/graphene/types/base64.py new file mode 100644 index 0000000000000000000000000000000000000000..69bb3380e37cbf6960e3bd28d858e23759883b7a --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/base64.py @@ -0,0 +1,43 @@ +from binascii import Error as _Error +from base64 import b64decode, b64encode + +from graphql.error import GraphQLError +from graphql.language import StringValueNode, print_ast + +from .scalars import Scalar + + +class Base64(Scalar): + """ + The `Base64` scalar type represents a base64-encoded String. 
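+
+    A minimal usage sketch (names like ``Query`` are illustrative only):
+
+    .. code:: python
+
+        from graphene import Base64, ObjectType, Schema
+
+        class Query(ObjectType):
+            encoded = Base64()
+
+            def resolve_encoded(root, info):
+                return "hello"  # serialized to "aGVsbG8="
+
+        schema = Schema(query=Query)
+        result = schema.execute("{ encoded }")
+        assert result.data == {"encoded": "aGVsbG8="}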
+ """ + + @staticmethod + def serialize(value): + if not isinstance(value, bytes): + if isinstance(value, str): + value = value.encode("utf-8") + else: + value = str(value).encode("utf-8") + return b64encode(value).decode("utf-8") + + @classmethod + def parse_literal(cls, node, _variables=None): + if not isinstance(node, StringValueNode): + raise GraphQLError( + f"Base64 cannot represent non-string value: {print_ast(node)}" + ) + return cls.parse_value(node.value) + + @staticmethod + def parse_value(value): + if not isinstance(value, bytes): + if not isinstance(value, str): + raise GraphQLError( + f"Base64 cannot represent non-string value: {repr(value)}" + ) + value = value.encode("utf-8") + try: + return b64decode(value, validate=True).decode("utf-8") + except _Error: + raise GraphQLError(f"Base64 cannot decode value: {repr(value)}") diff --git a/testbed/graphql-python__graphene/graphene/types/context.py b/testbed/graphql-python__graphene/graphene/types/context.py new file mode 100644 index 0000000000000000000000000000000000000000..fa405179acec80a5a41301faa820440967765718 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/context.py @@ -0,0 +1,25 @@ +class Context: + """ + Context can be used to make a convenient container for attributes to provide + for execution for resolvers of a GraphQL operation like a query. + + .. code:: python + + from graphene import Context + + context = Context(loaders=build_dataloaders(), request=my_web_request) + schema.execute('{ hello(name: "world") }', context=context) + + def resolve_hello(parent, info, name): + info.context.request # value set in Context + info.context.loaders # value set in Context + # ... + + args: + **params (Dict[str, Any]): values to make available on Context instance as attributes. + + """ + + def __init__(self, **params): + for key, value in params.items(): + setattr(self, key, value) diff --git a/testbed/graphql-python__graphene/graphene/types/datetime.py b/testbed/graphql-python__graphene/graphene/types/datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..d4f74470b18162137d5f82764ad390e7c2dddf99 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/datetime.py @@ -0,0 +1,112 @@ +from __future__ import absolute_import + +import datetime + +from aniso8601 import parse_date, parse_datetime, parse_time +from graphql.error import GraphQLError +from graphql.language import StringValueNode, print_ast + +from .scalars import Scalar + + +class Date(Scalar): + """ + The `Date` scalar type represents a Date + value as specified by + [iso8601](https://en.wikipedia.org/wiki/ISO_8601). 
+ """ + + @staticmethod + def serialize(date): + if isinstance(date, datetime.datetime): + date = date.date() + if not isinstance(date, datetime.date): + raise GraphQLError(f"Date cannot represent value: {repr(date)}") + return date.isoformat() + + @classmethod + def parse_literal(cls, node, _variables=None): + if not isinstance(node, StringValueNode): + raise GraphQLError( + f"Date cannot represent non-string value: {print_ast(node)}" + ) + return cls.parse_value(node.value) + + @staticmethod + def parse_value(value): + if isinstance(value, datetime.date): + return value + if not isinstance(value, str): + raise GraphQLError(f"Date cannot represent non-string value: {repr(value)}") + try: + return parse_date(value) + except ValueError: + raise GraphQLError(f"Date cannot represent value: {repr(value)}") + + +class DateTime(Scalar): + """ + The `DateTime` scalar type represents a DateTime + value as specified by + [iso8601](https://en.wikipedia.org/wiki/ISO_8601). + """ + + @staticmethod + def serialize(dt): + if not isinstance(dt, (datetime.datetime, datetime.date)): + raise GraphQLError(f"DateTime cannot represent value: {repr(dt)}") + return dt.isoformat() + + @classmethod + def parse_literal(cls, node, _variables=None): + if not isinstance(node, StringValueNode): + raise GraphQLError( + f"DateTime cannot represent non-string value: {print_ast(node)}" + ) + return cls.parse_value(node.value) + + @staticmethod + def parse_value(value): + if isinstance(value, datetime.datetime): + return value + if not isinstance(value, str): + raise GraphQLError( + f"DateTime cannot represent non-string value: {repr(value)}" + ) + try: + return parse_datetime(value) + except ValueError: + raise GraphQLError(f"DateTime cannot represent value: {repr(value)}") + + +class Time(Scalar): + """ + The `Time` scalar type represents a Time value as + specified by + [iso8601](https://en.wikipedia.org/wiki/ISO_8601). + """ + + @staticmethod + def serialize(time): + if not isinstance(time, datetime.time): + raise GraphQLError(f"Time cannot represent value: {repr(time)}") + return time.isoformat() + + @classmethod + def parse_literal(cls, node, _variables=None): + if not isinstance(node, StringValueNode): + raise GraphQLError( + f"Time cannot represent non-string value: {print_ast(node)}" + ) + return cls.parse_value(node.value) + + @classmethod + def parse_value(cls, value): + if isinstance(value, datetime.time): + return value + if not isinstance(value, str): + raise GraphQLError(f"Time cannot represent non-string value: {repr(value)}") + try: + return parse_time(value) + except ValueError: + raise GraphQLError(f"Time cannot represent value: {repr(value)}") diff --git a/testbed/graphql-python__graphene/graphene/types/decimal.py b/testbed/graphql-python__graphene/graphene/types/decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..0c6ccc97446d125aae8418258d3a8ca2609e4604 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/decimal.py @@ -0,0 +1,36 @@ +from __future__ import absolute_import + +from decimal import Decimal as _Decimal + +from graphql import Undefined +from graphql.language.ast import StringValueNode, IntValueNode + +from .scalars import Scalar + + +class Decimal(Scalar): + """ + The `Decimal` scalar type represents a python Decimal. 
+ """ + + @staticmethod + def serialize(dec): + if isinstance(dec, str): + dec = _Decimal(dec) + assert isinstance( + dec, _Decimal + ), f'Received not compatible Decimal "{repr(dec)}"' + return str(dec) + + @classmethod + def parse_literal(cls, node, _variables=None): + if isinstance(node, (StringValueNode, IntValueNode)): + return cls.parse_value(node.value) + return Undefined + + @staticmethod + def parse_value(value): + try: + return _Decimal(value) + except Exception: + return Undefined diff --git a/testbed/graphql-python__graphene/graphene/types/definitions.py b/testbed/graphql-python__graphene/graphene/types/definitions.py new file mode 100644 index 0000000000000000000000000000000000000000..ac574bed5672a4ba3698870c1295c4adb12f5055 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/definitions.py @@ -0,0 +1,62 @@ +from enum import Enum as PyEnum + +from graphql import ( + GraphQLEnumType, + GraphQLInputObjectType, + GraphQLInterfaceType, + GraphQLObjectType, + GraphQLScalarType, + GraphQLUnionType, +) + + +class GrapheneGraphQLType: + """ + A class for extending the base GraphQLType with the related + graphene_type + """ + + def __init__(self, *args, **kwargs): + self.graphene_type = kwargs.pop("graphene_type") + super(GrapheneGraphQLType, self).__init__(*args, **kwargs) + + def __copy__(self): + result = GrapheneGraphQLType(graphene_type=self.graphene_type) + result.__dict__.update(self.__dict__) + return result + + +class GrapheneInterfaceType(GrapheneGraphQLType, GraphQLInterfaceType): + pass + + +class GrapheneUnionType(GrapheneGraphQLType, GraphQLUnionType): + pass + + +class GrapheneObjectType(GrapheneGraphQLType, GraphQLObjectType): + pass + + +class GrapheneScalarType(GrapheneGraphQLType, GraphQLScalarType): + pass + + +class GrapheneEnumType(GrapheneGraphQLType, GraphQLEnumType): + def serialize(self, value): + if not isinstance(value, PyEnum): + enum = self.graphene_type._meta.enum + try: + # Try and get enum by value + value = enum(value) + except ValueError: + # Try and get enum by name + try: + value = enum[value] + except KeyError: + pass + return super(GrapheneEnumType, self).serialize(value) + + +class GrapheneInputObjectType(GrapheneGraphQLType, GraphQLInputObjectType): + pass diff --git a/testbed/graphql-python__graphene/graphene/types/dynamic.py b/testbed/graphql-python__graphene/graphene/types/dynamic.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb2b0fde72ef81e972467dbe4957c6a7087802c --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/dynamic.py @@ -0,0 +1,22 @@ +import inspect +from functools import partial + +from .mountedtype import MountedType + + +class Dynamic(MountedType): + """ + A Dynamic Type let us get the type in runtime when we generate + the schema. So we can have lazy fields. 
+ """ + + def __init__(self, type_, with_schema=False, _creation_counter=None): + super(Dynamic, self).__init__(_creation_counter=_creation_counter) + assert inspect.isfunction(type_) or isinstance(type_, partial) + self.type = type_ + self.with_schema = with_schema + + def get_type(self, schema=None): + if schema and self.with_schema: + return self.type(schema=schema) + return self.type() diff --git a/testbed/graphql-python__graphene/graphene/types/enum.py b/testbed/graphql-python__graphene/graphene/types/enum.py new file mode 100644 index 0000000000000000000000000000000000000000..7d68ccd4848759f556cd66fd84117e5010167e23 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/enum.py @@ -0,0 +1,118 @@ +from enum import Enum as PyEnum + +from graphene.utils.subclass_with_meta import SubclassWithMeta_Meta + +from .base import BaseOptions, BaseType +from .unmountedtype import UnmountedType + + +def eq_enum(self, other): + if isinstance(other, self.__class__): + return self is other + return self.value is other + + +def hash_enum(self): + return hash(self.name) + + +EnumType = type(PyEnum) + + +class EnumOptions(BaseOptions): + enum = None # type: Enum + deprecation_reason = None + + +class EnumMeta(SubclassWithMeta_Meta): + def __new__(cls, name_, bases, classdict, **options): + enum_members = dict(classdict, __eq__=eq_enum, __hash__=hash_enum) + # We remove the Meta attribute from the class to not collide + # with the enum values. + enum_members.pop("Meta", None) + enum = PyEnum(cls.__name__, enum_members) + return SubclassWithMeta_Meta.__new__( + cls, name_, bases, dict(classdict, __enum__=enum), **options + ) + + def get(cls, value): + return cls._meta.enum(value) + + def __getitem__(cls, value): + return cls._meta.enum[value] + + def __prepare__(name, bases, **kwargs): # noqa: N805 + return {} + + def __call__(cls, *args, **kwargs): # noqa: N805 + if cls is Enum: + description = kwargs.pop("description", None) + deprecation_reason = kwargs.pop("deprecation_reason", None) + return cls.from_enum( + PyEnum(*args, **kwargs), + description=description, + deprecation_reason=deprecation_reason, + ) + return super(EnumMeta, cls).__call__(*args, **kwargs) + # return cls._meta.enum(*args, **kwargs) + + def __iter__(cls): + return cls._meta.enum.__iter__() + + def from_enum( + cls, enum, name=None, description=None, deprecation_reason=None + ): # noqa: N805 + name = name or enum.__name__ + description = description or enum.__doc__ or "An enumeration." + meta_dict = { + "enum": enum, + "description": description, + "deprecation_reason": deprecation_reason, + } + meta_class = type("Meta", (object,), meta_dict) + return type(name, (Enum,), {"Meta": meta_class}) + + +class Enum(UnmountedType, BaseType, metaclass=EnumMeta): + """ + Enum type definition + + Defines a static set of values that can be provided as a Field, Argument or InputField. + + .. code:: python + + from graphene import Enum + + class NameFormat(Enum): + FIRST_LAST = "first_last" + LAST_FIRST = "last_first" + + Meta: + enum (optional, Enum): Python enum to use as a base for GraphQL Enum. + + name (optional, str): Name of the GraphQL type (must be unique in schema). Defaults to class + name. + description (optional, str): Description of the GraphQL type in the schema. Defaults to class + docstring. + deprecation_reason (optional, str): Setting this value indicates that the enum is + depreciated and may provide instruction or reason on how for clients to proceed. 
+ """ + + @classmethod + def __init_subclass_with_meta__(cls, enum=None, _meta=None, **options): + if not _meta: + _meta = EnumOptions(cls) + _meta.enum = enum or cls.__enum__ + _meta.deprecation_reason = options.pop("deprecation_reason", None) + for key, value in _meta.enum.__members__.items(): + setattr(cls, key, value) + + super(Enum, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + @classmethod + def get_type(cls): + """ + This function is called when the unmounted type (Enum instance) + is mounted (as a Field, InputField or Argument) + """ + return cls diff --git a/testbed/graphql-python__graphene/graphene/types/field.py b/testbed/graphql-python__graphene/graphene/types/field.py new file mode 100644 index 0000000000000000000000000000000000000000..dafb04b5361239cc64367120b0f05b151611e012 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/field.py @@ -0,0 +1,138 @@ +import inspect +from collections.abc import Mapping +from functools import partial + +from .argument import Argument, to_arguments +from .mountedtype import MountedType +from .resolver import default_resolver +from .structures import NonNull +from .unmountedtype import UnmountedType +from .utils import get_type +from ..utils.deprecated import warn_deprecation + +base_type = type + + +def source_resolver(source, root, info, **args): + resolved = default_resolver(source, None, root, info, **args) + if inspect.isfunction(resolved) or inspect.ismethod(resolved): + return resolved() + return resolved + + +class Field(MountedType): + """ + Makes a field available on an ObjectType in the GraphQL schema. Any type can be mounted as a + Field: + + - Object Type + - Scalar Type + - Enum + - Interface + - Union + + All class attributes of ``graphene.ObjectType`` are implicitly mounted as Field using the below + arguments. + + .. code:: python + + class Person(ObjectType): + first_name = graphene.String(required=True) # implicitly mounted as Field + last_name = graphene.Field(String, description='Surname') # explicitly mounted as Field + + args: + type (class for a graphene.UnmountedType): Must be a class (not an instance) of an + unmounted graphene type (ex. scalar or object) which is used for the type of this + field in the GraphQL schema. + args (optional, Dict[str, graphene.Argument]): Arguments that can be input to the field. + Prefer to use ``**extra_args``, unless you use an argument name that clashes with one + of the Field arguments presented here (see :ref:`example`). + resolver (optional, Callable): A function to get the value for a Field from the parent + value object. If not set, the default resolver method for the schema is used. + source (optional, str): attribute name to resolve for this field from the parent value + object. Alternative to resolver (cannot set both source and resolver). + deprecation_reason (optional, str): Setting this value indicates that the field is + depreciated and may provide instruction or reason on how for clients to proceed. + required (optional, bool): indicates this field as not null in the graphql schema. Same behavior as + graphene.NonNull. Default False. + name (optional, str): the name of the GraphQL field (must be unique in a type). Defaults to attribute + name. + description (optional, str): the description of the GraphQL field in the schema. + default_value (optional, Any): Default value to resolve if none set from schema. + **extra_args (optional, Dict[str, Union[graphene.Argument, graphene.UnmountedType]): any + additional arguments to mount on the field. 
+ """ + + def __init__( + self, + type_, + args=None, + resolver=None, + source=None, + deprecation_reason=None, + name=None, + description=None, + required=False, + _creation_counter=None, + default_value=None, + **extra_args, + ): + super(Field, self).__init__(_creation_counter=_creation_counter) + assert not args or isinstance( + args, Mapping + ), f'Arguments in a field have to be a mapping, received "{args}".' + assert not ( + source and resolver + ), "A Field cannot have a source and a resolver in at the same time." + assert not callable( + default_value + ), f'The default value can not be a function but received "{base_type(default_value)}".' + + if required: + type_ = NonNull(type_) + + # Check if name is actually an argument of the field + if isinstance(name, (Argument, UnmountedType)): + extra_args["name"] = name + name = None + + # Check if source is actually an argument of the field + if isinstance(source, (Argument, UnmountedType)): + extra_args["source"] = source + source = None + + self.name = name + self._type = type_ + self.args = to_arguments(args or {}, extra_args) + if source: + resolver = partial(source_resolver, source) + self.resolver = resolver + self.deprecation_reason = deprecation_reason + self.description = description + self.default_value = default_value + + @property + def type(self): + return get_type(self._type) + + get_resolver = None + + def wrap_resolve(self, parent_resolver): + """ + Wraps a function resolver, using the ObjectType resolve_{FIELD_NAME} + (parent_resolver) if the Field definition has no resolver. + """ + if self.get_resolver is not None: + warn_deprecation( + "The get_resolver method is being deprecated, please rename it to wrap_resolve." + ) + return self.get_resolver(parent_resolver) + + return self.resolver or parent_resolver + + def wrap_subscribe(self, parent_subscribe): + """ + Wraps a function subscribe, using the ObjectType subscribe_{FIELD_NAME} + (parent_subscribe) if the Field definition has no subscribe. + """ + return parent_subscribe diff --git a/testbed/graphql-python__graphene/graphene/types/generic.py b/testbed/graphql-python__graphene/graphene/types/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..2a3c8d5246c2fa0fca1b289dc387df5c402106e1 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/generic.py @@ -0,0 +1,49 @@ +from __future__ import unicode_literals + +from graphql.language.ast import ( + BooleanValueNode, + FloatValueNode, + IntValueNode, + ListValueNode, + ObjectValueNode, + StringValueNode, +) + +from graphene.types.scalars import MAX_INT, MIN_INT + +from .scalars import Scalar + + +class GenericScalar(Scalar): + """ + The `GenericScalar` scalar type represents a generic + GraphQL scalar value that could be: + String, Boolean, Int, Float, List or Object. 
+ """ + + @staticmethod + def identity(value): + return value + + serialize = identity + parse_value = identity + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, (StringValueNode, BooleanValueNode)): + return ast.value + elif isinstance(ast, IntValueNode): + num = int(ast.value) + if MIN_INT <= num <= MAX_INT: + return num + elif isinstance(ast, FloatValueNode): + return float(ast.value) + elif isinstance(ast, ListValueNode): + return [GenericScalar.parse_literal(value) for value in ast.values] + elif isinstance(ast, ObjectValueNode): + return { + field.name.value: GenericScalar.parse_literal(field.value) + for field in ast.fields + } + else: + return None diff --git a/testbed/graphql-python__graphene/graphene/types/inputfield.py b/testbed/graphql-python__graphene/graphene/types/inputfield.py new file mode 100644 index 0000000000000000000000000000000000000000..e7ededb0b1f5011244fc20d92e97a002f6cec25f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/inputfield.py @@ -0,0 +1,74 @@ +from graphql import Undefined + +from .mountedtype import MountedType +from .structures import NonNull +from .utils import get_type + + +class InputField(MountedType): + """ + Makes a field available on an ObjectType in the GraphQL schema. Any type can be mounted as a + Input Field except Interface and Union: + + - Object Type + - Scalar Type + - Enum + + Input object types also can't have arguments on their input fields, unlike regular ``graphene.Field``. + + All class attributes of ``graphene.InputObjectType`` are implicitly mounted as InputField + using the below arguments. + + .. code:: python + + from graphene import InputObjectType, String, InputField + + class Person(InputObjectType): + # implicitly mounted as Input Field + first_name = String(required=True) + # explicitly mounted as Input Field + last_name = InputField(String, description="Surname") + + args: + type (class for a graphene.UnmountedType): Must be a class (not an instance) of an + unmounted graphene type (ex. scalar or object) which is used for the type of this + field in the GraphQL schema. + name (optional, str): Name of the GraphQL input field (must be unique in a type). + Defaults to attribute name. + default_value (optional, Any): Default value to use as input if none set in user operation ( + query, mutation, etc.). + deprecation_reason (optional, str): Setting this value indicates that the field is + depreciated and may provide instruction or reason on how for clients to proceed. + description (optional, str): Description of the GraphQL field in the schema. + required (optional, bool): Indicates this input field as not null in the graphql schema. + Raises a validation error if argument not provided. Same behavior as graphene.NonNull. + Default False. + **extra_args (optional, Dict): Not used. + """ + + def __init__( + self, + type_, + name=None, + default_value=Undefined, + deprecation_reason=None, + description=None, + required=False, + _creation_counter=None, + **extra_args, + ): + super(InputField, self).__init__(_creation_counter=_creation_counter) + self.name = name + if required: + assert ( + deprecation_reason is None + ), f"InputField {name} is required, cannot deprecate it." 
+ type_ = NonNull(type_) + self._type = type_ + self.deprecation_reason = deprecation_reason + self.default_value = default_value + self.description = description + + @property + def type(self): + return get_type(self._type) diff --git a/testbed/graphql-python__graphene/graphene/types/inputobjecttype.py b/testbed/graphql-python__graphene/graphene/types/inputobjecttype.py new file mode 100644 index 0000000000000000000000000000000000000000..5d2785105f4441dceeb611c918c37311063f9524 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/inputobjecttype.py @@ -0,0 +1,90 @@ +from .base import BaseOptions, BaseType +from .inputfield import InputField +from .unmountedtype import UnmountedType +from .utils import yank_fields_from_attrs + +# For static type checking with Mypy +MYPY = False +if MYPY: + from typing import Dict, Callable # NOQA + + +class InputObjectTypeOptions(BaseOptions): + fields = None # type: Dict[str, InputField] + container = None # type: InputObjectTypeContainer + + +class InputObjectTypeContainer(dict, BaseType): # type: ignore + class Meta: + abstract = True + + def __init__(self, *args, **kwargs): + dict.__init__(self, *args, **kwargs) + for key in self._meta.fields: + setattr(self, key, self.get(key, None)) + + def __init_subclass__(cls, *args, **kwargs): + pass + + +class InputObjectType(UnmountedType, BaseType): + """ + Input Object Type Definition + + An input object defines a structured collection of fields which may be + supplied to a field argument. + + Using ``graphene.NonNull`` will ensure that a input value must be provided by the query. + + All class attributes of ``graphene.InputObjectType`` are implicitly mounted as InputField + using the below Meta class options. + + .. code:: python + + from graphene import InputObjectType, String, InputField + + class Person(InputObjectType): + # implicitly mounted as Input Field + first_name = String(required=True) + # explicitly mounted as Input Field + last_name = InputField(String, description="Surname") + + The fields on an input object type can themselves refer to input object types, but you can't + mix input and output types in your schema. + + Meta class options (optional): + name (str): the name of the GraphQL type (must be unique in schema). Defaults to class + name. + description (str): the description of the GraphQL type in the schema. Defaults to class + docstring. + container (class): A class reference for a value object that allows for + attribute initialization and access. Default InputObjectTypeContainer. + fields (Dict[str, graphene.InputField]): Dictionary of field name to InputField. Not + recommended to use (prefer class attributes). 
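+
+    Continuing the ``Person`` example above, an input object is typically
+    mounted as an argument (a sketch; field and argument names are
+    illustrative):
+
+    .. code:: python
+
+        from graphene import Argument, Field, ObjectType, String
+
+        class Query(ObjectType):
+            greeting = Field(String, person=Argument(Person))
+
+            def resolve_greeting(root, info, person=None):
+                # ``person`` arrives as the container, which supports
+                # attribute access such as ``person.first_name``
+                return f"Hello {person.first_name}" if person else "Hello"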
+ """ + + @classmethod + def __init_subclass_with_meta__(cls, container=None, _meta=None, **options): + if not _meta: + _meta = InputObjectTypeOptions(cls) + + fields = {} + for base in reversed(cls.__mro__): + fields.update(yank_fields_from_attrs(base.__dict__, _as=InputField)) + + if _meta.fields: + _meta.fields.update(fields) + else: + _meta.fields = fields + if container is None: + container = type(cls.__name__, (InputObjectTypeContainer, cls), {}) + _meta.container = container + super(InputObjectType, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + @classmethod + def get_type(cls): + """ + This function is called when the unmounted type (InputObjectType instance) + is mounted (as a Field, InputField or Argument) + """ + return cls diff --git a/testbed/graphql-python__graphene/graphene/types/interface.py b/testbed/graphql-python__graphene/graphene/types/interface.py new file mode 100644 index 0000000000000000000000000000000000000000..6503b78b36353b2751095c9e7fcb70a4a7c9da2a --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/interface.py @@ -0,0 +1,75 @@ +from .base import BaseOptions, BaseType +from .field import Field +from .utils import yank_fields_from_attrs + +# For static type checking with Mypy +MYPY = False +if MYPY: + from typing import Dict, Iterable, Type # NOQA + + +class InterfaceOptions(BaseOptions): + fields = None # type: Dict[str, Field] + interfaces = () # type: Iterable[Type[Interface]] + + +class Interface(BaseType): + """ + Interface Type Definition + + When a field can return one of a heterogeneous set of types, a Interface type + is used to describe what types are possible, what fields are in common across + all types, as well as a function to determine which type is actually used + when the field is resolved. + + .. code:: python + + from graphene import Interface, String + + class HasAddress(Interface): + class Meta: + description = "Address fields" + + address1 = String() + address2 = String() + + If a field returns an Interface Type, the ambiguous type of the object can be determined using + ``resolve_type`` on Interface and an ObjectType with ``Meta.possible_types`` or ``is_type_of``. + + Meta: + name (str): Name of the GraphQL type (must be unique in schema). Defaults to class + name. + description (str): Description of the GraphQL type in the schema. Defaults to class + docstring. + fields (Dict[str, graphene.Field]): Dictionary of field name to Field. Not recommended to + use (prefer class attributes). 
+ """ + + @classmethod + def __init_subclass_with_meta__(cls, _meta=None, interfaces=(), **options): + if not _meta: + _meta = InterfaceOptions(cls) + + fields = {} + for base in reversed(cls.__mro__): + fields.update(yank_fields_from_attrs(base.__dict__, _as=Field)) + + if _meta.fields: + _meta.fields.update(fields) + else: + _meta.fields = fields + + if not _meta.interfaces: + _meta.interfaces = interfaces + + super(Interface, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + @classmethod + def resolve_type(cls, instance, info): + from .objecttype import ObjectType + + if isinstance(instance, ObjectType): + return type(instance) + + def __init__(self, *args, **kwargs): + raise Exception("An Interface cannot be initialized") diff --git a/testbed/graphql-python__graphene/graphene/types/json.py b/testbed/graphql-python__graphene/graphene/types/json.py new file mode 100644 index 0000000000000000000000000000000000000000..ca55836b988770cbf4fea0cc24a0ff196efaab7d --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/json.py @@ -0,0 +1,34 @@ +from __future__ import absolute_import + +import json + +from graphql import Undefined +from graphql.language.ast import StringValueNode + +from .scalars import Scalar + + +class JSONString(Scalar): + """ + Allows use of a JSON String for input / output from the GraphQL schema. + + Use of this type is *not recommended* as you lose the benefits of having a defined, static + schema (one of the key benefits of GraphQL). + """ + + @staticmethod + def serialize(dt): + return json.dumps(dt) + + @staticmethod + def parse_literal(node, _variables=None): + if isinstance(node, StringValueNode): + try: + return json.loads(node.value) + except Exception as error: + raise ValueError(f"Badly formed JSONString: {str(error)}") + return Undefined + + @staticmethod + def parse_value(value): + return json.loads(value) diff --git a/testbed/graphql-python__graphene/graphene/types/mountedtype.py b/testbed/graphql-python__graphene/graphene/types/mountedtype.py new file mode 100644 index 0000000000000000000000000000000000000000..c42383e24a2cb973481eacc1571751f0c8e94443 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/mountedtype.py @@ -0,0 +1,20 @@ +from ..utils.orderedtype import OrderedType +from .unmountedtype import UnmountedType + + +class MountedType(OrderedType): + @classmethod + def mounted(cls, unmounted): # noqa: N802 + """ + Mount the UnmountedType instance + """ + assert isinstance( + unmounted, UnmountedType + ), f"{cls.__name__} can't mount {repr(unmounted)}" + + return cls( + unmounted.get_type(), + *unmounted.args, + _creation_counter=unmounted.creation_counter, + **unmounted.kwargs, + ) diff --git a/testbed/graphql-python__graphene/graphene/types/mutation.py b/testbed/graphql-python__graphene/graphene/types/mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..ad47c62a70af0303b13e08aec3987cc0e3891a0f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/mutation.py @@ -0,0 +1,133 @@ +from ..utils.deprecated import warn_deprecation +from ..utils.get_unbound_function import get_unbound_function +from ..utils.props import props +from .field import Field +from .objecttype import ObjectType, ObjectTypeOptions +from .utils import yank_fields_from_attrs +from .interface import Interface + +# For static type checking with Mypy +MYPY = False +if MYPY: + from .argument import Argument # NOQA + from typing import Dict, Type, Callable, Iterable # NOQA + + +class 
MutationOptions(ObjectTypeOptions): + arguments = None # type: Dict[str, Argument] + output = None # type: Type[ObjectType] + resolver = None # type: Callable + interfaces = () # type: Iterable[Type[Interface]] + + +class Mutation(ObjectType): + """ + Object Type Definition (mutation field) + + Mutation is a convenience type that helps us build a Field which takes Arguments and returns a + mutation Output ObjectType. + + .. code:: python + + import graphene + + class CreatePerson(graphene.Mutation): + class Arguments: + name = graphene.String() + + ok = graphene.Boolean() + person = graphene.Field(Person) + + def mutate(parent, info, name): + person = Person(name=name) + ok = True + return CreatePerson(person=person, ok=ok) + + class Mutation(graphene.ObjectType): + create_person = CreatePerson.Field() + + Meta class options (optional): + output (graphene.ObjectType): Or ``Output`` inner class with attributes on Mutation class. + Or attributes from Mutation class. Fields which can be returned from this mutation + field. + resolver (Callable resolver method): Or ``mutate`` method on Mutation class. Perform data + change and return output. + arguments (Dict[str, graphene.Argument]): Or ``Arguments`` inner class with attributes on + Mutation class. Arguments to use for the mutation Field. + name (str): Name of the GraphQL type (must be unique in schema). Defaults to class + name. + description (str): Description of the GraphQL type in the schema. Defaults to class + docstring. + interfaces (Iterable[graphene.Interface]): GraphQL interfaces to extend with the payload + object. All fields from interface will be included in this object's schema. + fields (Dict[str, graphene.Field]): Dictionary of field name to Field. Not recommended to + use (prefer class attributes or ``Meta.output``). + """ + + @classmethod + def __init_subclass_with_meta__( + cls, + interfaces=(), + resolver=None, + output=None, + arguments=None, + _meta=None, + **options, + ): + if not _meta: + _meta = MutationOptions(cls) + output = output or getattr(cls, "Output", None) + fields = {} + + for interface in interfaces: + assert issubclass( + interface, Interface + ), f'All interfaces of {cls.__name__} must be a subclass of Interface. Received "{interface}".' + fields.update(interface._meta.fields) + if not output: + # If output is defined, we don't need to get the fields + fields = {} + for base in reversed(cls.__mro__): + fields.update(yank_fields_from_attrs(base.__dict__, _as=Field)) + output = cls + if not arguments: + input_class = getattr(cls, "Arguments", None) + if not input_class: + input_class = getattr(cls, "Input", None) + if input_class: + warn_deprecation( + f"Please use {cls.__name__}.Arguments instead of {cls.__name__}.Input." 
+ " Input is now only used in ClientMutationID.\n" + "Read more:" + " https://github.com/graphql-python/graphene/blob/v2.0.0/UPGRADE-v2.0.md#mutation-input" + ) + arguments = props(input_class) if input_class else {} + if not resolver: + mutate = getattr(cls, "mutate", None) + assert mutate, "All mutations must define a mutate method in it" + resolver = get_unbound_function(mutate) + if _meta.fields: + _meta.fields.update(fields) + else: + _meta.fields = fields + _meta.interfaces = interfaces + _meta.output = output + _meta.resolver = resolver + _meta.arguments = arguments + + super(Mutation, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + @classmethod + def Field( + cls, name=None, description=None, deprecation_reason=None, required=False + ): + """Mount instance of mutation Field.""" + return Field( + cls._meta.output, + args=cls._meta.arguments, + resolver=cls._meta.resolver, + name=name, + description=description or cls._meta.description, + deprecation_reason=deprecation_reason, + required=required, + ) diff --git a/testbed/graphql-python__graphene/graphene/types/objecttype.py b/testbed/graphql-python__graphene/graphene/types/objecttype.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff29a2e43a07b5aae06edca735c119ac9307dd3 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/objecttype.py @@ -0,0 +1,160 @@ +from .base import BaseOptions, BaseType, BaseTypeMeta +from .field import Field +from .interface import Interface +from .utils import yank_fields_from_attrs + +try: + from dataclasses import make_dataclass, field +except ImportError: + from ..pyutils.dataclasses import make_dataclass, field # type: ignore +# For static type checking with Mypy +MYPY = False +if MYPY: + from typing import Dict, Iterable, Type # NOQA + + +class ObjectTypeOptions(BaseOptions): + fields = None # type: Dict[str, Field] + interfaces = () # type: Iterable[Type[Interface]] + + +class ObjectTypeMeta(BaseTypeMeta): + def __new__(cls, name_, bases, namespace, **options): + # Note: it's safe to pass options as keyword arguments as they are still type-checked by ObjectTypeOptions. + + # We create this type, to then overload it with the dataclass attrs + class InterObjectType: + pass + + base_cls = super().__new__( + cls, name_, (InterObjectType,) + bases, namespace, **options + ) + if base_cls._meta: + fields = [ + ( + key, + "typing.Any", + field( + default=field_value.default_value + if isinstance(field_value, Field) + else None + ), + ) + for key, field_value in base_cls._meta.fields.items() + ] + dataclass = make_dataclass(name_, fields, bases=()) + InterObjectType.__init__ = dataclass.__init__ + InterObjectType.__eq__ = dataclass.__eq__ + InterObjectType.__repr__ = dataclass.__repr__ + return base_cls + + +class ObjectType(BaseType, metaclass=ObjectTypeMeta): + """ + Object Type Definition + + Almost all of the GraphQL types you define will be object types. Object types + have a name, but most importantly describe their fields. + + The name of the type defined by an _ObjectType_ defaults to the class name. The type + description defaults to the class docstring. This can be overridden by adding attributes + to a Meta inner class. + + The class attributes of an _ObjectType_ are mounted as instances of ``graphene.Field``. + + Methods starting with ``resolve_`` are bound as resolvers of the matching Field + name. If no resolver is provided, the default resolver is used. 
+ + Ambiguous types with Interface and Union can be determined through ``is_type_of`` method and + ``Meta.possible_types`` attribute. + + .. code:: python + + from graphene import ObjectType, String, Field + + class Person(ObjectType): + class Meta: + description = 'A human' + + # implicitly mounted as Field + first_name = String() + # explicitly mounted as Field + last_name = Field(String) + + def resolve_last_name(parent, info): + return last_name + + ObjectType must be mounted using ``graphene.Field``. + + .. code:: python + + from graphene import ObjectType, Field + + class Query(ObjectType): + + person = Field(Person, description="My favorite person") + + Meta class options (optional): + name (str): Name of the GraphQL type (must be unique in schema). Defaults to class + name. + description (str): Description of the GraphQL type in the schema. Defaults to class + docstring. + interfaces (Iterable[graphene.Interface]): GraphQL interfaces to extend with this object. + all fields from interface will be included in this object's schema. + possible_types (Iterable[class]): Used to test parent value object via isinstance to see if + this type can be used to resolve an ambiguous type (interface, union). + default_resolver (any Callable resolver): Override the default resolver for this + type. Defaults to graphene default resolver which returns an attribute or dictionary + key with the same name as the field. + fields (Dict[str, graphene.Field]): Dictionary of field name to Field. Not recommended to + use (prefer class attributes). + + An _ObjectType_ can be used as a simple value object by creating an instance of the class. + + .. code:: python + + p = Person(first_name='Bob', last_name='Roberts') + assert p.first_name == 'Bob' + + Args: + *args (List[Any]): Positional values to use for Field values of value object + **kwargs (Dict[str: Any]): Keyword arguments to use for Field values of value object + """ + + @classmethod + def __init_subclass_with_meta__( + cls, + interfaces=(), + possible_types=(), + default_resolver=None, + _meta=None, + **options, + ): + if not _meta: + _meta = ObjectTypeOptions(cls) + fields = {} + + for interface in interfaces: + assert issubclass( + interface, Interface + ), f'All interfaces of {cls.__name__} must be a subclass of Interface. Received "{interface}".' + fields.update(interface._meta.fields) + for base in reversed(cls.__mro__): + fields.update(yank_fields_from_attrs(base.__dict__, _as=Field)) + assert not (possible_types and cls.is_type_of), ( + f"{cls.__name__}.Meta.possible_types will cause type collision with {cls.__name__}.is_type_of. " + "Please use one or other." 
+        )
+
+        if _meta.fields:
+            _meta.fields.update(fields)
+        else:
+            _meta.fields = fields
+        if not _meta.interfaces:
+            _meta.interfaces = interfaces
+        _meta.possible_types = possible_types
+        _meta.default_resolver = default_resolver
+
+        super(ObjectType, cls).__init_subclass_with_meta__(_meta=_meta, **options)
+
+    is_type_of = None
diff --git a/testbed/graphql-python__graphene/graphene/types/resolver.py b/testbed/graphql-python__graphene/graphene/types/resolver.py
new file mode 100644
index 0000000000000000000000000000000000000000..72d2edb889ca347f01d3f1361a7faf93e8b4295c
--- /dev/null
+++ b/testbed/graphql-python__graphene/graphene/types/resolver.py
@@ -0,0 +1,24 @@
+def attr_resolver(attname, default_value, root, info, **args):
+    return getattr(root, attname, default_value)
+
+
+def dict_resolver(attname, default_value, root, info, **args):
+    return root.get(attname, default_value)
+
+
+def dict_or_attr_resolver(attname, default_value, root, info, **args):
+    resolver = dict_resolver if isinstance(root, dict) else attr_resolver
+    return resolver(attname, default_value, root, info, **args)
+
+
+default_resolver = dict_or_attr_resolver
+
+
+def set_default_resolver(resolver):
+    global default_resolver
+    assert callable(resolver), "Received non-callable resolver."
+    default_resolver = resolver
+
+
+def get_default_resolver():
+    return default_resolver
diff --git a/testbed/graphql-python__graphene/graphene/types/scalars.py b/testbed/graphql-python__graphene/graphene/types/scalars.py
new file mode 100644
index 0000000000000000000000000000000000000000..a468bb3e67300ebed48af0a10fae4457b41e042d
--- /dev/null
+++ b/testbed/graphql-python__graphene/graphene/types/scalars.py
@@ -0,0 +1,195 @@
+from typing import Any
+
+from graphql import Undefined
+from graphql.language.ast import (
+    BooleanValueNode,
+    FloatValueNode,
+    IntValueNode,
+    StringValueNode,
+)
+
+from .base import BaseOptions, BaseType
+from .unmountedtype import UnmountedType
+
+
+class ScalarOptions(BaseOptions):
+    pass
+
+
+class Scalar(UnmountedType, BaseType):
+    """
+    Scalar Type Definition
+
+    The leaf values of any request and input values to arguments are
+    Scalars (or Enums) and are defined with a name and a series of functions
+    used to parse input from ast or variables and to ensure validity.
+    """
+
+    @classmethod
+    def __init_subclass_with_meta__(cls, **options):
+        _meta = ScalarOptions(cls)
+        super(Scalar, cls).__init_subclass_with_meta__(_meta=_meta, **options)
+
+    serialize = None
+    parse_value = None
+    parse_literal = None
+
+    @classmethod
+    def get_type(cls):
+        """
+        This function is called when the unmounted type (Scalar instance)
+        is mounted (as a Field, InputField or Argument)
+        """
+        return cls
+
+
+# As per the GraphQL Spec, Integers are only treated as valid when they fit
+# in a 32-bit signed integer, providing the broadest support across platforms.
+#
+# n.b. JavaScript's integers are safe between -(2^53 - 1) and 2^53 - 1 because
+# they are internally represented as IEEE 754 doubles.
+MAX_INT = 2147483647
+MIN_INT = -2147483648
+
+
+class Int(Scalar):
+    """
+    The `Int` scalar type represents non-fractional signed whole numeric
+    values. Int can represent values between -(2^31) and 2^31 - 1, the range
+    of a 32-bit signed integer required by the GraphQL specification; values
+    outside that range are rejected (see MAX_INT and MIN_INT above).
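+
+    A small sketch of the 32-bit bounds enforced below:
+
+    .. code:: python
+
+        from graphql import Undefined
+        from graphene.types.scalars import Int
+
+        assert Int.parse_value(2147483647) == 2147483647
+        assert Int.parse_value(2147483648) is Undefined  # out of range
+        assert Int.parse_value("12") == 12  # strings are coerced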
+ """ + + @staticmethod + def coerce_int(value): + try: + num = int(value) + except ValueError: + try: + num = int(float(value)) + except ValueError: + return Undefined + if MIN_INT <= num <= MAX_INT: + return num + return Undefined + + serialize = coerce_int + parse_value = coerce_int + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, IntValueNode): + num = int(ast.value) + if MIN_INT <= num <= MAX_INT: + return num + return Undefined + + +class BigInt(Scalar): + """ + The `BigInt` scalar type represents non-fractional whole numeric values. + `BigInt` is not constrained to 32-bit like the `Int` type and thus is a less + compatible type. + """ + + @staticmethod + def coerce_int(value): + try: + num = int(value) + except ValueError: + try: + num = int(float(value)) + except ValueError: + return Undefined + return num + + serialize = coerce_int + parse_value = coerce_int + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, IntValueNode): + return int(ast.value) + return Undefined + + +class Float(Scalar): + """ + The `Float` scalar type represents signed double-precision fractional + values as specified by + [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point). + """ + + @staticmethod + def coerce_float(value): + # type: (Any) -> float + try: + return float(value) + except ValueError: + return Undefined + + serialize = coerce_float + parse_value = coerce_float + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, (FloatValueNode, IntValueNode)): + return float(ast.value) + return Undefined + + +class String(Scalar): + """ + The `String` scalar type represents textual data, represented as UTF-8 + character sequences. The String type is most often used by GraphQL to + represent free-form human-readable text. + """ + + @staticmethod + def coerce_string(value): + if isinstance(value, bool): + return "true" if value else "false" + return str(value) + + serialize = coerce_string + parse_value = coerce_string + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, StringValueNode): + return ast.value + return Undefined + + +class Boolean(Scalar): + """ + The `Boolean` scalar type represents `true` or `false`. + """ + + serialize = bool + parse_value = bool + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, BooleanValueNode): + return ast.value + return Undefined + + +class ID(Scalar): + """ + The `ID` scalar type represents a unique identifier, often used to + refetch an object or as key for a cache. The ID type appears in a JSON + response as a String; however, it is not intended to be human-readable. + When expected as an input type, any string (such as `"4"`) or integer + (such as `4`) input value will be accepted as an ID. 
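+
+    A usage sketch (the ``Node`` type here is illustrative, not part of the
+    library):
+
+    .. code:: python
+
+        from graphene import ObjectType, ID, String
+
+        class Node(ObjectType):
+            id = ID(required=True)
+            name = String()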
+ """ + + serialize = str + parse_value = str + + @staticmethod + def parse_literal(ast, _variables=None): + if isinstance(ast, (StringValueNode, IntValueNode)): + return ast.value + return Undefined diff --git a/testbed/graphql-python__graphene/graphene/types/schema.py b/testbed/graphql-python__graphene/graphene/types/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..bceede6ae791b1cacc4661a8a3e71d914ea8e9e1 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/schema.py @@ -0,0 +1,535 @@ +from enum import Enum as PyEnum +import inspect +from functools import partial + +from graphql import ( + default_type_resolver, + get_introspection_query, + graphql, + graphql_sync, + introspection_types, + parse, + print_schema, + subscribe, + validate, + ExecutionResult, + GraphQLArgument, + GraphQLBoolean, + GraphQLError, + GraphQLEnumValue, + GraphQLField, + GraphQLFloat, + GraphQLID, + GraphQLInputField, + GraphQLInt, + GraphQLList, + GraphQLNonNull, + GraphQLObjectType, + GraphQLSchema, + GraphQLString, +) + +from ..utils.str_converters import to_camel_case +from ..utils.get_unbound_function import get_unbound_function +from .definitions import ( + GrapheneEnumType, + GrapheneGraphQLType, + GrapheneInputObjectType, + GrapheneInterfaceType, + GrapheneObjectType, + GrapheneScalarType, + GrapheneUnionType, +) +from .dynamic import Dynamic +from .enum import Enum +from .field import Field +from .inputobjecttype import InputObjectType +from .interface import Interface +from .objecttype import ObjectType +from .resolver import get_default_resolver +from .scalars import ID, Boolean, Float, Int, Scalar, String +from .structures import List, NonNull +from .union import Union +from .utils import get_field_as + +introspection_query = get_introspection_query() +IntrospectionSchema = introspection_types["__Schema"] + + +def assert_valid_root_type(type_): + if type_ is None: + return + is_graphene_objecttype = inspect.isclass(type_) and issubclass(type_, ObjectType) + is_graphql_objecttype = isinstance(type_, GraphQLObjectType) + assert ( + is_graphene_objecttype or is_graphql_objecttype + ), f"Type {type_} is not a valid ObjectType." 
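+
+# A sketch of what assert_valid_root_type accepts (the Query class below is
+# illustrative, not part of this module): a Graphene ObjectType subclass, a
+# bare graphql-core GraphQLObjectType, or None all pass; anything else raises.
+#
+#     class Query(ObjectType):
+#         name = String()
+#
+#     assert_valid_root_type(Query)    # OK: Graphene ObjectType subclass
+#     assert_valid_root_type(None)     # OK: missing root types are skipped
+#     assert_valid_root_type("Query")  # AssertionError: not a valid ObjectType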
+ + +def is_graphene_type(type_): + if isinstance(type_, (List, NonNull)): + return True + if inspect.isclass(type_) and issubclass( + type_, (ObjectType, InputObjectType, Scalar, Interface, Union, Enum) + ): + return True + + +def is_type_of_from_possible_types(possible_types, root, _info): + return isinstance(root, possible_types) + + +# We use this resolver for subscriptions +def identity_resolve(root, info, **arguments): + return root + + +class TypeMap(dict): + def __init__( + self, + query=None, + mutation=None, + subscription=None, + types=None, + auto_camelcase=True, + ): + assert_valid_root_type(query) + assert_valid_root_type(mutation) + assert_valid_root_type(subscription) + if types is None: + types = [] + for type_ in types: + assert is_graphene_type(type_) + + self.auto_camelcase = auto_camelcase + + create_graphql_type = self.add_type + + self.query = create_graphql_type(query) if query else None + self.mutation = create_graphql_type(mutation) if mutation else None + self.subscription = create_graphql_type(subscription) if subscription else None + + self.types = [create_graphql_type(graphene_type) for graphene_type in types] + + def add_type(self, graphene_type): + if inspect.isfunction(graphene_type): + graphene_type = graphene_type() + if isinstance(graphene_type, List): + return GraphQLList(self.add_type(graphene_type.of_type)) + if isinstance(graphene_type, NonNull): + return GraphQLNonNull(self.add_type(graphene_type.of_type)) + try: + name = graphene_type._meta.name + except AttributeError: + raise TypeError(f"Expected Graphene type, but received: {graphene_type}.") + graphql_type = self.get(name) + if graphql_type: + return graphql_type + if issubclass(graphene_type, ObjectType): + graphql_type = self.create_objecttype(graphene_type) + elif issubclass(graphene_type, InputObjectType): + graphql_type = self.create_inputobjecttype(graphene_type) + elif issubclass(graphene_type, Interface): + graphql_type = self.create_interface(graphene_type) + elif issubclass(graphene_type, Scalar): + graphql_type = self.create_scalar(graphene_type) + elif issubclass(graphene_type, Enum): + graphql_type = self.create_enum(graphene_type) + elif issubclass(graphene_type, Union): + graphql_type = self.construct_union(graphene_type) + else: + raise TypeError(f"Expected Graphene type, but received: {graphene_type}.") + self[name] = graphql_type + return graphql_type + + @staticmethod + def create_scalar(graphene_type): + # We have a mapping to the original GraphQL types + # so there are no collisions. 
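+        # (e.g. graphene.String is emitted as graphql-core's GraphQLString, so
+        # schemas that use only built-in scalars share type identity with
+        # graphql-core; custom scalars get a GrapheneScalarType wrapper below)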
+ _scalars = { + String: GraphQLString, + Int: GraphQLInt, + Float: GraphQLFloat, + Boolean: GraphQLBoolean, + ID: GraphQLID, + } + if graphene_type in _scalars: + return _scalars[graphene_type] + + return GrapheneScalarType( + graphene_type=graphene_type, + name=graphene_type._meta.name, + description=graphene_type._meta.description, + serialize=getattr(graphene_type, "serialize", None), + parse_value=getattr(graphene_type, "parse_value", None), + parse_literal=getattr(graphene_type, "parse_literal", None), + ) + + @staticmethod + def create_enum(graphene_type): + values = {} + for name, value in graphene_type._meta.enum.__members__.items(): + description = getattr(value, "description", None) + # if the "description" attribute is an Enum, it is likely an enum member + # called description, not a description property + if isinstance(description, PyEnum): + description = None + if not description and callable(graphene_type._meta.description): + description = graphene_type._meta.description(value) + + deprecation_reason = getattr(value, "deprecation_reason", None) + if isinstance(deprecation_reason, PyEnum): + deprecation_reason = None + if not deprecation_reason and callable( + graphene_type._meta.deprecation_reason + ): + deprecation_reason = graphene_type._meta.deprecation_reason(value) + + values[name] = GraphQLEnumValue( + value=value, + description=description, + deprecation_reason=deprecation_reason, + ) + + type_description = ( + graphene_type._meta.description(None) + if callable(graphene_type._meta.description) + else graphene_type._meta.description + ) + + return GrapheneEnumType( + graphene_type=graphene_type, + values=values, + name=graphene_type._meta.name, + description=type_description, + ) + + def create_objecttype(self, graphene_type): + create_graphql_type = self.add_type + + def interfaces(): + interfaces = [] + for graphene_interface in graphene_type._meta.interfaces: + interface = create_graphql_type(graphene_interface) + assert interface.graphene_type == graphene_interface + interfaces.append(interface) + return interfaces + + if graphene_type._meta.possible_types: + is_type_of = partial( + is_type_of_from_possible_types, graphene_type._meta.possible_types + ) + else: + is_type_of = graphene_type.is_type_of + + return GrapheneObjectType( + graphene_type=graphene_type, + name=graphene_type._meta.name, + description=graphene_type._meta.description, + fields=partial(self.create_fields_for_type, graphene_type), + is_type_of=is_type_of, + interfaces=interfaces, + ) + + def create_interface(self, graphene_type): + resolve_type = ( + partial( + self.resolve_type, graphene_type.resolve_type, graphene_type._meta.name + ) + if graphene_type.resolve_type + else None + ) + + def interfaces(): + interfaces = [] + for graphene_interface in graphene_type._meta.interfaces: + interface = self.add_type(graphene_interface) + assert interface.graphene_type == graphene_interface + interfaces.append(interface) + return interfaces + + return GrapheneInterfaceType( + graphene_type=graphene_type, + name=graphene_type._meta.name, + description=graphene_type._meta.description, + fields=partial(self.create_fields_for_type, graphene_type), + interfaces=interfaces, + resolve_type=resolve_type, + ) + + def create_inputobjecttype(self, graphene_type): + return GrapheneInputObjectType( + graphene_type=graphene_type, + name=graphene_type._meta.name, + description=graphene_type._meta.description, + out_type=graphene_type._meta.container, + fields=partial( + self.create_fields_for_type, graphene_type, 
is_input_type=True + ), + ) + + def construct_union(self, graphene_type): + create_graphql_type = self.add_type + + def types(): + union_types = [] + for graphene_objecttype in graphene_type._meta.types: + object_type = create_graphql_type(graphene_objecttype) + assert object_type.graphene_type == graphene_objecttype + union_types.append(object_type) + return union_types + + resolve_type = ( + partial( + self.resolve_type, graphene_type.resolve_type, graphene_type._meta.name + ) + if graphene_type.resolve_type + else None + ) + + return GrapheneUnionType( + graphene_type=graphene_type, + name=graphene_type._meta.name, + description=graphene_type._meta.description, + types=types, + resolve_type=resolve_type, + ) + + def get_name(self, name): + if self.auto_camelcase: + return to_camel_case(name) + return name + + def create_fields_for_type(self, graphene_type, is_input_type=False): + create_graphql_type = self.add_type + + fields = {} + for name, field in graphene_type._meta.fields.items(): + if isinstance(field, Dynamic): + field = get_field_as(field.get_type(self), _as=Field) + if not field: + continue + field_type = create_graphql_type(field.type) + if is_input_type: + _field = GraphQLInputField( + field_type, + default_value=field.default_value, + out_name=name, + description=field.description, + deprecation_reason=field.deprecation_reason, + ) + else: + args = {} + for arg_name, arg in field.args.items(): + arg_type = create_graphql_type(arg.type) + processed_arg_name = arg.name or self.get_name(arg_name) + args[processed_arg_name] = GraphQLArgument( + arg_type, + out_name=arg_name, + description=arg.description, + default_value=arg.default_value, + deprecation_reason=arg.deprecation_reason, + ) + subscribe = field.wrap_subscribe( + self.get_function_for_type( + graphene_type, f"subscribe_{name}", name, field.default_value + ) + ) + + # If we are in a subscription, we use (by default) an + # identity-based resolver for the root, rather than the + # default resolver for objects/dicts. 
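+                # (the root passed to a subscription resolver is the payload
+                # already yielded by the "subscribe_*" source, so
+                # identity_resolve returns it unchanged)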
+ if subscribe: + field_default_resolver = identity_resolve + elif issubclass(graphene_type, ObjectType): + default_resolver = ( + graphene_type._meta.default_resolver or get_default_resolver() + ) + field_default_resolver = partial( + default_resolver, name, field.default_value + ) + else: + field_default_resolver = None + + resolve = field.wrap_resolve( + self.get_function_for_type( + graphene_type, f"resolve_{name}", name, field.default_value + ) + or field_default_resolver + ) + + _field = GraphQLField( + field_type, + args=args, + resolve=resolve, + subscribe=subscribe, + deprecation_reason=field.deprecation_reason, + description=field.description, + ) + field_name = field.name or self.get_name(name) + fields[field_name] = _field + return fields + + def get_function_for_type(self, graphene_type, func_name, name, default_value): + """Gets a resolve or subscribe function for a given ObjectType""" + if not issubclass(graphene_type, ObjectType): + return + resolver = getattr(graphene_type, func_name, None) + if not resolver: + # If we don't find the resolver in the ObjectType class, then try to + # find it in each of the interfaces + interface_resolver = None + for interface in graphene_type._meta.interfaces: + if name not in interface._meta.fields: + continue + interface_resolver = getattr(interface, func_name, None) + if interface_resolver: + break + resolver = interface_resolver + + # Only if is not decorated with classmethod + if resolver: + return get_unbound_function(resolver) + + def resolve_type(self, resolve_type_func, type_name, root, info, _type): + type_ = resolve_type_func(root, info) + + if inspect.isclass(type_) and issubclass(type_, ObjectType): + return type_._meta.name + + return_type = self[type_name] + return default_type_resolver(root, info, return_type) + + +class Schema: + """Schema Definition. + A Graphene Schema can execute operations (query, mutation, subscription) against the defined + types. For advanced purposes, the schema can be used to lookup type definitions and answer + questions about the types through introspection. + Args: + query (Type[ObjectType]): Root query *ObjectType*. Describes entry point for fields to *read* + data in your Schema. + mutation (Optional[Type[ObjectType]]): Root mutation *ObjectType*. Describes entry point for + fields to *create, update or delete* data in your API. + subscription (Optional[Type[ObjectType]]): Root subscription *ObjectType*. Describes entry point + for fields to receive continuous updates. + types (Optional[List[Type[ObjectType]]]): List of any types to include in schema that + may not be introspected through root types. + directives (List[GraphQLDirective], optional): List of custom directives to include in the + GraphQL schema. Defaults to only include directives defined by GraphQL spec (@include + and @skip) [GraphQLIncludeDirective, GraphQLSkipDirective]. + auto_camelcase (bool): Fieldnames will be transformed in Schema's TypeMap from snake_case + to camelCase (preferred by GraphQL standard). Default True. 
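+
+    A minimal sketch (the ``Query`` type below is illustrative):
+
+    .. code:: python
+
+        from graphene import ObjectType, String, Schema
+
+        class Query(ObjectType):
+            hello = String()
+
+            def resolve_hello(root, info):
+                return "Hello world!"
+
+        schema = Schema(query=Query)
+        result = schema.execute("{ hello }")
+        assert result.data == {"hello": "Hello world!"}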
+    """
+
+    def __init__(
+        self,
+        query=None,
+        mutation=None,
+        subscription=None,
+        types=None,
+        directives=None,
+        auto_camelcase=True,
+    ):
+        self.query = query
+        self.mutation = mutation
+        self.subscription = subscription
+        type_map = TypeMap(
+            query, mutation, subscription, types, auto_camelcase=auto_camelcase
+        )
+        self.graphql_schema = GraphQLSchema(
+            type_map.query,
+            type_map.mutation,
+            type_map.subscription,
+            type_map.types,
+            directives,
+        )
+
+    def __str__(self):
+        return print_schema(self.graphql_schema)
+
+    def __getattr__(self, type_name):
+        """
+        This function lets the developer select a type in a given schema
+        by accessing its attributes.
+        Example: use schema.Query to access the "Query" type in the Schema.
+        """
+        _type = self.graphql_schema.get_type(type_name)
+        if _type is None:
+            raise AttributeError(f'Type "{type_name}" not found in the Schema')
+        if isinstance(_type, GrapheneGraphQLType):
+            return _type.graphene_type
+        return _type
+
+    def lazy(self, _type):
+        return lambda: self.get_type(_type)
+
+    def execute(self, *args, **kwargs):
+        """Execute a GraphQL query on the schema.
+        Use the `graphql_sync` function from `graphql-core` to provide the result
+        for a query string. Most of the time this method will be called by one of the Graphene
+        :ref:`Integrations` via a web request.
+        Args:
+            request_string (str or Document): GraphQL request (query, mutation or subscription)
+                as string or parsed AST form from `graphql-core`.
+            root_value (Any, optional): Value to use as the parent value object when resolving
+                root types.
+            context_value (Any, optional): Value to be made available to all resolvers via
+                `info.context`. Can be used to share authorization, dataloaders or other
+                information needed to resolve an operation.
+            variable_values (dict, optional): If variables are used in the request string, they can
+                be provided in dictionary form mapping the variable name to the variable value.
+            operation_name (str, optional): If multiple operations are provided in the
+                request_string, an operation name must be provided to select which one to execute.
+            middleware (List[SupportsGraphQLMiddleware]): Supply request-level middleware as
+                defined in `graphql-core`.
+            execution_context_class (ExecutionContext, optional): The execution context class
+                to use when resolving queries and mutations.
+        Returns:
+            :obj:`ExecutionResult` containing any data and errors for the operation.
+        """
+        kwargs = normalize_execute_kwargs(kwargs)
+        return graphql_sync(self.graphql_schema, *args, **kwargs)
+
+    async def execute_async(self, *args, **kwargs):
+        """Execute a GraphQL query on the schema asynchronously.
+        Same as `execute`, but uses `graphql` instead of `graphql_sync`.
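+
+        A usage sketch (reusing the illustrative ``schema`` from the class
+        docstring above):
+
+        .. code:: python
+
+            import asyncio
+
+            async def main():
+                result = await schema.execute_async("{ hello }")
+                assert result.data == {"hello": "Hello world!"}
+
+            asyncio.run(main())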
+ """ + kwargs = normalize_execute_kwargs(kwargs) + return await graphql(self.graphql_schema, *args, **kwargs) + + async def subscribe(self, query, *args, **kwargs): + """Execute a GraphQL subscription on the schema asynchronously.""" + # Do parsing + try: + document = parse(query) + except GraphQLError as error: + return ExecutionResult(data=None, errors=[error]) + + # Do validation + validation_errors = validate(self.graphql_schema, document) + if validation_errors: + return ExecutionResult(data=None, errors=validation_errors) + + # Execute the query + kwargs = normalize_execute_kwargs(kwargs) + return await subscribe(self.graphql_schema, document, *args, **kwargs) + + def introspect(self): + introspection = self.execute(introspection_query) + if introspection.errors: + raise introspection.errors[0] + return introspection.data + + +def normalize_execute_kwargs(kwargs): + """Replace alias names in keyword arguments for graphql()""" + if "root" in kwargs and "root_value" not in kwargs: + kwargs["root_value"] = kwargs.pop("root") + if "context" in kwargs and "context_value" not in kwargs: + kwargs["context_value"] = kwargs.pop("context") + if "variables" in kwargs and "variable_values" not in kwargs: + kwargs["variable_values"] = kwargs.pop("variables") + if "operation" in kwargs and "operation_name" not in kwargs: + kwargs["operation_name"] = kwargs.pop("operation") + return kwargs diff --git a/testbed/graphql-python__graphene/graphene/types/structures.py b/testbed/graphql-python__graphene/graphene/types/structures.py new file mode 100644 index 0000000000000000000000000000000000000000..a6763978e963cbc9cd6e82a550e57e3154cdcd15 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/structures.py @@ -0,0 +1,99 @@ +from .unmountedtype import UnmountedType +from .utils import get_type + + +class Structure(UnmountedType): + """ + A structure is a GraphQL type instance that + wraps a main type with certain structure. + """ + + def __init__(self, of_type, *args, **kwargs): + super(Structure, self).__init__(*args, **kwargs) + if not isinstance(of_type, Structure) and isinstance(of_type, UnmountedType): + cls_name = type(self).__name__ + of_type_name = type(of_type).__name__ + raise Exception( + f"{cls_name} could not have a mounted {of_type_name}()" + f" as inner type. Try with {cls_name}({of_type_name})." + ) + self._of_type = of_type + + @property + def of_type(self): + return get_type(self._of_type) + + def get_type(self): + """ + This function is called when the unmounted type (List or NonNull instance) + is mounted (as a Field, InputField or Argument) + """ + return self + + +class List(Structure): + """ + List Modifier + + A list is a kind of type marker, a wrapping type which points to another + type. Lists are often created within the context of defining the fields of + an object type. + + List indicates that many values will be returned (or input) for this field. + + .. code:: python + + from graphene import List, String + + field_name = List(String, description="There will be many values") + """ + + def __str__(self): + return f"[{self.of_type}]" + + def __eq__(self, other): + return isinstance(other, List) and ( + self.of_type == other.of_type + and self.args == other.args + and self.kwargs == other.kwargs + ) + + +class NonNull(Structure): + """ + Non-Null Modifier + + A non-null is a kind of type marker, a wrapping type which points to another + type. 
Non-null types enforce that their values are never null and can ensure + an error is raised if this ever occurs during a request. It is useful for + fields which you can make a strong guarantee on non-nullability, for example + usually the id field of a database row will never be null. + + Note: the enforcement of non-nullability occurs within the executor. + + NonNull can also be indicated on all Mounted types with the keyword argument ``required``. + + .. code:: python + + from graphene import NonNull, String + + field_name = NonNull(String, description='This field will not be null') + another_field = String(required=True, description='This is equivalent to the above') + + """ + + def __init__(self, *args, **kwargs): + super(NonNull, self).__init__(*args, **kwargs) + assert not isinstance( + self._of_type, NonNull + ), f"Can only create NonNull of a Nullable GraphQLType but got: {self._of_type}." + + def __str__(self): + return f"{self.of_type}!" + + def __eq__(self, other): + return isinstance(other, NonNull) and ( + self.of_type == other.of_type + and self.args == other.args + and self.kwargs == other.kwargs + ) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/__init__.py b/testbed/graphql-python__graphene/graphene/types/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_argument.py b/testbed/graphql-python__graphene/graphene/types/tests/test_argument.py new file mode 100644 index 0000000000000000000000000000000000000000..c5521b6c25a7b487411e68c39537cb23fcb0ba2e --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_argument.py @@ -0,0 +1,112 @@ +from functools import partial + +from pytest import raises + +from ..argument import Argument, to_arguments +from ..field import Field +from ..inputfield import InputField +from ..scalars import String +from ..structures import NonNull + + +def test_argument(): + arg = Argument(String, default_value="a", description="desc", name="b") + assert arg.type == String + assert arg.default_value == "a" + assert arg.description == "desc" + assert arg.name == "b" + + +def test_argument_comparasion(): + arg1 = Argument( + String, + name="Hey", + description="Desc", + default_value="default", + deprecation_reason="deprecated", + ) + arg2 = Argument( + String, + name="Hey", + description="Desc", + default_value="default", + deprecation_reason="deprecated", + ) + + assert arg1 == arg2 + assert arg1 != String() + + +def test_argument_required(): + arg = Argument(String, required=True) + assert arg.type == NonNull(String) + + +def test_to_arguments(): + args = {"arg_string": Argument(String), "unmounted_arg": String(required=True)} + + my_args = to_arguments(args) + assert my_args == { + "arg_string": Argument(String), + "unmounted_arg": Argument(String, required=True), + } + + +def test_to_arguments_deprecated(): + args = {"unmounted_arg": String(required=False, deprecation_reason="deprecated")} + + my_args = to_arguments(args) + assert my_args == { + "unmounted_arg": Argument( + String, required=False, deprecation_reason="deprecated" + ), + } + + +def test_to_arguments_required_deprecated(): + args = { + "unmounted_arg": String( + required=True, name="arg", deprecation_reason="deprecated" + ) + } + + with raises(AssertionError) as exc_info: + to_arguments(args) + + assert str(exc_info.value) == "Argument arg is required, cannot deprecate it." 
+ + +def test_to_arguments_raises_if_field(): + args = {"arg_string": Field(String)} + + with raises(ValueError) as exc_info: + to_arguments(args) + + assert str(exc_info.value) == ( + "Expected arg_string to be Argument, but received Field. Try using " + "Argument(String)." + ) + + +def test_to_arguments_raises_if_inputfield(): + args = {"arg_string": InputField(String)} + + with raises(ValueError) as exc_info: + to_arguments(args) + + assert str(exc_info.value) == ( + "Expected arg_string to be Argument, but received InputField. Try " + "using Argument(String)." + ) + + +def test_argument_with_lazy_type(): + MyType = object() + arg = Argument(lambda: MyType) + assert arg.type == MyType + + +def test_argument_with_lazy_partial_type(): + MyType = object() + arg = Argument(partial(lambda: MyType)) + assert arg.type == MyType diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_base.py b/testbed/graphql-python__graphene/graphene/types/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..2f70903cacb809d98eafa9505423516b024a0110 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_base.py @@ -0,0 +1,63 @@ +from ..base import BaseOptions, BaseType + + +class CustomOptions(BaseOptions): + pass + + +class CustomType(BaseType): + @classmethod + def __init_subclass_with_meta__(cls, **options): + _meta = CustomOptions(cls) + super(CustomType, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + +def test_basetype(): + class MyBaseType(CustomType): + pass + + assert isinstance(MyBaseType._meta, CustomOptions) + assert MyBaseType._meta.name == "MyBaseType" + assert MyBaseType._meta.description is None + + +def test_basetype_nones(): + class MyBaseType(CustomType): + """Documentation""" + + class Meta: + name = None + description = None + + assert isinstance(MyBaseType._meta, CustomOptions) + assert MyBaseType._meta.name == "MyBaseType" + assert MyBaseType._meta.description == "Documentation" + + +def test_basetype_custom(): + class MyBaseType(CustomType): + """Documentation""" + + class Meta: + name = "Base" + description = "Desc" + + assert isinstance(MyBaseType._meta, CustomOptions) + assert MyBaseType._meta.name == "Base" + assert MyBaseType._meta.description == "Desc" + + +def test_basetype_create(): + MyBaseType = CustomType.create_type("MyBaseType") + + assert isinstance(MyBaseType._meta, CustomOptions) + assert MyBaseType._meta.name == "MyBaseType" + assert MyBaseType._meta.description is None + + +def test_basetype_create_extra(): + MyBaseType = CustomType.create_type("MyBaseType", name="Base", description="Desc") + + assert isinstance(MyBaseType._meta, CustomOptions) + assert MyBaseType._meta.name == "Base" + assert MyBaseType._meta.description == "Desc" diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_base64.py b/testbed/graphql-python__graphene/graphene/types/tests/test_base64.py new file mode 100644 index 0000000000000000000000000000000000000000..433f63c340817fc74476ef95ad84cb44468e5aeb --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_base64.py @@ -0,0 +1,97 @@ +import base64 + +from graphql import GraphQLError + +from ..objecttype import ObjectType +from ..scalars import String +from ..schema import Schema +from ..base64 import Base64 + + +class Query(ObjectType): + base64 = Base64(_in=Base64(name="input"), _match=String(name="match")) + bytes_as_base64 = Base64() + string_as_base64 = Base64() + number_as_base64 = Base64() + + def 
resolve_base64(self, info, _in=None, _match=None): + if _match: + assert _in == _match + return _in + + def resolve_bytes_as_base64(self, info): + return b"Hello world" + + def resolve_string_as_base64(self, info): + return "Spam and eggs" + + def resolve_number_as_base64(self, info): + return 42 + + +schema = Schema(query=Query) + + +def test_base64_query(): + base64_value = base64.b64encode(b"Random string").decode("utf-8") + result = schema.execute( + """{{ base64(input: "{}", match: "Random string") }}""".format(base64_value) + ) + assert not result.errors + assert result.data == {"base64": base64_value} + + +def test_base64_query_with_variable(): + base64_value = base64.b64encode(b"Another string").decode("utf-8") + + # test datetime variable in string representation + result = schema.execute( + """ + query GetBase64($base64: Base64) { + base64(input: $base64, match: "Another string") + } + """, + variables={"base64": base64_value}, + ) + assert not result.errors + assert result.data == {"base64": base64_value} + + +def test_base64_query_none(): + result = schema.execute("""{ base64 }""") + assert not result.errors + assert result.data == {"base64": None} + + +def test_base64_query_invalid(): + bad_inputs = [dict(), 123, "This is not valid base64"] + + for input_ in bad_inputs: + result = schema.execute( + """{ base64(input: $input) }""", variables={"input": input_} + ) + assert isinstance(result.errors, list) + assert len(result.errors) == 1 + assert isinstance(result.errors[0], GraphQLError) + assert result.data is None + + +def test_base64_from_bytes(): + base64_value = base64.b64encode(b"Hello world").decode("utf-8") + result = schema.execute("""{ bytesAsBase64 }""") + assert not result.errors + assert result.data == {"bytesAsBase64": base64_value} + + +def test_base64_from_string(): + base64_value = base64.b64encode(b"Spam and eggs").decode("utf-8") + result = schema.execute("""{ stringAsBase64 }""") + assert not result.errors + assert result.data == {"stringAsBase64": base64_value} + + +def test_base64_from_number(): + base64_value = base64.b64encode(b"42").decode("utf-8") + result = schema.execute("""{ numberAsBase64 }""") + assert not result.errors + assert result.data == {"numberAsBase64": base64_value} diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_datetime.py b/testbed/graphql-python__graphene/graphene/types/tests/test_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..74f88bd883ac98dd87879b7d34f8440de426d24d --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_datetime.py @@ -0,0 +1,263 @@ +import datetime + +import pytz +from graphql import GraphQLError + +from pytest import fixture + +from ..datetime import Date, DateTime, Time +from ..objecttype import ObjectType +from ..schema import Schema + + +class Query(ObjectType): + datetime = DateTime(_in=DateTime(name="in")) + date = Date(_in=Date(name="in")) + time = Time(_at=Time(name="at")) + + def resolve_datetime(self, info, _in=None): + return _in + + def resolve_date(self, info, _in=None): + return _in + + def resolve_time(self, info, _at=None): + return _at + + +schema = Schema(query=Query) + + +@fixture +def sample_datetime(): + utc_datetime = datetime.datetime(2019, 5, 25, 5, 30, 15, 10, pytz.utc) + return utc_datetime + + +@fixture +def sample_time(sample_datetime): + time = datetime.time( + sample_datetime.hour, + sample_datetime.minute, + sample_datetime.second, + sample_datetime.microsecond, + sample_datetime.tzinfo, + ) + return time 
+ + +@fixture +def sample_date(sample_datetime): + date = sample_datetime.date() + return date + + +def test_datetime_query(sample_datetime): + isoformat = sample_datetime.isoformat() + + result = schema.execute("""{ datetime(in: "%s") }""" % isoformat) + assert not result.errors + assert result.data == {"datetime": isoformat} + + +def test_datetime_query_with_variables(sample_datetime): + isoformat = sample_datetime.isoformat() + + result = schema.execute( + """ + query GetDate($datetime: DateTime) { + literal: datetime(in: "%s") + value: datetime(in: $datetime) + } + """ + % isoformat, + variable_values={"datetime": isoformat}, + ) + assert not result.errors + assert result.data == {"literal": isoformat, "value": isoformat} + + +def test_date_query(sample_date): + isoformat = sample_date.isoformat() + + result = schema.execute("""{ date(in: "%s") }""" % isoformat) + assert not result.errors + assert result.data == {"date": isoformat} + + +def test_date_query_with_variables(sample_date): + isoformat = sample_date.isoformat() + + result = schema.execute( + """ + query GetDate($date: Date) { + literal: date(in: "%s") + value: date(in: $date) + } + """ + % isoformat, + variable_values={"date": isoformat}, + ) + assert not result.errors + assert result.data == {"literal": isoformat, "value": isoformat} + + +def test_time_query(sample_time): + isoformat = sample_time.isoformat() + + result = schema.execute("""{ time(at: "%s") }""" % isoformat) + assert not result.errors + assert result.data == {"time": isoformat} + + +def test_time_query_with_variables(sample_time): + isoformat = sample_time.isoformat() + + result = schema.execute( + """ + query GetTime($time: Time) { + literal: time(at: "%s") + value: time(at: $time) + } + """ + % isoformat, + variable_values={"time": isoformat}, + ) + assert not result.errors + assert result.data == {"literal": isoformat, "value": isoformat} + + +def test_bad_datetime_query(): + not_a_date = "Some string that's not a datetime" + + result = schema.execute("""{ datetime(in: "%s") }""" % not_a_date) + + assert result.errors and len(result.errors) == 1 + error = result.errors[0] + assert isinstance(error, GraphQLError) + assert ( + error.message == "DateTime cannot represent value:" + ' "Some string that\'s not a datetime"' + ) + assert result.data is None + + +def test_bad_date_query(): + not_a_date = "Some string that's not a date" + + result = schema.execute("""{ date(in: "%s") }""" % not_a_date) + + error = result.errors[0] + assert isinstance(error, GraphQLError) + assert ( + error.message == "Date cannot represent value:" + ' "Some string that\'s not a date"' + ) + assert result.data is None + + +def test_bad_time_query(): + not_a_date = "Some string that's not a time" + + result = schema.execute("""{ time(at: "%s") }""" % not_a_date) + + error = result.errors[0] + assert isinstance(error, GraphQLError) + assert ( + error.message == "Time cannot represent value:" + ' "Some string that\'s not a time"' + ) + assert result.data is None + + +def test_datetime_query_variable(sample_datetime): + isoformat = sample_datetime.isoformat() + + # test datetime variable provided as Python datetime + result = schema.execute( + """query Test($date: DateTime){ datetime(in: $date) }""", + variables={"date": sample_datetime}, + ) + assert not result.errors + assert result.data == {"datetime": isoformat} + + # test datetime variable in string representation + result = schema.execute( + """query Test($date: DateTime){ datetime(in: $date) }""", + variables={"date": isoformat}, 
+ ) + assert not result.errors + assert result.data == {"datetime": isoformat} + + +def test_date_query_variable(sample_date): + isoformat = sample_date.isoformat() + + # test date variable provided as Python date + result = schema.execute( + """query Test($date: Date){ date(in: $date) }""", + variables={"date": sample_date}, + ) + assert not result.errors + assert result.data == {"date": isoformat} + + # test date variable in string representation + result = schema.execute( + """query Test($date: Date){ date(in: $date) }""", variables={"date": isoformat} + ) + assert not result.errors + assert result.data == {"date": isoformat} + + +def test_time_query_variable(sample_time): + isoformat = sample_time.isoformat() + + # test time variable provided as Python time + result = schema.execute( + """query Test($time: Time){ time(at: $time) }""", + variables={"time": sample_time}, + ) + assert not result.errors + assert result.data == {"time": isoformat} + + # test time variable in string representation + result = schema.execute( + """query Test($time: Time){ time(at: $time) }""", variables={"time": isoformat} + ) + assert not result.errors + assert result.data == {"time": isoformat} + + +def test_bad_variables(sample_date, sample_datetime, sample_time): + def _test_bad_variables(type_, input_): + result = schema.execute( + f"""query Test($input: {type_}){{ {type_.lower()}(in: $input) }}""", + variables={"input": input_}, + ) + assert isinstance(result.errors, list) + assert len(result.errors) == 1 + assert isinstance(result.errors[0], GraphQLError) + assert result.data is None + + not_a_date = dict() + not_a_date_str = "Some string that's not a date" + today = sample_date + now = sample_datetime + time = sample_time + + bad_pairs = [ + ("DateTime", not_a_date), + ("DateTime", not_a_date_str), + ("DateTime", today), + ("DateTime", time), + ("Date", not_a_date), + ("Date", not_a_date_str), + ("Date", time), + ("Time", not_a_date), + ("Time", not_a_date_str), + ("Time", now), + ("Time", today), + ] + + for type_, input_ in bad_pairs: + _test_bad_variables(type_, input_) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_decimal.py b/testbed/graphql-python__graphene/graphene/types/tests/test_decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..1ba48bd1d224475d652210e7a28dfebd9e0d2950 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_decimal.py @@ -0,0 +1,68 @@ +import decimal + +from ..decimal import Decimal +from ..objecttype import ObjectType +from ..schema import Schema + + +class Query(ObjectType): + decimal = Decimal(input=Decimal()) + + def resolve_decimal(self, info, input): + return input + + +schema = Schema(query=Query) + + +def test_decimal_string_query(): + decimal_value = decimal.Decimal("1969.1974") + result = schema.execute("""{ decimal(input: "%s") }""" % decimal_value) + assert not result.errors + assert result.data == {"decimal": str(decimal_value)} + assert decimal.Decimal(result.data["decimal"]) == decimal_value + + +def test_decimal_string_query_variable(): + decimal_value = decimal.Decimal("1969.1974") + + result = schema.execute( + """query Test($decimal: Decimal){ decimal(input: $decimal) }""", + variables={"decimal": decimal_value}, + ) + assert not result.errors + assert result.data == {"decimal": str(decimal_value)} + assert decimal.Decimal(result.data["decimal"]) == decimal_value + + +def test_bad_decimal_query(): + not_a_decimal = "Nobody expects the Spanish Inquisition!" 
+ + result = schema.execute("""{ decimal(input: "%s") }""" % not_a_decimal) + assert result.errors + assert len(result.errors) == 1 + assert result.data is None + assert ( + result.errors[0].message + == "Expected value of type 'Decimal', found \"Nobody expects the Spanish Inquisition!\"." + ) + + result = schema.execute("{ decimal(input: true) }") + assert result.errors + assert len(result.errors) == 1 + assert result.data is None + assert result.errors[0].message == "Expected value of type 'Decimal', found true." + + result = schema.execute("{ decimal(input: 1.2) }") + assert result.errors + assert len(result.errors) == 1 + assert result.data is None + assert result.errors[0].message == "Expected value of type 'Decimal', found 1.2." + + +def test_decimal_string_query_integer(): + decimal_value = 1 + result = schema.execute("""{ decimal(input: %s) }""" % decimal_value) + assert not result.errors + assert result.data == {"decimal": str(decimal_value)} + assert decimal.Decimal(result.data["decimal"]) == decimal_value diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_definition.py b/testbed/graphql-python__graphene/graphene/types/tests/test_definition.py new file mode 100644 index 0000000000000000000000000000000000000000..898fac71b69c673e9734aecb79888106a1d8e678 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_definition.py @@ -0,0 +1,330 @@ +import copy + +from ..argument import Argument +from ..definitions import GrapheneGraphQLType +from ..enum import Enum +from ..field import Field +from ..inputfield import InputField +from ..inputobjecttype import InputObjectType +from ..interface import Interface +from ..objecttype import ObjectType +from ..scalars import Boolean, Int, String +from ..schema import Schema +from ..structures import List, NonNull +from ..union import Union + + +class Image(ObjectType): + url = String() + width = Int() + height = Int() + + +class Author(ObjectType): + id = String() + name = String() + pic = Field(Image, width=Int(), height=Int()) + recent_article = Field(lambda: Article) + + +class Article(ObjectType): + id = String() + is_published = Boolean() + author = Field(Author) + title = String() + body = String() + + +class Query(ObjectType): + article = Field(Article, id=String()) + feed = List(Article) + + +class Mutation(ObjectType): + write_article = Field(Article) + + +class Subscription(ObjectType): + article_subscribe = Field(Article, id=String()) + + +class MyObjectType(ObjectType): + pass + + +class MyInterface(Interface): + pass + + +class MyUnion(Union): + class Meta: + types = (Article,) + + +class MyEnum(Enum): + foo = "foo" + + +class MyInputObjectType(InputObjectType): + pass + + +def test_defines_a_query_only_schema(): + blog_schema = Schema(Query) + + assert blog_schema.query == Query + assert blog_schema.graphql_schema.query_type.graphene_type == Query + + article_field = Query._meta.fields["article"] + assert article_field.type == Article + assert article_field.type._meta.name == "Article" + + article_field_type = article_field.type + assert issubclass(article_field_type, ObjectType) + + title_field = article_field_type._meta.fields["title"] + assert title_field.type == String + + author_field = article_field_type._meta.fields["author"] + author_field_type = author_field.type + assert issubclass(author_field_type, ObjectType) + recent_article_field = author_field_type._meta.fields["recent_article"] + + assert recent_article_field.type == Article + + feed_field = Query._meta.fields["feed"] + 
assert feed_field.type.of_type == Article + + +def test_defines_a_mutation_schema(): + blog_schema = Schema(Query, mutation=Mutation) + + assert blog_schema.mutation == Mutation + assert blog_schema.graphql_schema.mutation_type.graphene_type == Mutation + + write_mutation = Mutation._meta.fields["write_article"] + assert write_mutation.type == Article + assert write_mutation.type._meta.name == "Article" + + +def test_defines_a_subscription_schema(): + blog_schema = Schema(Query, subscription=Subscription) + + assert blog_schema.subscription == Subscription + assert blog_schema.graphql_schema.subscription_type.graphene_type == Subscription + + subscription = Subscription._meta.fields["article_subscribe"] + assert subscription.type == Article + assert subscription.type._meta.name == "Article" + + +def test_includes_nested_input_objects_in_the_map(): + class NestedInputObject(InputObjectType): + value = String() + + class SomeInputObject(InputObjectType): + nested = InputField(NestedInputObject) + + class SomeMutation(Mutation): + mutate_something = Field(Article, input=Argument(SomeInputObject)) + + class SomeSubscription(Mutation): + subscribe_to_something = Field(Article, input=Argument(SomeInputObject)) + + schema = Schema(query=Query, mutation=SomeMutation, subscription=SomeSubscription) + type_map = schema.graphql_schema.type_map + + assert type_map["NestedInputObject"].graphene_type is NestedInputObject + + +def test_includes_interfaces_thunk_subtypes_in_the_type_map(): + class SomeInterface(Interface): + f = Int() + + class SomeSubtype(ObjectType): + class Meta: + interfaces = (SomeInterface,) + + class Query(ObjectType): + iface = Field(lambda: SomeInterface) + + schema = Schema(query=Query, types=[SomeSubtype]) + type_map = schema.graphql_schema.type_map + + assert type_map["SomeSubtype"].graphene_type is SomeSubtype + + +def test_includes_types_in_union(): + class SomeType(ObjectType): + a = String() + + class OtherType(ObjectType): + b = String() + + class MyUnion(Union): + class Meta: + types = (SomeType, OtherType) + + class Query(ObjectType): + union = Field(MyUnion) + + schema = Schema(query=Query) + type_map = schema.graphql_schema.type_map + + assert type_map["OtherType"].graphene_type is OtherType + assert type_map["SomeType"].graphene_type is SomeType + + +def test_maps_enum(): + class SomeType(ObjectType): + a = String() + + class OtherType(ObjectType): + b = String() + + class MyUnion(Union): + class Meta: + types = (SomeType, OtherType) + + class Query(ObjectType): + union = Field(MyUnion) + + schema = Schema(query=Query) + type_map = schema.graphql_schema.type_map + + assert type_map["OtherType"].graphene_type is OtherType + assert type_map["SomeType"].graphene_type is SomeType + + +def test_includes_interfaces_subtypes_in_the_type_map(): + class SomeInterface(Interface): + f = Int() + + class SomeSubtype(ObjectType): + class Meta: + interfaces = (SomeInterface,) + + class Query(ObjectType): + iface = Field(SomeInterface) + + schema = Schema(query=Query, types=[SomeSubtype]) + type_map = schema.graphql_schema.type_map + + assert type_map["SomeSubtype"].graphene_type is SomeSubtype + + +def test_stringifies_simple_types(): + assert str(Int) == "Int" + assert str(Article) == "Article" + assert str(MyInterface) == "MyInterface" + assert str(MyUnion) == "MyUnion" + assert str(MyEnum) == "MyEnum" + assert str(MyInputObjectType) == "MyInputObjectType" + assert str(NonNull(Int)) == "Int!" + assert str(List(Int)) == "[Int]" + assert str(NonNull(List(Int))) == "[Int]!" 
+ assert str(List(NonNull(Int))) == "[Int!]" + assert str(List(List(Int))) == "[[Int]]" + + +# def test_identifies_input_types(): +# expected = ( +# (GraphQLInt, True), +# (ObjectType, False), +# (InterfaceType, False), +# (UnionType, False), +# (EnumType, True), +# (InputObjectType, True) +# ) + +# for type_, answer in expected: +# assert is_input_type(type_) == answer +# assert is_input_type(GraphQLList(type_)) == answer +# assert is_input_type(GraphQLNonNull(type_)) == answer + + +# def test_identifies_output_types(): +# expected = ( +# (GraphQLInt, True), +# (ObjectType, True), +# (InterfaceType, True), +# (UnionType, True), +# (EnumType, True), +# (InputObjectType, False) +# ) + +# for type, answer in expected: +# assert is_output_type(type) == answer +# assert is_output_type(GraphQLList(type)) == answer +# assert is_output_type(GraphQLNonNull(type)) == answer + + +# def test_prohibits_nesting_nonnull_inside_nonnull(): +# with raises(Exception) as excinfo: +# GraphQLNonNull(GraphQLNonNull(GraphQLInt)) + +# assert 'Can only create NonNull of a Nullable GraphQLType but got: Int!.' in str(excinfo.value) + + +# def test_prohibits_putting_non_object_types_in_unions(): +# bad_union_types = [ +# GraphQLInt, +# GraphQLNonNull(GraphQLInt), +# GraphQLList(GraphQLInt), +# InterfaceType, +# UnionType, +# EnumType, +# InputObjectType +# ] +# for x in bad_union_types: +# with raises(Exception) as excinfo: +# GraphQLSchema( +# GraphQLObjectType( +# 'Root', +# fields={ +# 'union': GraphQLField(GraphQLUnionType('BadUnion', [x])) +# } +# ) +# ) + +# assert 'BadUnion may only contain Object types, it cannot contain: ' + str(x) + '.' \ +# == str(excinfo.value) + + +def test_does_not_mutate_passed_field_definitions(): + class CommonFields: + field1 = String() + field2 = String(id=String()) + + class TestObject1(CommonFields, ObjectType): + pass + + class TestObject2(CommonFields, ObjectType): + pass + + assert TestObject1._meta.fields == TestObject2._meta.fields + + class CommonFields: + field1 = String() + field2 = String() + + class TestInputObject1(CommonFields, InputObjectType): + pass + + class TestInputObject2(CommonFields, InputObjectType): + pass + + assert TestInputObject1._meta.fields == TestInputObject2._meta.fields + + +def test_graphene_graphql_type_can_be_copied(): + class Query(ObjectType): + field = String() + + def resolve_field(self, info): + return "" + + schema = Schema(query=Query) + query_type_copy = copy.copy(schema.graphql_schema.query_type) + assert query_type_copy.__dict__ == schema.graphql_schema.query_type.__dict__ + assert isinstance(schema.graphql_schema.query_type, GrapheneGraphQLType) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_dynamic.py b/testbed/graphql-python__graphene/graphene/types/tests/test_dynamic.py new file mode 100644 index 0000000000000000000000000000000000000000..3b5f7a3b9d2426958d47cc390e152353aec1b35f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_dynamic.py @@ -0,0 +1,38 @@ +from functools import partial + +from ..dynamic import Dynamic +from ..scalars import String +from ..structures import List, NonNull + + +def test_dynamic(): + dynamic = Dynamic(lambda: String) + assert dynamic.get_type() == String + assert str(dynamic.get_type()) == "String" + + +def test_nonnull(): + dynamic = Dynamic(lambda: NonNull(String)) + assert dynamic.get_type().of_type == String + assert str(dynamic.get_type()) == "String!" 
+ + +def test_list(): + dynamic = Dynamic(lambda: List(String)) + assert dynamic.get_type().of_type == String + assert str(dynamic.get_type()) == "[String]" + + +def test_list_non_null(): + dynamic = Dynamic(lambda: List(NonNull(String))) + assert dynamic.get_type().of_type.of_type == String + assert str(dynamic.get_type()) == "[String!]" + + +def test_partial(): + def __type(_type): + return _type + + dynamic = Dynamic(partial(__type, String)) + assert dynamic.get_type() == String + assert str(dynamic.get_type()) == "String" diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_enum.py b/testbed/graphql-python__graphene/graphene/types/tests/test_enum.py new file mode 100644 index 0000000000000000000000000000000000000000..e6fce66c98ed2afcb4846a3219c27a6ac6e78e96 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_enum.py @@ -0,0 +1,615 @@ +from textwrap import dedent + +from ..argument import Argument +from ..enum import Enum, PyEnum +from ..field import Field +from ..inputfield import InputField +from ..inputobjecttype import InputObjectType +from ..mutation import Mutation +from ..scalars import String +from ..schema import ObjectType, Schema + + +def test_enum_construction(): + class RGB(Enum): + """Description""" + + RED = 1 + GREEN = 2 + BLUE = 3 + + @property + def description(self): + return f"Description {self.name}" + + assert RGB._meta.name == "RGB" + assert RGB._meta.description == "Description" + + values = RGB._meta.enum.__members__.values() + assert sorted(v.name for v in values) == ["BLUE", "GREEN", "RED"] + assert sorted(v.description for v in values) == [ + "Description BLUE", + "Description GREEN", + "Description RED", + ] + + +def test_enum_construction_meta(): + class RGB(Enum): + class Meta: + name = "RGBEnum" + description = "Description" + + RED = 1 + GREEN = 2 + BLUE = 3 + + assert RGB._meta.name == "RGBEnum" + assert RGB._meta.description == "Description" + + +def test_enum_instance_construction(): + RGB = Enum("RGB", "RED,GREEN,BLUE") + + values = RGB._meta.enum.__members__.values() + assert sorted(v.name for v in values) == ["BLUE", "GREEN", "RED"] + + +def test_enum_from_builtin_enum(): + PyRGB = PyEnum("RGB", "RED,GREEN,BLUE") + + RGB = Enum.from_enum(PyRGB) + assert RGB._meta.enum == PyRGB + assert RGB.RED + assert RGB.GREEN + assert RGB.BLUE + + +def test_enum_custom_description_in_constructor(): + description = "An enumeration, but with a custom description" + RGB = Enum( + "RGB", + "RED,GREEN,BLUE", + description=description, + ) + assert RGB._meta.description == description + + +def test_enum_from_python3_enum_uses_default_builtin_doc(): + RGB = Enum("RGB", "RED,GREEN,BLUE") + assert RGB._meta.description == "An enumeration." 
+ + +def test_enum_from_builtin_enum_accepts_lambda_description(): + def custom_description(value): + if not value: + return "StarWars Episodes" + + return "New Hope Episode" if value == Episode.NEWHOPE else "Other" + + def custom_deprecation_reason(value): + return "meh" if value == Episode.NEWHOPE else None + + PyEpisode = PyEnum("PyEpisode", "NEWHOPE,EMPIRE,JEDI") + Episode = Enum.from_enum( + PyEpisode, + description=custom_description, + deprecation_reason=custom_deprecation_reason, + ) + + class Query(ObjectType): + foo = Episode() + + schema = Schema(query=Query).graphql_schema + + episode = schema.get_type("PyEpisode") + + assert episode.description == "StarWars Episodes" + assert [ + (name, value.description, value.deprecation_reason) + for name, value in episode.values.items() + ] == [ + ("NEWHOPE", "New Hope Episode", "meh"), + ("EMPIRE", "Other", None), + ("JEDI", "Other", None), + ] + + +def test_enum_from_python3_enum_uses_enum_doc(): + from enum import Enum as PyEnum + + class Color(PyEnum): + """This is the description""" + + RED = 1 + GREEN = 2 + BLUE = 3 + + RGB = Enum.from_enum(Color) + assert RGB._meta.enum == Color + assert RGB._meta.description == "This is the description" + assert RGB + assert RGB.RED + assert RGB.GREEN + assert RGB.BLUE + + +def test_enum_value_from_class(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + assert RGB.RED.value == 1 + assert RGB.GREEN.value == 2 + assert RGB.BLUE.value == 3 + + +def test_enum_value_as_unmounted_field(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + unmounted = RGB() + unmounted_field = unmounted.Field() + assert isinstance(unmounted_field, Field) + assert unmounted_field.type == RGB + + +def test_enum_value_as_unmounted_inputfield(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + unmounted = RGB() + unmounted_field = unmounted.InputField() + assert isinstance(unmounted_field, InputField) + assert unmounted_field.type == RGB + + +def test_enum_value_as_unmounted_argument(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + unmounted = RGB() + unmounted_field = unmounted.Argument() + assert isinstance(unmounted_field, Argument) + assert unmounted_field.type == RGB + + +def test_enum_can_be_compared(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + assert RGB.RED == 1 + assert RGB.GREEN == 2 + assert RGB.BLUE == 3 + + +def test_enum_can_be_initialized(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + assert RGB.get(1) == RGB.RED + assert RGB.get(2) == RGB.GREEN + assert RGB.get(3) == RGB.BLUE + + +def test_enum_can_retrieve_members(): + class RGB(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + assert RGB["RED"] == RGB.RED + assert RGB["GREEN"] == RGB.GREEN + assert RGB["BLUE"] == RGB.BLUE + + +def test_enum_to_enum_comparison_should_differ(): + class RGB1(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + class RGB2(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + assert RGB1.RED != RGB2.RED + assert RGB1.GREEN != RGB2.GREEN + assert RGB1.BLUE != RGB2.BLUE + + +def test_enum_skip_meta_from_members(): + class RGB1(Enum): + class Meta: + name = "RGB" + + RED = 1 + GREEN = 2 + BLUE = 3 + + assert dict(RGB1._meta.enum.__members__) == { + "RED": RGB1.RED, + "GREEN": RGB1.GREEN, + "BLUE": RGB1.BLUE, + } + + +def test_enum_types(): + from enum import Enum as PyEnum + + class Color(PyEnum): + """Primary colors""" + + RED = 1 + YELLOW = 2 + BLUE = 3 + + GColor = Enum.from_enum(Color) + + class Query(ObjectType): + color = GColor(required=True) + + def resolve_color(_, info): + return Color.RED + 
+ schema = Schema(query=Query) + + assert ( + str(schema).strip() + == dedent( + ''' + type Query { + color: Color! + } + + """Primary colors""" + enum Color { + RED + YELLOW + BLUE + } + ''' + ).strip() + ) + + +def test_enum_resolver(): + from enum import Enum as PyEnum + + class Color(PyEnum): + RED = 1 + GREEN = 2 + BLUE = 3 + + GColor = Enum.from_enum(Color) + + class Query(ObjectType): + color = GColor(required=True) + + def resolve_color(_, info): + return Color.RED + + schema = Schema(query=Query) + + results = schema.execute("query { color }") + assert not results.errors + + assert results.data["color"] == Color.RED.name + + +def test_enum_resolver_compat(): + from enum import Enum as PyEnum + + class Color(PyEnum): + RED = 1 + GREEN = 2 + BLUE = 3 + + GColor = Enum.from_enum(Color) + + class Query(ObjectType): + color = GColor(required=True) + color_by_name = GColor(required=True) + + def resolve_color(_, info): + return Color.RED.value + + def resolve_color_by_name(_, info): + return Color.RED.name + + schema = Schema(query=Query) + + results = schema.execute( + """query { + color + colorByName + }""" + ) + assert not results.errors + + assert results.data["color"] == Color.RED.name + assert results.data["colorByName"] == Color.RED.name + + +def test_enum_with_name(): + from enum import Enum as PyEnum + + class Color(PyEnum): + RED = 1 + YELLOW = 2 + BLUE = 3 + + GColor = Enum.from_enum(Color, description="original colors") + UniqueGColor = Enum.from_enum( + Color, name="UniqueColor", description="unique colors" + ) + + class Query(ObjectType): + color = GColor(required=True) + unique_color = UniqueGColor(required=True) + + schema = Schema(query=Query) + + assert ( + str(schema).strip() + == dedent( + ''' + type Query { + color: Color! + uniqueColor: UniqueColor! 
+ } + + """original colors""" + enum Color { + RED + YELLOW + BLUE + } + + """unique colors""" + enum UniqueColor { + RED + YELLOW + BLUE + } + ''' + ).strip() + ) + + +def test_enum_resolver_invalid(): + from enum import Enum as PyEnum + + class Color(PyEnum): + RED = 1 + GREEN = 2 + BLUE = 3 + + GColor = Enum.from_enum(Color) + + class Query(ObjectType): + color = GColor(required=True) + + def resolve_color(_, info): + return "BLACK" + + schema = Schema(query=Query) + + results = schema.execute("query { color }") + assert results.errors + assert results.errors[0].message == "Enum 'Color' cannot represent value: 'BLACK'" + + +def test_field_enum_argument(): + class Color(Enum): + RED = 1 + GREEN = 2 + BLUE = 3 + + class Brick(ObjectType): + color = Color(required=True) + + color_filter = None + + class Query(ObjectType): + bricks_by_color = Field(Brick, color=Color(required=True)) + + def resolve_bricks_by_color(_, info, color): + nonlocal color_filter + color_filter = color + return Brick(color=color) + + schema = Schema(query=Query) + + results = schema.execute( + """ + query { + bricksByColor(color: RED) { + color + } + } + """ + ) + assert not results.errors + assert results.data == {"bricksByColor": {"color": "RED"}} + assert color_filter == Color.RED + + +def test_mutation_enum_input(): + class RGB(Enum): + """Available colors""" + + RED = 1 + GREEN = 2 + BLUE = 3 + + color_input = None + + class CreatePaint(Mutation): + class Arguments: + color = RGB(required=True) + + color = RGB(required=True) + + def mutate(_, info, color): + nonlocal color_input + color_input = color + return CreatePaint(color=color) + + class MyMutation(ObjectType): + create_paint = CreatePaint.Field() + + class Query(ObjectType): + a = String() + + schema = Schema(query=Query, mutation=MyMutation) + result = schema.execute( + """ mutation MyMutation { + createPaint(color: RED) { + color + } + } + """ + ) + assert not result.errors + assert result.data == {"createPaint": {"color": "RED"}} + + assert color_input == RGB.RED + + +def test_mutation_enum_input_type(): + class RGB(Enum): + """Available colors""" + + RED = 1 + GREEN = 2 + BLUE = 3 + + class ColorInput(InputObjectType): + color = RGB(required=True) + + color_input_value = None + + class CreatePaint(Mutation): + class Arguments: + color_input = ColorInput(required=True) + + color = RGB(required=True) + + def mutate(_, info, color_input): + nonlocal color_input_value + color_input_value = color_input.color + return CreatePaint(color=color_input.color) + + class MyMutation(ObjectType): + create_paint = CreatePaint.Field() + + class Query(ObjectType): + a = String() + + schema = Schema(query=Query, mutation=MyMutation) + result = schema.execute( + """ + mutation MyMutation { + createPaint(colorInput: { color: RED }) { + color + } + } + """ + ) + assert not result.errors + assert result.data == {"createPaint": {"color": "RED"}} + + assert color_input_value == RGB.RED + + +def test_hashable_enum(): + class RGB(Enum): + """Available colors""" + + RED = 1 + GREEN = 2 + BLUE = 3 + + color_map = {RGB.RED: "a", RGB.BLUE: "b", 1: "c"} + + assert color_map[RGB.RED] == "a" + assert color_map[RGB.BLUE] == "b" + assert color_map[1] == "c" + + +def test_hashable_instance_creation_enum(): + Episode = Enum("Episode", [("NEWHOPE", 4), ("EMPIRE", 5), ("JEDI", 6)]) + + trilogy_map = {Episode.NEWHOPE: "better", Episode.EMPIRE: "best", 5: "foo"} + + assert trilogy_map[Episode.NEWHOPE] == "better" + assert trilogy_map[Episode.EMPIRE] == "best" + assert trilogy_map[5] == "foo" 
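+
+
+# Both hashability tests above rely on enum members hashing independently of
+# their underlying values: member-to-value equality holds (RGB.RED == 1 in
+# test_enum_can_be_compared), so Episode.EMPIRE and 5 can only coexist as
+# separate dict keys because their hashes differ; with equal hashes, the later
+# `5: "foo"` insertion would have overwritten the `Episode.EMPIRE: "best"`
+# entry instead of sitting alongside it.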
+ + +def test_enum_iteration(): + class TestEnum(Enum): + FIRST = 1 + SECOND = 2 + + result = [] + expected_values = ["FIRST", "SECOND"] + for c in TestEnum: + result.append(c.name) + assert result == expected_values + + +def test_iterable_instance_creation_enum(): + TestEnum = Enum("TestEnum", [("FIRST", 1), ("SECOND", 2)]) + + result = [] + expected_values = ["FIRST", "SECOND"] + for c in TestEnum: + result.append(c.name) + assert result == expected_values + + +# https://github.com/graphql-python/graphene/issues/1321 +def test_enum_description_member_not_interpreted_as_property(): + class RGB(Enum): + """Description""" + + red = "red" + green = "green" + blue = "blue" + description = "description" + deprecation_reason = "deprecation_reason" + + class Query(ObjectType): + color = RGB() + + def resolve_color(_, info): + return RGB.description + + values = RGB._meta.enum.__members__.values() + assert sorted(v.name for v in values) == [ + "blue", + "deprecation_reason", + "description", + "green", + "red", + ] + + schema = Schema(query=Query) + + results = schema.execute("query { color }") + assert not results.errors + assert results.data["color"] == RGB.description.name diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_field.py b/testbed/graphql-python__graphene/graphene/types/tests/test_field.py new file mode 100644 index 0000000000000000000000000000000000000000..f0401bfae232331c0f626e752c77255f1ba3b6e0 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_field.py @@ -0,0 +1,147 @@ +from functools import partial + +from pytest import raises + +from ..argument import Argument +from ..field import Field +from ..scalars import String +from ..structures import NonNull +from .utils import MyLazyType + + +class MyInstance: + value = "value" + value_func = staticmethod(lambda: "value_func") + + def value_method(self): + return "value_method" + + +def test_field_basic(): + MyType = object() + args = {"my arg": Argument(True)} + + def resolver(): + return None + + deprecation_reason = "Deprecated now" + description = "My Field" + my_default = "something" + field = Field( + MyType, + name="name", + args=args, + resolver=resolver, + description=description, + deprecation_reason=deprecation_reason, + default_value=my_default, + ) + assert field.name == "name" + assert field.args == args + assert field.resolver == resolver + assert field.deprecation_reason == deprecation_reason + assert field.description == description + assert field.default_value == my_default + + +def test_field_required(): + MyType = object() + field = Field(MyType, required=True) + assert isinstance(field.type, NonNull) + assert field.type.of_type == MyType + + +def test_field_default_value_not_callable(): + MyType = object() + try: + Field(MyType, default_value=lambda: True) + except AssertionError as e: + # substring comparison for py 2/3 compatibility + assert "The default value can not be a function but received" in str(e) + + +def test_field_source(): + MyType = object() + field = Field(MyType, source="value") + assert field.resolver(MyInstance(), None) == MyInstance.value + + +def test_field_source_dict_or_attr(): + MyType = object() + field = Field(MyType, source="value") + assert field.resolver(MyInstance(), None) == MyInstance.value + assert field.resolver({"value": MyInstance.value}, None) == MyInstance.value + + +def test_field_with_lazy_type(): + MyType = object() + field = Field(lambda: MyType) + assert field.type == MyType + + +def test_field_with_lazy_partial_type(): + 
MyType = object() + field = Field(partial(lambda: MyType)) + assert field.type == MyType + + +def test_field_with_string_type(): + field = Field("graphene.types.tests.utils.MyLazyType") + assert field.type == MyLazyType + + +def test_field_not_source_and_resolver(): + MyType = object() + with raises(Exception) as exc_info: + Field(MyType, source="value", resolver=lambda: None) + assert ( + str(exc_info.value) + == "A Field cannot have a source and a resolver in at the same time." + ) + + +def test_field_source_func(): + MyType = object() + field = Field(MyType, source="value_func") + assert field.resolver(MyInstance(), None) == MyInstance.value_func() + + +def test_field_source_method(): + MyType = object() + field = Field(MyType, source="value_method") + assert field.resolver(MyInstance(), None) == MyInstance().value_method() + + +def test_field_source_as_argument(): + MyType = object() + field = Field(MyType, source=String()) + assert "source" in field.args + assert field.args["source"].type == String + + +def test_field_name_as_argument(): + MyType = object() + field = Field(MyType, name=String()) + assert "name" in field.args + assert field.args["name"].type == String + + +def test_field_source_argument_as_kw(): + MyType = object() + deprecation_reason = "deprecated" + field = Field( + MyType, + b=NonNull(True), + c=Argument(None, deprecation_reason=deprecation_reason), + a=NonNull(False), + ) + assert list(field.args) == ["b", "c", "a"] + assert isinstance(field.args["b"], Argument) + assert isinstance(field.args["b"].type, NonNull) + assert field.args["b"].type.of_type is True + assert isinstance(field.args["c"], Argument) + assert field.args["c"].type is None + assert field.args["c"].deprecation_reason == deprecation_reason + assert isinstance(field.args["a"], Argument) + assert isinstance(field.args["a"].type, NonNull) + assert field.args["a"].type.of_type is False diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_generic.py b/testbed/graphql-python__graphene/graphene/types/tests/test_generic.py new file mode 100644 index 0000000000000000000000000000000000000000..338da9823a5c51bc4e5f0985279654db250fb774 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_generic.py @@ -0,0 +1,84 @@ +from ..generic import GenericScalar +from ..objecttype import ObjectType +from ..schema import Schema + + +class Query(ObjectType): + generic = GenericScalar(input=GenericScalar()) + + def resolve_generic(self, info, input=None): + return input + + +schema = Schema(query=Query) + + +def test_generic_query_variable(): + for generic_value in [ + 1, + 1.1, + True, + "str", + [1, 2, 3], + [1.1, 2.2, 3.3], + [True, False], + ["str1", "str2"], + {"key_a": "a", "key_b": "b"}, + { + "int": 1, + "float": 1.1, + "boolean": True, + "string": "str", + "int_list": [1, 2, 3], + "float_list": [1.1, 2.2, 3.3], + "boolean_list": [True, False], + "string_list": ["str1", "str2"], + "nested_dict": {"key_a": "a", "key_b": "b"}, + }, + None, + ]: + result = schema.execute( + """query Test($generic: GenericScalar){ generic(input: $generic) }""", + variables={"generic": generic_value}, + ) + assert not result.errors + assert result.data == {"generic": generic_value} + + +def test_generic_parse_literal_query(): + result = schema.execute( + """ + query { + generic(input: { + int: 1, + float: 1.1 + boolean: true, + string: "str", + int_list: [1, 2, 3], + float_list: [1.1, 2.2, 3.3], + boolean_list: [true, false] + string_list: ["str1", "str2"], + nested_dict: { + key_a: "a", + 
key_b: "b" + }, + empty_key: undefined + }) + } + """ + ) + assert not result.errors + assert result.data == { + "generic": { + "int": 1, + "float": 1.1, + "boolean": True, + "string": "str", + "int_list": [1, 2, 3], + "float_list": [1.1, 2.2, 3.3], + "boolean_list": [True, False], + "string_list": ["str1", "str2"], + "nested_dict": {"key_a": "a", "key_b": "b"}, + "empty_key": None, + } + } diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_inputfield.py b/testbed/graphql-python__graphene/graphene/types/tests/test_inputfield.py new file mode 100644 index 0000000000000000000000000000000000000000..9b1001286d7e727b26f762adc36b92339753a8f4 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_inputfield.py @@ -0,0 +1,47 @@ +from functools import partial + +from pytest import raises + +from ..inputfield import InputField +from ..structures import NonNull +from .utils import MyLazyType + + +def test_inputfield_required(): + MyType = object() + field = InputField(MyType, required=True) + assert isinstance(field.type, NonNull) + assert field.type.of_type == MyType + + +def test_inputfield_deprecated(): + MyType = object() + deprecation_reason = "deprecated" + field = InputField(MyType, required=False, deprecation_reason=deprecation_reason) + assert isinstance(field.type, type(MyType)) + assert field.deprecation_reason == deprecation_reason + + +def test_inputfield_required_deprecated(): + MyType = object() + with raises(AssertionError) as exc_info: + InputField(MyType, name="input", required=True, deprecation_reason="deprecated") + + assert str(exc_info.value) == "InputField input is required, cannot deprecate it." + + +def test_inputfield_with_lazy_type(): + MyType = object() + field = InputField(lambda: MyType) + assert field.type == MyType + + +def test_inputfield_with_lazy_partial_type(): + MyType = object() + field = InputField(partial(lambda: MyType)) + assert field.type == MyType + + +def test_inputfield_with_string_type(): + field = InputField("graphene.types.tests.utils.MyLazyType") + assert field.type == MyLazyType diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_inputobjecttype.py b/testbed/graphql-python__graphene/graphene/types/tests/test_inputobjecttype.py new file mode 100644 index 0000000000000000000000000000000000000000..0fb7e3945c4e08a01050e7e49e248676191410fc --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_inputobjecttype.py @@ -0,0 +1,138 @@ +from ..argument import Argument +from ..field import Field +from ..inputfield import InputField +from ..inputobjecttype import InputObjectType +from ..objecttype import ObjectType +from ..scalars import Boolean, String +from ..schema import Schema +from ..unmountedtype import UnmountedType + + +class MyType: + pass + + +class MyScalar(UnmountedType): + def get_type(self): + return MyType + + +def test_generate_inputobjecttype(): + class MyInputObjectType(InputObjectType): + """Documentation""" + + assert MyInputObjectType._meta.name == "MyInputObjectType" + assert MyInputObjectType._meta.description == "Documentation" + assert MyInputObjectType._meta.fields == {} + + +def test_generate_inputobjecttype_with_meta(): + class MyInputObjectType(InputObjectType): + class Meta: + name = "MyOtherInputObjectType" + description = "Documentation" + + assert MyInputObjectType._meta.name == "MyOtherInputObjectType" + assert MyInputObjectType._meta.description == "Documentation" + + +def test_generate_inputobjecttype_with_fields(): + class 
MyInputObjectType(InputObjectType): + field = Field(MyType) + + assert "field" in MyInputObjectType._meta.fields + + +def test_ordered_fields_in_inputobjecttype(): + class MyInputObjectType(InputObjectType): + b = InputField(MyType) + a = InputField(MyType) + field = MyScalar() + asa = InputField(MyType) + + assert list(MyInputObjectType._meta.fields) == ["b", "a", "field", "asa"] + + +def test_generate_inputobjecttype_unmountedtype(): + class MyInputObjectType(InputObjectType): + field = MyScalar(MyType) + + assert "field" in MyInputObjectType._meta.fields + assert isinstance(MyInputObjectType._meta.fields["field"], InputField) + + +def test_generate_inputobjecttype_as_argument(): + class MyInputObjectType(InputObjectType): + field = MyScalar() + + class MyObjectType(ObjectType): + field = Field(MyType, input=MyInputObjectType()) + + assert "field" in MyObjectType._meta.fields + field = MyObjectType._meta.fields["field"] + assert isinstance(field, Field) + assert field.type == MyType + assert "input" in field.args + assert isinstance(field.args["input"], Argument) + assert field.args["input"].type == MyInputObjectType + + +def test_generate_inputobjecttype_inherit_abstracttype(): + class MyAbstractType: + field1 = MyScalar(MyType) + + class MyInputObjectType(InputObjectType, MyAbstractType): + field2 = MyScalar(MyType) + + assert list(MyInputObjectType._meta.fields) == ["field1", "field2"] + assert [type(x) for x in MyInputObjectType._meta.fields.values()] == [ + InputField, + InputField, + ] + + +def test_generate_inputobjecttype_inherit_abstracttype_reversed(): + class MyAbstractType: + field1 = MyScalar(MyType) + + class MyInputObjectType(MyAbstractType, InputObjectType): + field2 = MyScalar(MyType) + + assert list(MyInputObjectType._meta.fields) == ["field1", "field2"] + assert [type(x) for x in MyInputObjectType._meta.fields.values()] == [ + InputField, + InputField, + ] + + +def test_inputobjecttype_of_input(): + class Child(InputObjectType): + first_name = String() + last_name = String() + + @property + def full_name(self): + return f"{self.first_name} {self.last_name}" + + class Parent(InputObjectType): + child = InputField(Child) + + class Query(ObjectType): + is_child = Boolean(parent=Parent()) + + def resolve_is_child(self, info, parent): + return ( + isinstance(parent.child, Child) + and parent.child.full_name == "Peter Griffin" + ) + + schema = Schema(query=Query) + result = schema.execute( + """query basequery { + isChild(parent: {child: {firstName: "Peter", lastName: "Griffin"}}) + } + """ + ) + + assert not result.errors + assert result.data == {"isChild": True} diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_interface.py b/testbed/graphql-python__graphene/graphene/types/tests/test_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd4fc4fbaedd51862ab2263f32af8b637b1957f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_interface.py @@ -0,0 +1,204 @@ +from ..field import Field +from ..interface import Interface +from ..objecttype import ObjectType +from ..scalars import String +from ..schema import Schema +from ..unmountedtype import UnmountedType + + +class MyType: + pass + + +class MyScalar(UnmountedType): + def get_type(self): + return MyType + + +def test_generate_interface(): + class MyInterface(Interface): + """Documentation""" + + assert MyInterface._meta.name == "MyInterface" + assert MyInterface._meta.description == "Documentation" + assert MyInterface._meta.fields == {} + + +def 
test_generate_interface_with_meta(): + class MyFirstInterface(Interface): + pass + + class MyInterface(Interface): + class Meta: + name = "MyOtherInterface" + description = "Documentation" + interfaces = [MyFirstInterface] + + assert MyInterface._meta.name == "MyOtherInterface" + assert MyInterface._meta.description == "Documentation" + assert MyInterface._meta.interfaces == [MyFirstInterface] + + +def test_generate_interface_with_fields(): + class MyInterface(Interface): + field = Field(MyType) + + assert "field" in MyInterface._meta.fields + + +def test_ordered_fields_in_interface(): + class MyInterface(Interface): + b = Field(MyType) + a = Field(MyType) + field = MyScalar() + asa = Field(MyType) + + assert list(MyInterface._meta.fields) == ["b", "a", "field", "asa"] + + +def test_generate_interface_unmountedtype(): + class MyInterface(Interface): + field = MyScalar() + + assert "field" in MyInterface._meta.fields + assert isinstance(MyInterface._meta.fields["field"], Field) + + +def test_generate_interface_inherit_abstracttype(): + class MyAbstractType: + field1 = MyScalar() + + class MyInterface(Interface, MyAbstractType): + field2 = MyScalar() + + assert list(MyInterface._meta.fields) == ["field1", "field2"] + assert [type(x) for x in MyInterface._meta.fields.values()] == [Field, Field] + + +def test_generate_interface_inherit_interface(): + class MyBaseInterface(Interface): + field1 = MyScalar() + + class MyInterface(MyBaseInterface): + field2 = MyScalar() + + assert MyInterface._meta.name == "MyInterface" + assert list(MyInterface._meta.fields) == ["field1", "field2"] + assert [type(x) for x in MyInterface._meta.fields.values()] == [Field, Field] + + +def test_generate_interface_inherit_abstracttype_reversed(): + class MyAbstractType: + field1 = MyScalar() + + class MyInterface(MyAbstractType, Interface): + field2 = MyScalar() + + assert list(MyInterface._meta.fields) == ["field1", "field2"] + assert [type(x) for x in MyInterface._meta.fields.values()] == [Field, Field] + + +def test_resolve_type_default(): + class MyInterface(Interface): + field2 = String() + + class MyTestType(ObjectType): + class Meta: + interfaces = (MyInterface,) + + class Query(ObjectType): + test = Field(MyInterface) + + def resolve_test(_, info): + return MyTestType() + + schema = Schema(query=Query, types=[MyTestType]) + + result = schema.execute( + """ + query { + test { + __typename + } + } + """ + ) + assert not result.errors + assert result.data == {"test": {"__typename": "MyTestType"}} + + +def test_resolve_type_custom(): + class MyInterface(Interface): + field2 = String() + + @classmethod + def resolve_type(cls, instance, info): + if instance["type"] == 1: + return MyTestType1 + return MyTestType2 + + class MyTestType1(ObjectType): + class Meta: + interfaces = (MyInterface,) + + class MyTestType2(ObjectType): + class Meta: + interfaces = (MyInterface,) + + class Query(ObjectType): + test = Field(MyInterface) + + def resolve_test(_, info): + return {"type": 1} + + schema = Schema(query=Query, types=[MyTestType1, MyTestType2]) + + result = schema.execute( + """ + query { + test { + __typename + } + } + """ + ) + assert not result.errors + assert result.data == {"test": {"__typename": "MyTestType1"}} + + +def test_resolve_type_custom_interferes(): + class MyInterface(Interface): + field2 = String() + type_ = String(name="type") + + def resolve_type_(_, info): + return "foo" + + class MyTestType1(ObjectType): + class Meta: + interfaces = (MyInterface,) + + class MyTestType2(ObjectType): + class Meta: + 
interfaces = (MyInterface,) + + class Query(ObjectType): + test = Field(MyInterface) + + def resolve_test(_, info): + return MyTestType1() + + schema = Schema(query=Query, types=[MyTestType1, MyTestType2]) + + result = schema.execute( + """ + query { + test { + __typename + type + } + } + """ + ) + assert not result.errors + assert result.data == {"test": {"__typename": "MyTestType1", "type": "foo"}} diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_json.py b/testbed/graphql-python__graphene/graphene/types/tests/test_json.py new file mode 100644 index 0000000000000000000000000000000000000000..bb754b3a0024ea154db8450262dfec8b3a38181a --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_json.py @@ -0,0 +1,85 @@ +from ..json import JSONString +from ..objecttype import ObjectType +from ..schema import Schema + + +class Query(ObjectType): + json = JSONString(input=JSONString()) + + def resolve_json(self, info, input): + return input + + +schema = Schema(query=Query) + + +def test_jsonstring_query(): + json_value = '{"key": "value"}' + + json_value_quoted = json_value.replace('"', '\\"') + result = schema.execute("""{ json(input: "%s") }""" % json_value_quoted) + assert not result.errors + assert result.data == {"json": json_value} + + result = schema.execute("""{ json(input: "{}") }""") + assert not result.errors + assert result.data == {"json": "{}"} + + +def test_jsonstring_query_variable(): + json_value = '{"key": "value"}' + + result = schema.execute( + """query Test($json: JSONString){ json(input: $json) }""", + variables={"json": json_value}, + ) + assert not result.errors + assert result.data == {"json": json_value} + + +def test_jsonstring_optional_uuid_input(): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ json(input: null) }") + assert not result.errors + assert result.data == {"json": None} + + +def test_jsonstring_invalid_query(): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute("{ json(input: 1) }") + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == "Expected value of type 'JSONString', found 1." + + result = schema.execute("{ json(input: {}) }") + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == "Expected value of type 'JSONString', found {}." + + result = schema.execute('{ json(input: "a") }') + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == ( + "Expected value of type 'JSONString', found \"a\"; " + "Badly formed JSONString: Expecting value: line 1 column 1 (char 0)" + ) + + result = schema.execute("""{ json(input: "{\\'key\\': 0}") }""") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == "Syntax Error: Invalid character escape sequence: '\\''." 
+ ) + + result = schema.execute("""{ json(input: "{\\"key\\": 0,}") }""") + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == ( + 'Expected value of type \'JSONString\', found "{\\"key\\": 0,}"; ' + "Badly formed JSONString: Expecting property name enclosed in double quotes: line 1 column 11 (char 10)" + ) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_mountedtype.py b/testbed/graphql-python__graphene/graphene/types/tests/test_mountedtype.py new file mode 100644 index 0000000000000000000000000000000000000000..b964233e1ff1c9d9d96167e0cb02c5e0de519be5 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_mountedtype.py @@ -0,0 +1,23 @@ +from ..field import Field +from ..scalars import String + + +class CustomField(Field): + def __init__(self, *args, **kwargs): + self.metadata = kwargs.pop("metadata", None) + super(CustomField, self).__init__(*args, **kwargs) + + +def test_mounted_type(): + unmounted = String() + mounted = Field.mounted(unmounted) + assert isinstance(mounted, Field) + assert mounted.type == String + + +def test_mounted_type_custom(): + unmounted = String(metadata={"hey": "yo!"}) + mounted = CustomField.mounted(unmounted) + assert isinstance(mounted, CustomField) + assert mounted.type == String + assert mounted.metadata == {"hey": "yo!"} diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_mutation.py b/testbed/graphql-python__graphene/graphene/types/tests/test_mutation.py new file mode 100644 index 0000000000000000000000000000000000000000..4a7ad3c7c59baee56da99d857fa88deb0be50324 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_mutation.py @@ -0,0 +1,220 @@ +from pytest import raises + +from ..argument import Argument +from ..dynamic import Dynamic +from ..mutation import Mutation +from ..objecttype import ObjectType +from ..scalars import String +from ..schema import Schema +from ..structures import NonNull +from ..interface import Interface + + +class MyType(Interface): + pass + + +def test_generate_mutation_no_args(): + class MyMutation(Mutation): + """Documentation""" + + def mutate(self, info, **args): + return args + + assert issubclass(MyMutation, ObjectType) + assert MyMutation._meta.name == "MyMutation" + assert MyMutation._meta.description == "Documentation" + resolved = MyMutation.Field().resolver(None, None, name="Peter") + assert resolved == {"name": "Peter"} + + +def test_generate_mutation_with_meta(): + class MyMutation(Mutation): + class Meta: + name = "MyOtherMutation" + description = "Documentation" + interfaces = (MyType,) + + def mutate(self, info, **args): + return args + + assert MyMutation._meta.name == "MyOtherMutation" + assert MyMutation._meta.description == "Documentation" + assert MyMutation._meta.interfaces == (MyType,) + resolved = MyMutation.Field().resolver(None, None, name="Peter") + assert resolved == {"name": "Peter"} + + +def test_mutation_raises_exception_if_no_mutate(): + with raises(AssertionError) as excinfo: + + class MyMutation(Mutation): + pass + + assert "All mutations must define a mutate method in it" == str(excinfo.value) + + +def test_mutation_custom_output_type(): + class User(ObjectType): + name = String() + + class CreateUser(Mutation): + class Arguments: + name = String() + + Output = User + + def mutate(self, info, name): + return User(name=name) + + field = CreateUser.Field() + assert field.type == User + assert field.args == {"name": Argument(String)} + resolved = field.resolver(None, 
None, name="Peter") + assert isinstance(resolved, User) + assert resolved.name == "Peter" + + +def test_mutation_execution(): + class CreateUser(Mutation): + class Arguments: + name = String() + dynamic = Dynamic(lambda: String()) + dynamic_none = Dynamic(lambda: None) + + name = String() + dynamic = Dynamic(lambda: String()) + + def mutate(self, info, name, dynamic): + return CreateUser(name=name, dynamic=dynamic) + + class Query(ObjectType): + a = String() + + class MyMutation(ObjectType): + create_user = CreateUser.Field() + + schema = Schema(query=Query, mutation=MyMutation) + result = schema.execute( + """ mutation mymutation { + createUser(name:"Peter", dynamic: "dynamic") { + name + dynamic + } + } + """ + ) + assert not result.errors + assert result.data == {"createUser": {"name": "Peter", "dynamic": "dynamic"}} + + +def test_mutation_no_fields_output(): + class CreateUser(Mutation): + name = String() + + def mutate(self, info): + return CreateUser() + + class Query(ObjectType): + a = String() + + class MyMutation(ObjectType): + create_user = CreateUser.Field() + + schema = Schema(query=Query, mutation=MyMutation) + result = schema.execute( + """ mutation mymutation { + createUser { + name + } + } + """ + ) + assert not result.errors + assert result.data == {"createUser": {"name": None}} + + +def test_mutation_allow_to_have_custom_args(): + class CreateUser(Mutation): + class Arguments: + name = String() + + name = String() + + def mutate(self, info, name): + return CreateUser(name=name) + + class MyMutation(ObjectType): + create_user = CreateUser.Field( + name="createUser", + description="Create a user", + deprecation_reason="Is deprecated", + required=True, + ) + + field = MyMutation._meta.fields["create_user"] + assert field.name == "createUser" + assert field.description == "Create a user" + assert field.deprecation_reason == "Is deprecated" + assert field.type == NonNull(CreateUser) + + +def test_mutation_default_args_output(): + class CreateUser(Mutation): + """Description.""" + + class Arguments: + name = String() + + name = String() + + def mutate(self, info, name): + return CreateUser(name=name) + + class MyMutation(ObjectType): + create_user = CreateUser.Field() + + field = MyMutation._meta.fields["create_user"] + assert field.name is None + assert field.description == "Description." 
+    assert field.deprecation_reason is None
+    assert field.type == CreateUser
+
+
+def test_mutation_as_subclass():
+    class BaseCreateUser(Mutation):
+        class Arguments:
+            name = String()
+
+        name = String()
+
+        def mutate(self, info, **args):
+            return args
+
+    class CreateUserWithPlanet(BaseCreateUser):
+        class Arguments(BaseCreateUser.Arguments):
+            planet = String()
+
+        planet = String()
+
+        def mutate(self, info, **args):
+            return CreateUserWithPlanet(**args)
+
+    class MyMutation(ObjectType):
+        create_user_with_planet = CreateUserWithPlanet.Field()
+
+    class Query(ObjectType):
+        a = String()
+
+    schema = Schema(query=Query, mutation=MyMutation)
+    result = schema.execute(
+        """ mutation mymutation {
+            createUserWithPlanet(name:"Peter", planet: "earth") {
+                name
+                planet
+            }
+        }
+        """
+    )
+    assert not result.errors
+    assert result.data == {"createUserWithPlanet": {"name": "Peter", "planet": "earth"}}
diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_objecttype.py b/testbed/graphql-python__graphene/graphene/types/tests/test_objecttype.py
new file mode 100644
index 0000000000000000000000000000000000000000..dece5e8b9de48a58e4525fce391430c81112d8af
--- /dev/null
+++ b/testbed/graphql-python__graphene/graphene/types/tests/test_objecttype.py
@@ -0,0 +1,309 @@
+from pytest import raises
+
+from ..field import Field
+from ..interface import Interface
+from ..objecttype import ObjectType
+from ..scalars import String
+from ..schema import Schema
+from ..structures import NonNull
+from ..unmountedtype import UnmountedType
+
+
+class MyType(Interface):
+    pass
+
+
+class Container(ObjectType):
+    field1 = Field(MyType)
+    field2 = Field(MyType)
+
+
+class MyInterface(Interface):
+    ifield = Field(MyType)
+
+
+class ContainerWithInterface(ObjectType):
+    class Meta:
+        interfaces = (MyInterface,)
+
+    field1 = Field(MyType)
+    field2 = Field(MyType)
+
+
+class MyScalar(UnmountedType):
+    def get_type(self):
+        return MyType
+
+
+def test_generate_objecttype():
+    class MyObjectType(ObjectType):
+        """Documentation"""
+
+    assert MyObjectType._meta.name == "MyObjectType"
+    assert MyObjectType._meta.description == "Documentation"
+    assert MyObjectType._meta.interfaces == tuple()
+    assert MyObjectType._meta.fields == {}
+    assert (
+        repr(MyObjectType)
+        == "<MyObjectType meta=<ObjectTypeOptions name='MyObjectType'>>"
+    )
+
+
+def test_generate_objecttype_with_meta():
+    class MyObjectType(ObjectType):
+        class Meta:
+            name = "MyOtherObjectType"
+            description = "Documentation"
+            interfaces = (MyType,)
+
+    assert MyObjectType._meta.name == "MyOtherObjectType"
+    assert MyObjectType._meta.description == "Documentation"
+    assert MyObjectType._meta.interfaces == (MyType,)
+
+
+def test_generate_lazy_objecttype():
+    class MyObjectType(ObjectType):
+        example = Field(lambda: InnerObjectType, required=True)
+
+    class InnerObjectType(ObjectType):
+        field = Field(MyType)
+
+    assert MyObjectType._meta.name == "MyObjectType"
+    example_field = MyObjectType._meta.fields["example"]
+    assert isinstance(example_field.type, NonNull)
+    assert example_field.type.of_type == InnerObjectType
+
+
+def test_generate_objecttype_with_fields():
+    class MyObjectType(ObjectType):
+        field = Field(MyType)
+
+    assert "field" in MyObjectType._meta.fields
+
+
+def test_generate_objecttype_with_private_attributes():
+    class MyObjectType(ObjectType):
+        def __init__(self, _private_state=None, **kwargs):
+            self._private_state = _private_state
+            super().__init__(**kwargs)
+
+        _private_state = None
+
+    assert "_private_state" not in MyObjectType._meta.fields
+    assert hasattr(MyObjectType, 
"_private_state") + + m = MyObjectType(_private_state="custom") + assert m._private_state == "custom" + + with raises(TypeError): + MyObjectType(_other_private_state="Wrong") + + +def test_ordered_fields_in_objecttype(): + class MyObjectType(ObjectType): + b = Field(MyType) + a = Field(MyType) + field = MyScalar() + asa = Field(MyType) + + assert list(MyObjectType._meta.fields) == ["b", "a", "field", "asa"] + + +def test_generate_objecttype_inherit_abstracttype(): + class MyAbstractType: + field1 = MyScalar() + + class MyObjectType(ObjectType, MyAbstractType): + field2 = MyScalar() + + assert MyObjectType._meta.description is None + assert MyObjectType._meta.interfaces == () + assert MyObjectType._meta.name == "MyObjectType" + assert list(MyObjectType._meta.fields) == ["field1", "field2"] + assert list(map(type, MyObjectType._meta.fields.values())) == [Field, Field] + + +def test_generate_objecttype_inherit_abstracttype_reversed(): + class MyAbstractType: + field1 = MyScalar() + + class MyObjectType(MyAbstractType, ObjectType): + field2 = MyScalar() + + assert MyObjectType._meta.description is None + assert MyObjectType._meta.interfaces == () + assert MyObjectType._meta.name == "MyObjectType" + assert list(MyObjectType._meta.fields) == ["field1", "field2"] + assert list(map(type, MyObjectType._meta.fields.values())) == [Field, Field] + + +def test_generate_objecttype_unmountedtype(): + class MyObjectType(ObjectType): + field = MyScalar() + + assert "field" in MyObjectType._meta.fields + assert isinstance(MyObjectType._meta.fields["field"], Field) + + +def test_parent_container_get_fields(): + assert list(Container._meta.fields) == ["field1", "field2"] + + +def test_parent_container_interface_get_fields(): + assert list(ContainerWithInterface._meta.fields) == ["ifield", "field1", "field2"] + + +def test_objecttype_as_container_only_args(): + container = Container("1", "2") + assert container.field1 == "1" + assert container.field2 == "2" + + +def test_objecttype_repr(): + container = Container("1", "2") + assert repr(container) == "Container(field1='1', field2='2')" + + +def test_objecttype_eq(): + container1 = Container("1", "2") + container2 = Container("1", "2") + container3 = Container("2", "3") + assert container1 == container1 + assert container1 == container2 + assert container2 != container3 + + +def test_objecttype_as_container_args_kwargs(): + container = Container("1", field2="2") + assert container.field1 == "1" + assert container.field2 == "2" + + +def test_objecttype_as_container_few_kwargs(): + container = Container(field2="2") + assert container.field2 == "2" + + +def test_objecttype_as_container_all_kwargs(): + container = Container(field1="1", field2="2") + assert container.field1 == "1" + assert container.field2 == "2" + + +def test_objecttype_as_container_extra_args(): + msg = r"__init__\(\) takes from 1 to 3 positional arguments but 4 were given" + with raises(TypeError, match=msg): + Container("1", "2", "3") # type: ignore + + +def test_objecttype_as_container_invalid_kwargs(): + msg = r"__init__\(\) got an unexpected keyword argument 'unexisting_field'" + with raises(TypeError, match=msg): + Container(unexisting_field="3") # type: ignore + + +def test_objecttype_container_benchmark(benchmark): + @benchmark + def create_objecttype(): + Container(field1="field1", field2="field2") + + +def test_generate_objecttype_description(): + class MyObjectType(ObjectType): + """ + Documentation + + Documentation line 2 + """ + + assert MyObjectType._meta.description == 
"Documentation\n\nDocumentation line 2" + + +def test_objecttype_with_possible_types(): + class MyObjectType(ObjectType): + class Meta: + possible_types = (dict,) + + assert MyObjectType._meta.possible_types == (dict,) + + +def test_objecttype_with_possible_types_and_is_type_of_should_raise(): + with raises(AssertionError) as excinfo: + + class MyObjectType(ObjectType): + class Meta: + possible_types = (dict,) + + @classmethod + def is_type_of(cls, root, context, info): + return False + + assert str(excinfo.value) == ( + "MyObjectType.Meta.possible_types will cause type collision with " + "MyObjectType.is_type_of. Please use one or other." + ) + + +def test_objecttype_no_fields_output(): + class User(ObjectType): + name = String() + + class Query(ObjectType): + user = Field(User) + + def resolve_user(self, info): + return User() + + schema = Schema(query=Query) + result = schema.execute( + """ query basequery { + user { + name + } + } + """ + ) + assert not result.errors + assert result.data == {"user": {"name": None}} + + +def test_abstract_objecttype_can_str(): + class MyObjectType(ObjectType): + class Meta: + abstract = True + + field = MyScalar() + + assert str(MyObjectType) == "MyObjectType" + + +def test_objecttype_meta_with_annotations(): + class Query(ObjectType): + class Meta: + name: str = "oops" + + hello = String() + + def resolve_hello(self, info): + return "Hello" + + schema = Schema(query=Query) + assert schema is not None + + +def test_objecttype_meta_arguments(): + class MyInterface(Interface): + foo = String() + + class MyType(ObjectType, interfaces=[MyInterface]): + bar = String() + + assert MyType._meta.interfaces == [MyInterface] + assert list(MyType._meta.fields.keys()) == ["foo", "bar"] + + +def test_objecttype_type_name(): + class MyObjectType(ObjectType, name="FooType"): + pass + + assert MyObjectType._meta.name == "FooType" diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_query.py b/testbed/graphql-python__graphene/graphene/types/tests/test_query.py new file mode 100644 index 0000000000000000000000000000000000000000..e117754fe81602a0609b6d556fcc4f0d7d3dcfdf --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_query.py @@ -0,0 +1,499 @@ +import json +from functools import partial + +from graphql import ( + GraphQLError, + GraphQLResolveInfo as ResolveInfo, + Source, + execute, + parse, +) + +from ..context import Context +from ..dynamic import Dynamic +from ..field import Field +from ..inputfield import InputField +from ..inputobjecttype import InputObjectType +from ..interface import Interface +from ..objecttype import ObjectType +from ..scalars import Boolean, Int, String +from ..schema import Schema +from ..structures import List, NonNull +from ..union import Union + + +def test_query(): + class Query(ObjectType): + hello = String(resolver=lambda *_: "World") + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello }") + assert not executed.errors + assert executed.data == {"hello": "World"} + + +def test_query_source(): + class Root: + _hello = "World" + + def hello(self): + return self._hello + + class Query(ObjectType): + hello = String(source="hello") + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello }", Root()) + assert not executed.errors + assert executed.data == {"hello": "World"} + + +def test_query_union(): + class one_object: + pass + + class two_object: + pass + + class One(ObjectType): + one = String() + + @classmethod + def is_type_of(cls, root, info): 
+ return isinstance(root, one_object) + + class Two(ObjectType): + two = String() + + @classmethod + def is_type_of(cls, root, info): + return isinstance(root, two_object) + + class MyUnion(Union): + class Meta: + types = (One, Two) + + class Query(ObjectType): + unions = List(MyUnion) + + def resolve_unions(self, info): + return [one_object(), two_object()] + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ unions { __typename } }") + assert not executed.errors + assert executed.data == {"unions": [{"__typename": "One"}, {"__typename": "Two"}]} + + +def test_query_interface(): + class one_object: + pass + + class two_object: + pass + + class MyInterface(Interface): + base = String() + + class One(ObjectType): + class Meta: + interfaces = (MyInterface,) + + one = String() + + @classmethod + def is_type_of(cls, root, info): + return isinstance(root, one_object) + + class Two(ObjectType): + class Meta: + interfaces = (MyInterface,) + + two = String() + + @classmethod + def is_type_of(cls, root, info): + return isinstance(root, two_object) + + class Query(ObjectType): + interfaces = List(MyInterface) + + def resolve_interfaces(self, info): + return [one_object(), two_object()] + + hello_schema = Schema(Query, types=[One, Two]) + + executed = hello_schema.execute("{ interfaces { __typename } }") + assert not executed.errors + assert executed.data == { + "interfaces": [{"__typename": "One"}, {"__typename": "Two"}] + } + + +def test_query_dynamic(): + class Query(ObjectType): + hello = Dynamic(lambda: String(resolver=lambda *_: "World")) + hellos = Dynamic(lambda: List(String, resolver=lambda *_: ["Worlds"])) + hello_field = Dynamic(lambda: Field(String, resolver=lambda *_: "Field World")) + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello hellos helloField }") + assert not executed.errors + assert executed.data == { + "hello": "World", + "hellos": ["Worlds"], + "helloField": "Field World", + } + + +def test_query_default_value(): + class MyType(ObjectType): + field = String() + + class Query(ObjectType): + hello = Field(MyType, default_value=MyType(field="something else!")) + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello { field } }") + assert not executed.errors + assert executed.data == {"hello": {"field": "something else!"}} + + +def test_query_wrong_default_value(): + class MyType(ObjectType): + field = String() + + @classmethod + def is_type_of(cls, root, info): + return isinstance(root, MyType) + + class Query(ObjectType): + hello = Field(MyType, default_value="hello") + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello { field } }") + assert len(executed.errors) == 1 + assert ( + executed.errors[0].message + == GraphQLError("Expected value of type 'MyType' but got: 'hello'.").message + ) + assert executed.data == {"hello": None} + + +def test_query_default_value_ignored_by_resolver(): + class MyType(ObjectType): + field = String() + + class Query(ObjectType): + hello = Field( + MyType, + default_value="hello", + resolver=lambda *_: MyType(field="no default."), + ) + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello { field } }") + assert not executed.errors + assert executed.data == {"hello": {"field": "no default."}} + + +def test_query_resolve_function(): + class Query(ObjectType): + hello = String() + + def resolve_hello(self, info): + return "World" + + hello_schema = Schema(Query) + + executed = hello_schema.execute("{ hello }") + assert not 
executed.errors + assert executed.data == {"hello": "World"} + + +def test_query_arguments(): + class Query(ObjectType): + test = String(a_str=String(), a_int=Int()) + + def resolve_test(self, info, **args): + return json.dumps([self, args], separators=(",", ":")) + + test_schema = Schema(Query) + + result = test_schema.execute("{ test }", None) + assert not result.errors + assert result.data == {"test": "[null,{}]"} + + result = test_schema.execute('{ test(aStr: "String!") }', "Source!") + assert not result.errors + assert result.data == {"test": '["Source!",{"a_str":"String!"}]'} + + result = test_schema.execute('{ test(aInt: -123, aStr: "String!") }', "Source!") + assert not result.errors + assert result.data in [ + {"test": '["Source!",{"a_str":"String!","a_int":-123}]'}, + {"test": '["Source!",{"a_int":-123,"a_str":"String!"}]'}, + ] + + +def test_query_input_field(): + class Input(InputObjectType): + a_field = String() + recursive_field = InputField(lambda: Input) + + class Query(ObjectType): + test = String(a_input=Input()) + + def resolve_test(self, info, **args): + return json.dumps([self, args], separators=(",", ":")) + + test_schema = Schema(Query) + + result = test_schema.execute("{ test }", None) + assert not result.errors + assert result.data == {"test": "[null,{}]"} + + result = test_schema.execute('{ test(aInput: {aField: "String!"} ) }', "Source!") + assert not result.errors + assert result.data == {"test": '["Source!",{"a_input":{"a_field":"String!"}}]'} + + result = test_schema.execute( + '{ test(aInput: {recursiveField: {aField: "String!"}}) }', "Source!" + ) + assert not result.errors + assert result.data == { + "test": '["Source!",{"a_input":{"recursive_field":{"a_field":"String!"}}}]' + } + + +def test_query_middlewares(): + class Query(ObjectType): + hello = String() + other = String() + + def resolve_hello(self, info): + return "World" + + def resolve_other(self, info): + return "other" + + def reversed_middleware(next, *args, **kwargs): + return next(*args, **kwargs)[::-1] + + hello_schema = Schema(Query) + + executed = hello_schema.execute( + "{ hello, other }", middleware=[reversed_middleware] + ) + assert not executed.errors + assert executed.data == {"hello": "dlroW", "other": "rehto"} + + +def test_objecttype_on_instances(): + class Ship: + def __init__(self, name): + self.name = name + + class ShipType(ObjectType): + name = String(description="Ship name", required=True) + + def resolve_name(self, info): + # Here self will be the Ship instance returned in resolve_ship + return self.name + + class Query(ObjectType): + ship = Field(ShipType) + + def resolve_ship(self, info): + return Ship(name="xwing") + + schema = Schema(query=Query) + executed = schema.execute("{ ship { name } }") + assert not executed.errors + assert executed.data == {"ship": {"name": "xwing"}} + + +def test_big_list_query_benchmark(benchmark): + big_list = range(10000) + + class Query(ObjectType): + all_ints = List(Int) + + def resolve_all_ints(self, info): + return big_list + + hello_schema = Schema(Query) + + big_list_query = partial(hello_schema.execute, "{ allInts }") + result = benchmark(big_list_query) + assert not result.errors + assert result.data == {"allInts": list(big_list)} + + +def test_big_list_query_compiled_query_benchmark(benchmark): + big_list = range(100000) + + class Query(ObjectType): + all_ints = List(Int) + + def resolve_all_ints(self, info): + return big_list + + hello_schema = Schema(Query) + graphql_schema = hello_schema.graphql_schema + source = Source("{ allInts 
}") + query_ast = parse(source) + + big_list_query = partial(execute, graphql_schema, query_ast) + result = benchmark(big_list_query) + assert not result.errors + assert result.data == {"allInts": list(big_list)} + + +def test_big_list_of_containers_query_benchmark(benchmark): + class Container(ObjectType): + x = Int() + + big_container_list = [Container(x=x) for x in range(1000)] + + class Query(ObjectType): + all_containers = List(Container) + + def resolve_all_containers(self, info): + return big_container_list + + hello_schema = Schema(Query) + + big_list_query = partial(hello_schema.execute, "{ allContainers { x } }") + result = benchmark(big_list_query) + assert not result.errors + assert result.data == {"allContainers": [{"x": c.x} for c in big_container_list]} + + +def test_big_list_of_containers_multiple_fields_query_benchmark(benchmark): + class Container(ObjectType): + x = Int() + y = Int() + z = Int() + o = Int() + + big_container_list = [Container(x=x, y=x, z=x, o=x) for x in range(1000)] + + class Query(ObjectType): + all_containers = List(Container) + + def resolve_all_containers(self, info): + return big_container_list + + hello_schema = Schema(Query) + + big_list_query = partial(hello_schema.execute, "{ allContainers { x, y, z, o } }") + result = benchmark(big_list_query) + assert not result.errors + assert result.data == { + "allContainers": [ + {"x": c.x, "y": c.y, "z": c.z, "o": c.o} for c in big_container_list + ] + } + + +def test_big_list_of_containers_multiple_fields_custom_resolvers_query_benchmark( + benchmark, +): + class Container(ObjectType): + x = Int() + y = Int() + z = Int() + o = Int() + + def resolve_x(self, info): + return self.x + + def resolve_y(self, info): + return self.y + + def resolve_z(self, info): + return self.z + + def resolve_o(self, info): + return self.o + + big_container_list = [Container(x=x, y=x, z=x, o=x) for x in range(1000)] + + class Query(ObjectType): + all_containers = List(Container) + + def resolve_all_containers(self, info): + return big_container_list + + hello_schema = Schema(Query) + + big_list_query = partial(hello_schema.execute, "{ allContainers { x, y, z, o } }") + result = benchmark(big_list_query) + assert not result.errors + assert result.data == { + "allContainers": [ + {"x": c.x, "y": c.y, "z": c.z, "o": c.o} for c in big_container_list + ] + } + + +def test_query_annotated_resolvers(): + context = Context(key="context") + + class Query(ObjectType): + annotated = String(id=String()) + context = String() + info = String() + + def resolve_annotated(self, info, id): + return f"{self}-{id}" + + def resolve_context(self, info): + assert isinstance(info.context, Context) + return f"{self}-{info.context.key}" + + def resolve_info(self, info): + assert isinstance(info, ResolveInfo) + return f"{self}-{info.field_name}" + + test_schema = Schema(Query) + + result = test_schema.execute('{ annotated(id:"self") }', "base") + assert not result.errors + assert result.data == {"annotated": "base-self"} + + result = test_schema.execute("{ context }", "base", context=context) + assert not result.errors + assert result.data == {"context": "base-context"} + + result = test_schema.execute("{ info }", "base") + assert not result.errors + assert result.data == {"info": "base-info"} + + +def test_default_as_kwarg_to_NonNull(): + # Related to https://github.com/graphql-python/graphene/issues/702 + class User(ObjectType): + name = String() + is_admin = NonNull(Boolean, default_value=False) + + class Query(ObjectType): + user = Field(User) + + 
def resolve_user(self, *args, **kwargs): + return User(name="foo") + + schema = Schema(query=Query) + expected = {"user": {"name": "foo", "isAdmin": False}} + result = schema.execute("{ user { name isAdmin } }") + + assert not result.errors + assert result.data == expected diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_resolver.py b/testbed/graphql-python__graphene/graphene/types/tests/test_resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..dcadb6d8d39d4c0cb6d521990c4c8ea1fe48623e --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_resolver.py @@ -0,0 +1,58 @@ +from ..resolver import ( + attr_resolver, + dict_resolver, + dict_or_attr_resolver, + get_default_resolver, + set_default_resolver, +) + +args = {} +context = None +info = None + +demo_dict = {"attr": "value"} + + +class demo_obj: + attr = "value" + + +def test_attr_resolver(): + resolved = attr_resolver("attr", None, demo_obj, info, **args) + assert resolved == "value" + + +def test_attr_resolver_default_value(): + resolved = attr_resolver("attr2", "default", demo_obj, info, **args) + assert resolved == "default" + + +def test_dict_resolver(): + resolved = dict_resolver("attr", None, demo_dict, info, **args) + assert resolved == "value" + + +def test_dict_resolver_default_value(): + resolved = dict_resolver("attr2", "default", demo_dict, info, **args) + assert resolved == "default" + + +def test_dict_or_attr_resolver(): + resolved = dict_or_attr_resolver("attr", None, demo_dict, info, **args) + assert resolved == "value" + + resolved = dict_or_attr_resolver("attr", None, demo_obj, info, **args) + assert resolved == "value" + + +def test_get_default_resolver_is_attr_resolver(): + assert get_default_resolver() == dict_or_attr_resolver + + +def test_set_default_resolver_workd(): + default_resolver = get_default_resolver() + + set_default_resolver(dict_resolver) + assert get_default_resolver() == dict_resolver + + set_default_resolver(default_resolver) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_scalar.py b/testbed/graphql-python__graphene/graphene/types/tests/test_scalar.py new file mode 100644 index 0000000000000000000000000000000000000000..cbdfd8c5057d08199c6ecbe62264b58db0b33800 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_scalar.py @@ -0,0 +1,308 @@ +from ..objecttype import ObjectType, Field +from ..scalars import Scalar, Int, BigInt, Float, String, Boolean +from ..schema import Schema +from graphql import Undefined +from graphql.language.ast import IntValueNode + + +def test_scalar(): + class JSONScalar(Scalar): + """Documentation""" + + assert JSONScalar._meta.name == "JSONScalar" + assert JSONScalar._meta.description == "Documentation" + + +def test_ints(): + assert Int.parse_value(2**31 - 1) is not Undefined + assert Int.parse_value("2.0") == 2 + assert Int.parse_value(2**31) is Undefined + + assert Int.parse_literal(IntValueNode(value=str(2**31 - 1))) == 2**31 - 1 + assert Int.parse_literal(IntValueNode(value=str(2**31))) is Undefined + + assert Int.parse_value(-(2**31)) is not Undefined + assert Int.parse_value(-(2**31) - 1) is Undefined + + assert BigInt.parse_value(2**31) is not Undefined + assert BigInt.parse_value("2.0") == 2 + assert BigInt.parse_value(-(2**31) - 1) is not Undefined + + assert BigInt.parse_literal(IntValueNode(value=str(2**31 - 1))) == 2**31 - 1 + assert BigInt.parse_literal(IntValueNode(value=str(2**31))) == 2**31 + + +def return_input(_parent, _info, input): 
+ return input + + +class Optional(ObjectType): + int = Int(input=Int(), resolver=return_input) + big_int = BigInt(input=BigInt(), resolver=return_input) + float = Float(input=Float(), resolver=return_input) + bool = Boolean(input=Boolean(), resolver=return_input) + string = String(input=String(), resolver=return_input) + + +class Query(ObjectType): + optional = Field(Optional) + + def resolve_optional(self, info): + return Optional() + + def resolve_required(self, info, input): + return input + + +schema = Schema(query=Query) + + +class TestInt: + def test_query(self): + """ + Test that a normal query works. + """ + result = schema.execute("{ optional { int(input: 20) } }") + assert not result.errors + assert result.data == {"optional": {"int": 20}} + + def test_optional_input(self): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ optional { int(input: null) } }") + assert not result.errors + assert result.data == {"optional": {"int": None}} + + def test_invalid_input(self): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute('{ optional { int(input: "20") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == 'Int cannot represent non-integer value: "20"' + ) + + result = schema.execute('{ optional { int(input: "a") } }') + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == 'Int cannot represent non-integer value: "a"' + + result = schema.execute("{ optional { int(input: true) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == "Int cannot represent non-integer value: true" + ) + + +class TestBigInt: + def test_query(self): + """ + Test that a normal query works. + """ + value = 2**31 + result = schema.execute("{ optional { bigInt(input: %s) } }" % value) + assert not result.errors + assert result.data == {"optional": {"bigInt": value}} + + def test_optional_input(self): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ optional { bigInt(input: null) } }") + assert not result.errors + assert result.data == {"optional": {"bigInt": None}} + + def test_invalid_input(self): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute('{ optional { bigInt(input: "20") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == "Expected value of type 'BigInt', found \"20\"." + ) + + result = schema.execute('{ optional { bigInt(input: "a") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == "Expected value of type 'BigInt', found \"a\"." + ) + + result = schema.execute("{ optional { bigInt(input: true) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == "Expected value of type 'BigInt', found true." + ) + + +class TestFloat: + def test_query(self): + """ + Test that a normal query works. 
+ """ + result = schema.execute("{ optional { float(input: 20) } }") + assert not result.errors + assert result.data == {"optional": {"float": 20.0}} + + result = schema.execute("{ optional { float(input: 20.2) } }") + assert not result.errors + assert result.data == {"optional": {"float": 20.2}} + + def test_optional_input(self): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ optional { float(input: null) } }") + assert not result.errors + assert result.data == {"optional": {"float": None}} + + def test_invalid_input(self): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute('{ optional { float(input: "20") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == 'Float cannot represent non numeric value: "20"' + ) + + result = schema.execute('{ optional { float(input: "a") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == 'Float cannot represent non numeric value: "a"' + ) + + result = schema.execute("{ optional { float(input: true) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == "Float cannot represent non numeric value: true" + ) + + +class TestBoolean: + def test_query(self): + """ + Test that a normal query works. + """ + result = schema.execute("{ optional { bool(input: true) } }") + assert not result.errors + assert result.data == {"optional": {"bool": True}} + + result = schema.execute("{ optional { bool(input: false) } }") + assert not result.errors + assert result.data == {"optional": {"bool": False}} + + def test_optional_input(self): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ optional { bool(input: null) } }") + assert not result.errors + assert result.data == {"optional": {"bool": None}} + + def test_invalid_input(self): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute('{ optional { bool(input: "True") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == 'Boolean cannot represent a non boolean value: "True"' + ) + + result = schema.execute('{ optional { bool(input: "true") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == 'Boolean cannot represent a non boolean value: "true"' + ) + + result = schema.execute('{ optional { bool(input: "a") } }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == 'Boolean cannot represent a non boolean value: "a"' + ) + + result = schema.execute("{ optional { bool(input: 1) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == "Boolean cannot represent a non boolean value: 1" + ) + + result = schema.execute("{ optional { bool(input: 0) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == "Boolean cannot represent a non boolean value: 0" + ) + + +class TestString: + def test_query(self): + """ + Test that a normal query works. 
+ """ + result = schema.execute('{ optional { string(input: "something something") } }') + assert not result.errors + assert result.data == {"optional": {"string": "something something"}} + + result = schema.execute('{ optional { string(input: "True") } }') + assert not result.errors + assert result.data == {"optional": {"string": "True"}} + + result = schema.execute('{ optional { string(input: "0") } }') + assert not result.errors + assert result.data == {"optional": {"string": "0"}} + + def test_optional_input(self): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ optional { string(input: null) } }") + assert not result.errors + assert result.data == {"optional": {"string": None}} + + def test_invalid_input(self): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute("{ optional { string(input: 1) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message == "String cannot represent a non string value: 1" + ) + + result = schema.execute("{ optional { string(input: 3.2) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == "String cannot represent a non string value: 3.2" + ) + + result = schema.execute("{ optional { string(input: true) } }") + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == "String cannot represent a non string value: true" + ) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_scalars_serialization.py b/testbed/graphql-python__graphene/graphene/types/tests/test_scalars_serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..a0028c85d1d51eb929aae4bd5dc4647473e1e813 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_scalars_serialization.py @@ -0,0 +1,51 @@ +from graphql import Undefined +from ..scalars import Boolean, Float, Int, String + + +def test_serializes_output_int(): + assert Int.serialize(1) == 1 + assert Int.serialize(0) == 0 + assert Int.serialize(-1) == -1 + assert Int.serialize(0.1) == 0 + assert Int.serialize(1.1) == 1 + assert Int.serialize(-1.1) == -1 + assert Int.serialize(1e5) == 100000 + assert Int.serialize(9876504321) is Undefined + assert Int.serialize(-9876504321) is Undefined + assert Int.serialize(1e100) is Undefined + assert Int.serialize(-1e100) is Undefined + assert Int.serialize("-1.1") == -1 + assert Int.serialize("one") is Undefined + assert Int.serialize(False) == 0 + assert Int.serialize(True) == 1 + + +def test_serializes_output_float(): + assert Float.serialize(1) == 1.0 + assert Float.serialize(0) == 0.0 + assert Float.serialize(-1) == -1.0 + assert Float.serialize(0.1) == 0.1 + assert Float.serialize(1.1) == 1.1 + assert Float.serialize(-1.1) == -1.1 + assert Float.serialize("-1.1") == -1.1 + assert Float.serialize("one") is Undefined + assert Float.serialize(False) == 0 + assert Float.serialize(True) == 1 + + +def test_serializes_output_string(): + assert String.serialize("string") == "string" + assert String.serialize(1) == "1" + assert String.serialize(-1.1) == "-1.1" + assert String.serialize(True) == "true" + assert String.serialize(False) == "false" + assert String.serialize("\U0001F601") == "\U0001F601" + + +def test_serializes_output_boolean(): + assert Boolean.serialize("string") is True + assert Boolean.serialize("") is False + assert Boolean.serialize(1) is True + assert Boolean.serialize(0) is False + assert 
Boolean.serialize(True) is True + assert Boolean.serialize(False) is False diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_schema.py b/testbed/graphql-python__graphene/graphene/types/tests/test_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..c03c81ba0a4b15baa62ab7a9a286dec81cfba1ca --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_schema.py @@ -0,0 +1,74 @@ +from textwrap import dedent + +from pytest import raises + +from graphql.type import GraphQLObjectType, GraphQLSchema + +from ..field import Field +from ..objecttype import ObjectType +from ..scalars import String +from ..schema import Schema + + +class MyOtherType(ObjectType): + field = String() + + +class Query(ObjectType): + inner = Field(MyOtherType) + + +def test_schema(): + schema = Schema(Query) + graphql_schema = schema.graphql_schema + assert isinstance(graphql_schema, GraphQLSchema) + query_type = graphql_schema.query_type + assert isinstance(query_type, GraphQLObjectType) + assert query_type.name == "Query" + assert query_type.graphene_type is Query + + +def test_schema_get_type(): + schema = Schema(Query) + assert schema.Query == Query + assert schema.MyOtherType == MyOtherType + + +def test_schema_get_type_error(): + schema = Schema(Query) + with raises(AttributeError) as exc_info: + schema.X + + assert str(exc_info.value) == 'Type "X" not found in the Schema' + + +def test_schema_str(): + schema = Schema(Query) + assert ( + str(schema).strip() + == dedent( + """ + type Query { + inner: MyOtherType + } + + type MyOtherType { + field: String + } + """ + ).strip() + ) + + +def test_schema_introspect(): + schema = Schema(Query) + assert "__schema" in schema.introspect() + + +def test_schema_requires_query_type(): + schema = Schema() + result = schema.execute("query {}") + + assert len(result.errors) == 1 + error = result.errors[0] + assert error.message == "Query root type must be provided." diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_structures.py b/testbed/graphql-python__graphene/graphene/types/tests/test_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..88f3ff1da90ed83fbfee299a5360382458665097 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_structures.py @@ -0,0 +1,129 @@ +from functools import partial + +from pytest import raises + +from ..scalars import String +from ..structures import List, NonNull +from .utils import MyLazyType + + +def test_list(): + _list = List(String) + assert _list.of_type == String + assert str(_list) == "[String]" + + +def test_list_with_unmounted_type(): + with raises(Exception) as exc_info: + List(String()) + + assert ( + str(exc_info.value) + == "List could not have a mounted String() as inner type. Try with List(String)." 
+ ) + + +def test_list_with_lazy_type(): + MyType = object() + field = List(lambda: MyType) + assert field.of_type == MyType + + +def test_list_with_lazy_partial_type(): + MyType = object() + field = List(partial(lambda: MyType)) + assert field.of_type == MyType + + +def test_list_with_string_type(): + field = List("graphene.types.tests.utils.MyLazyType") + assert field.of_type == MyLazyType + + +def test_list_inherited_works_list(): + _list = List(List(String)) + assert isinstance(_list.of_type, List) + assert _list.of_type.of_type == String + + +def test_list_inherited_works_nonnull(): + _list = List(NonNull(String)) + assert isinstance(_list.of_type, NonNull) + assert _list.of_type.of_type == String + + +def test_nonnull(): + nonnull = NonNull(String) + assert nonnull.of_type == String + assert str(nonnull) == "String!" + + +def test_nonnull_with_lazy_type(): + MyType = object() + field = NonNull(lambda: MyType) + assert field.of_type == MyType + + +def test_nonnull_with_lazy_partial_type(): + MyType = object() + field = NonNull(partial(lambda: MyType)) + assert field.of_type == MyType + + +def test_nonnull_with_string_type(): + field = NonNull("graphene.types.tests.utils.MyLazyType") + assert field.of_type == MyLazyType + + +def test_nonnull_inherited_works_list(): + _list = NonNull(List(String)) + assert isinstance(_list.of_type, List) + assert _list.of_type.of_type == String + + +def test_nonnull_inherited_dont_work_nonnull(): + with raises(Exception) as exc_info: + NonNull(NonNull(String)) + + assert ( + str(exc_info.value) + == "Can only create NonNull of a Nullable GraphQLType but got: String!." + ) + + +def test_nonnull_with_unmounted_type(): + with raises(Exception) as exc_info: + NonNull(String()) + + assert ( + str(exc_info.value) + == "NonNull could not have a mounted String() as inner type. Try with NonNull(String)." + ) + + +def test_list_comparison(): + list1 = List(String) + list2 = List(String) + list3 = List(None) + + list1_argskwargs = List(String, None, b=True) + list2_argskwargs = List(String, None, b=True) + + assert list1 == list2 + assert list1 != list3 + assert list1_argskwargs == list2_argskwargs + assert list1 != list1_argskwargs + + +def test_nonnull_comparison(): + nonnull1 = NonNull(String) + nonnull2 = NonNull(String) + nonnull3 = NonNull(None) + + nonnull1_argskwargs = NonNull(String, None, b=True) + nonnull2_argskwargs = NonNull(String, None, b=True) + + assert nonnull1 == nonnull2 + assert nonnull1 != nonnull3 + assert nonnull1_argskwargs == nonnull2_argskwargs + assert nonnull1 != nonnull1_argskwargs diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_subscribe_async.py b/testbed/graphql-python__graphene/graphene/types/tests/test_subscribe_async.py new file mode 100644 index 0000000000000000000000000000000000000000..50e5ba68ecfb3ff00b79b16b22c52bece4dc1fa1 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_subscribe_async.py @@ -0,0 +1,78 @@ +from pytest import mark + +from graphene import ObjectType, Int, String, Schema, Field + + +class Query(ObjectType): + hello = String() + + def resolve_hello(root, info): + return "Hello, world!"
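+# Usage sketch (mirroring the tests below): each `subscribe_<field>` method +# on a Subscription ObjectType is an async generator, and +# `schema.subscribe(...)` resolves to an async iterator of ExecutionResults, +# one result per yielded value: +# +# result = await schema.subscribe("subscription { countToTen }") +# async for item in result: +# item.data["countToTen"] # 1, 2, ..., 10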
+ + +class Subscription(ObjectType): + count_to_ten = Field(Int) + + async def subscribe_count_to_ten(root, info): + for count in range(1, 11): + yield count + + +schema = Schema(query=Query, subscription=Subscription) + + +@mark.asyncio +async def test_subscription(): + subscription = "subscription { countToTen }" + result = await schema.subscribe(subscription) + count = 0 + async for item in result: + count = item.data["countToTen"] + assert count == 10 + + +@mark.asyncio +async def test_subscription_fails_with_invalid_query(): + # It fails if the provided query is invalid + subscription = "subscription { " + result = await schema.subscribe(subscription) + assert not result.data + assert result.errors + assert "Syntax Error: Expected Name, found " in str(result.errors[0]) + + +@mark.asyncio +async def test_subscription_fails_when_query_is_not_valid(): + # It can't subscribe to two fields at the same time, triggering a + # validation error. + subscription = "subscription { countToTen, b: countToTen }" + result = await schema.subscribe(subscription) + assert not result.data + assert result.errors + assert "Anonymous Subscription must select only one top level field." in str( + result.errors[0] + ) + + +@mark.asyncio +async def test_subscription_with_args(): + class Query(ObjectType): + hello = String() + + class Subscription(ObjectType): + count_upwards = Field(Int, limit=Int(required=True)) + + async def subscribe_count_upwards(root, info, limit): + count = 0 + while count < limit: + count += 1 + yield count + + schema = Schema(query=Query, subscription=Subscription) + + subscription = "subscription { countUpwards(limit: 5) }" + result = await schema.subscribe(subscription) + count = 0 + async for item in result: + count = item.data["countUpwards"] + assert count == 5 diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_type_map.py b/testbed/graphql-python__graphene/graphene/types/tests/test_type_map.py new file mode 100644 index 0000000000000000000000000000000000000000..55b1706e08ac832a3006f5ec88c379d700a2bc88 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_type_map.py @@ -0,0 +1,321 @@ +from graphql import Undefined +from graphql.type import ( + GraphQLArgument, + GraphQLEnumType, + GraphQLEnumValue, + GraphQLField, + GraphQLInputField, + GraphQLInputObjectType, + GraphQLInterfaceType, + GraphQLNonNull, + GraphQLObjectType, + GraphQLString, +) + +from ..dynamic import Dynamic +from ..enum import Enum +from ..field import Field +from ..inputfield import InputField +from ..inputobjecttype import InputObjectType +from ..interface import Interface +from ..objecttype import ObjectType +from ..scalars import Int, String +from ..structures import List, NonNull +from ..schema import Schema + + +def create_type_map(types, auto_camelcase=True): + query = type("Query", (ObjectType,), {}) + schema = Schema(query, types=types, auto_camelcase=auto_camelcase) + return schema.graphql_schema.type_map + + +def test_enum(): + class MyEnum(Enum): + """Description""" + + foo = 1 + bar = 2 + + @property + def description(self): + return f"Description {self.name}={self.value}" + + @property + def deprecation_reason(self): + if self == MyEnum.foo: + return "Is deprecated" + + type_map = create_type_map([MyEnum]) + assert "MyEnum" in type_map + graphql_enum = type_map["MyEnum"] + assert isinstance(graphql_enum, GraphQLEnumType) + assert graphql_enum.name == "MyEnum" + assert graphql_enum.description == "Description" + assert graphql_enum.values == { + "foo": 
GraphQLEnumValue( + value=1, description="Description foo=1", deprecation_reason="Is deprecated" + ), + "bar": GraphQLEnumValue(value=2, description="Description bar=2"), + } + + +def test_objecttype(): + class MyObjectType(ObjectType): + """Description""" + + foo = String( + bar=String(description="Argument description", default_value="x"), + description="Field description", + ) + bar = String(name="gizmo") + + def resolve_foo(self, bar): + return bar + + type_map = create_type_map([MyObjectType]) + assert "MyObjectType" in type_map + graphql_type = type_map["MyObjectType"] + assert isinstance(graphql_type, GraphQLObjectType) + assert graphql_type.name == "MyObjectType" + assert graphql_type.description == "Description" + + fields = graphql_type.fields + assert list(fields) == ["foo", "gizmo"] + foo_field = fields["foo"] + assert isinstance(foo_field, GraphQLField) + assert foo_field.description == "Field description" + + assert foo_field.args == { + "bar": GraphQLArgument( + GraphQLString, + description="Argument description", + default_value="x", + out_name="bar", + ) + } + + +def test_required_argument_with_default_value(): + class MyObjectType(ObjectType): + foo = String(bar=String(required=True, default_value="x")) + + type_map = create_type_map([MyObjectType]) + + graphql_type = type_map["MyObjectType"] + foo_field = graphql_type.fields["foo"] + + bar_argument = foo_field.args["bar"] + assert bar_argument.default_value == "x" + assert isinstance(bar_argument.type, GraphQLNonNull) + assert bar_argument.type.of_type == GraphQLString + + +def test_dynamic_objecttype(): + class MyObjectType(ObjectType): + """Description""" + + bar = Dynamic(lambda: Field(String)) + own = Field(lambda: MyObjectType) + + type_map = create_type_map([MyObjectType]) + assert "MyObjectType" in type_map + assert list(MyObjectType._meta.fields) == ["bar", "own"] + graphql_type = type_map["MyObjectType"] + + fields = graphql_type.fields + assert list(fields) == ["bar", "own"] + assert fields["bar"].type == GraphQLString + assert fields["own"].type == graphql_type + + +def test_interface(): + class MyInterface(Interface): + """Description""" + + foo = String( + bar=String(description="Argument description", default_value="x"), + description="Field description", + ) + bar = String(name="gizmo", first_arg=String(), other_arg=String(name="oth_arg")) + own = Field(lambda: MyInterface) + + def resolve_foo(self, args, info): + return args.get("bar") + + type_map = create_type_map([MyInterface]) + assert "MyInterface" in type_map + graphql_type = type_map["MyInterface"] + assert isinstance(graphql_type, GraphQLInterfaceType) + assert graphql_type.name == "MyInterface" + assert graphql_type.description == "Description" + + fields = graphql_type.fields + assert list(fields) == ["foo", "gizmo", "own"] + assert fields["own"].type == graphql_type + assert list(fields["gizmo"].args) == ["firstArg", "oth_arg"] + foo_field = fields["foo"] + assert isinstance(foo_field, GraphQLField) + assert foo_field.description == "Field description" + assert not foo_field.resolve # Resolver not attached in interfaces + assert foo_field.args == { + "bar": GraphQLArgument( + GraphQLString, + description="Argument description", + default_value="x", + out_name="bar", + ) + } + + +def test_inputobject(): + class OtherObjectType(InputObjectType): + thingy = NonNull(Int) + + class MyInnerObjectType(InputObjectType): + some_field = String() + some_other_field = List(OtherObjectType) + + class MyInputObjectType(InputObjectType): + """Description""" + 
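+ # (With the default auto_camelcase=True, snake_case attributes such as +# foo_bar are exposed as camelCase fields like fooBar unless an explicit +# name= is supplied; see the `fields` assertions further down.)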
+ foo_bar = String(description="Field description") + bar = String(name="gizmo") + baz = NonNull(MyInnerObjectType) + own = InputField(lambda: MyInputObjectType) + + def resolve_foo_bar(self, args, info): + return args.get("bar") + + type_map = create_type_map([MyInputObjectType]) + assert "MyInputObjectType" in type_map + graphql_type = type_map["MyInputObjectType"] + assert isinstance(graphql_type, GraphQLInputObjectType) + assert graphql_type.name == "MyInputObjectType" + assert graphql_type.description == "Description" + + other_graphql_type = type_map["OtherObjectType"] + inner_graphql_type = type_map["MyInnerObjectType"] + container = graphql_type.out_type( + { + "bar": "oh!", + "baz": inner_graphql_type.out_type( + { + "some_other_field": [ + other_graphql_type.out_type({"thingy": 1}), + other_graphql_type.out_type({"thingy": 2}), + ] + } + ), + } + ) + assert isinstance(container, MyInputObjectType) + assert "bar" in container + assert container.bar == "oh!" + assert "foo_bar" not in container + assert container.foo_bar is None + assert container.baz.some_field is None + assert container.baz.some_other_field[0].thingy == 1 + assert container.baz.some_other_field[1].thingy == 2 + + fields = graphql_type.fields + assert list(fields) == ["fooBar", "gizmo", "baz", "own"] + own_field = fields["own"] + assert own_field.type == graphql_type + foo_field = fields["fooBar"] + assert isinstance(foo_field, GraphQLInputField) + assert foo_field.description == "Field description" + + +def test_objecttype_camelcase(): + class MyObjectType(ObjectType): + """Description""" + + foo_bar = String(bar_foo=String()) + + type_map = create_type_map([MyObjectType]) + assert "MyObjectType" in type_map + graphql_type = type_map["MyObjectType"] + assert isinstance(graphql_type, GraphQLObjectType) + assert graphql_type.name == "MyObjectType" + assert graphql_type.description == "Description" + + fields = graphql_type.fields + assert list(fields) == ["fooBar"] + foo_field = fields["fooBar"] + assert isinstance(foo_field, GraphQLField) + assert foo_field.args == { + "barFoo": GraphQLArgument( + GraphQLString, default_value=Undefined, out_name="bar_foo" + ) + } + + +def test_objecttype_camelcase_disabled(): + class MyObjectType(ObjectType): + """Description""" + + foo_bar = String(bar_foo=String()) + + type_map = create_type_map([MyObjectType], auto_camelcase=False) + assert "MyObjectType" in type_map + graphql_type = type_map["MyObjectType"] + assert isinstance(graphql_type, GraphQLObjectType) + assert graphql_type.name == "MyObjectType" + assert graphql_type.description == "Description" + + fields = graphql_type.fields + assert list(fields) == ["foo_bar"] + foo_field = fields["foo_bar"] + assert isinstance(foo_field, GraphQLField) + assert foo_field.args == { + "bar_foo": GraphQLArgument( + GraphQLString, default_value=Undefined, out_name="bar_foo" + ) + } + + +def test_objecttype_with_possible_types(): + class MyObjectType(ObjectType): + """Description""" + + class Meta: + possible_types = (dict,) + + foo_bar = String() + + type_map = create_type_map([MyObjectType]) + graphql_type = type_map["MyObjectType"] + assert graphql_type.is_type_of + assert graphql_type.is_type_of({}, None) is True + assert graphql_type.is_type_of(MyObjectType(), None) is False + + +def test_interface_with_interfaces(): + class FooInterface(Interface): + foo = String() + + class BarInterface(Interface): + class Meta: + interfaces = [FooInterface] + + foo = String() + bar = String() + + type_map = create_type_map([FooInterface, 
BarInterface]) + assert "FooInterface" in type_map + foo_graphql_type = type_map["FooInterface"] + assert isinstance(foo_graphql_type, GraphQLInterfaceType) + assert foo_graphql_type.name == "FooInterface" + + assert "BarInterface" in type_map + bar_graphql_type = type_map["BarInterface"] + assert isinstance(bar_graphql_type, GraphQLInterfaceType) + assert bar_graphql_type.name == "BarInterface" + + fields = bar_graphql_type.fields + assert list(fields) == ["foo", "bar"] + assert isinstance(fields["foo"], GraphQLField) + assert isinstance(fields["bar"], GraphQLField) + + assert list(bar_graphql_type.interfaces) == list([foo_graphql_type]) diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_union.py b/testbed/graphql-python__graphene/graphene/types/tests/test_union.py new file mode 100644 index 0000000000000000000000000000000000000000..4d642d6f51e5f503318da238abf1d6970ccf1175 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_union.py @@ -0,0 +1,58 @@ +from pytest import raises + +from ..field import Field +from ..objecttype import ObjectType +from ..union import Union +from ..unmountedtype import UnmountedType + + +class MyObjectType1(ObjectType): + pass + + +class MyObjectType2(ObjectType): + pass + + +def test_generate_union(): + class MyUnion(Union): + """Documentation""" + + class Meta: + types = (MyObjectType1, MyObjectType2) + + assert MyUnion._meta.name == "MyUnion" + assert MyUnion._meta.description == "Documentation" + assert MyUnion._meta.types == (MyObjectType1, MyObjectType2) + + +def test_generate_union_with_meta(): + class MyUnion(Union): + class Meta: + name = "MyOtherUnion" + description = "Documentation" + types = (MyObjectType1, MyObjectType2) + + assert MyUnion._meta.name == "MyOtherUnion" + assert MyUnion._meta.description == "Documentation" + + +def test_generate_union_with_no_types(): + with raises(Exception) as exc_info: + + class MyUnion(Union): + pass + + assert str(exc_info.value) == "Must provide types for Union MyUnion." 
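+# Resolution sketch (based on Union.resolve_type in union.py further below): +# by default a Union resolves the concrete member from the Python instance, +# so a resolver returning MyObjectType1() needs no resolve_type override: +# +# class MyUnion(Union): +# class Meta: +# types = (MyObjectType1, MyObjectType2) +# +# assert MyUnion.resolve_type(MyObjectType1(), None) is MyObjectType1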
+ + +def test_union_can_be_mounted(): + class MyUnion(Union): + class Meta: + types = (MyObjectType1, MyObjectType2) + + my_union_instance = MyUnion() + assert isinstance(my_union_instance, UnmountedType) + my_union_field = my_union_instance.mount_as(Field) + assert isinstance(my_union_field, Field) + assert my_union_field.type == MyUnion diff --git a/testbed/graphql-python__graphene/graphene/types/tests/test_uuid.py b/testbed/graphql-python__graphene/graphene/types/tests/test_uuid.py new file mode 100644 index 0000000000000000000000000000000000000000..d34f16642d1fec34e5008d105f8075cb98cc5fb8 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/test_uuid.py @@ -0,0 +1,68 @@ +from ..objecttype import ObjectType +from ..schema import Schema +from ..uuid import UUID +from ..structures import NonNull + + +class Query(ObjectType): + uuid = UUID(input=UUID()) + required_uuid = UUID(input=NonNull(UUID), required=True) + + def resolve_uuid(self, info, input): + return input + + def resolve_required_uuid(self, info, input): + return input + + +schema = Schema(query=Query) + + +def test_uuidstring_query(): + uuid_value = "dfeb3bcf-70fd-11e7-a61a-6003088f8204" + result = schema.execute("""{ uuid(input: "%s") }""" % uuid_value) + assert not result.errors + assert result.data == {"uuid": uuid_value} + + +def test_uuidstring_query_variable(): + uuid_value = "dfeb3bcf-70fd-11e7-a61a-6003088f8204" + + result = schema.execute( + """query Test($uuid: UUID){ uuid(input: $uuid) }""", + variables={"uuid": uuid_value}, + ) + assert not result.errors + assert result.data == {"uuid": uuid_value} + + +def test_uuidstring_optional_uuid_input(): + """ + Test that we can provide a null value to an optional input + """ + result = schema.execute("{ uuid(input: null) }") + assert not result.errors + assert result.data == {"uuid": None} + + +def test_uuidstring_invalid_query(): + """ + Test that if an invalid type is provided we get an error + """ + result = schema.execute("{ uuid(input: 1) }") + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == "Expected value of type 'UUID', found 1." + + result = schema.execute('{ uuid(input: "a") }') + assert result.errors + assert len(result.errors) == 1 + assert ( + result.errors[0].message + == "Expected value of type 'UUID', found \"a\"; badly formed hexadecimal UUID string" + ) + + result = schema.execute("{ requiredUuid(input: null) }") + assert result.errors + assert len(result.errors) == 1 + assert result.errors[0].message == "Expected value of type 'UUID!', found null." 
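+# Parsing sketch (assumptions match the schema above): UUID.parse_value turns +# the incoming string into a native uuid.UUID before the resolver runs, and +# UUID.serialize stringifies it again on the way out: +# +# from uuid import UUID as _UUID +# def resolve_uuid(self, info, input): +# assert isinstance(input, _UUID) # already parsed by the scalar +# return input # serialized back to a string in the response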
diff --git a/testbed/graphql-python__graphene/graphene/types/tests/utils.py b/testbed/graphql-python__graphene/graphene/types/tests/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..83cf49e2063c21ec2a7d013da37f735214db79da --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/tests/utils.py @@ -0,0 +1 @@ +MyLazyType = object() diff --git a/testbed/graphql-python__graphene/graphene/types/union.py b/testbed/graphql-python__graphene/graphene/types/union.py new file mode 100644 index 0000000000000000000000000000000000000000..f77e833abef7c4aee361f3cd660200642179868d --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/union.py @@ -0,0 +1,75 @@ +from .base import BaseOptions, BaseType +from .unmountedtype import UnmountedType + +# For static type checking with Mypy +MYPY = False +if MYPY: + from .objecttype import ObjectType # NOQA + from typing import Iterable, Type # NOQA + + +class UnionOptions(BaseOptions): + types = () # type: Iterable[Type[ObjectType]] + + +class Union(UnmountedType, BaseType): + """ + Union Type Definition + + When a field can return one of a heterogeneous set of types, a Union type + is used to describe what types are possible as well as providing a function + to determine which type is actually used when the field is resolved. + + The schema in this example can take a search text and return any of the GraphQL object types + indicated: Human, Droid or Starship. + + Ambiguous return types can be resolved on each ObjectType through the + ``Meta.possible_types`` attribute or the ``is_type_of`` method, or by implementing the + ``resolve_type`` class method on the Union. + + .. code:: python + + from graphene import Union, ObjectType, List + + class SearchResult(Union): + class Meta: + types = (Human, Droid, Starship) + + class Query(ObjectType): + search = List(SearchResult.Field( + search_text=String(description='Value to search for')) + ) + + Meta: + types (Iterable[graphene.ObjectType]): Required. Collection of types that may be returned + by this Union for the GraphQL schema. + name (optional, str): the name of the GraphQL type (must be unique in schema). Defaults to class + name. + description (optional, str): the description of the GraphQL type in the schema. Defaults to class + docstring. + """ + + @classmethod + def __init_subclass_with_meta__(cls, types=None, **options): + assert ( + isinstance(types, (list, tuple)) and len(types) > 0 + ), f"Must provide types for Union {cls.__name__}." + + _meta = UnionOptions(cls) + _meta.types = types + super(Union, cls).__init_subclass_with_meta__(_meta=_meta, **options) + + @classmethod + def get_type(cls): + """ + This function is called when the unmounted type (Union instance) + is mounted (as a Field, InputField or Argument) + """ + return cls + + @classmethod + def resolve_type(cls, instance, info): + from .objecttype import ObjectType # NOQA + + if isinstance(instance, ObjectType): + return type(instance) diff --git a/testbed/graphql-python__graphene/graphene/types/unmountedtype.py b/testbed/graphql-python__graphene/graphene/types/unmountedtype.py new file mode 100644 index 0000000000000000000000000000000000000000..83a6afefc2334234086b5d638283ee032b17de4b --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/unmountedtype.py @@ -0,0 +1,87 @@ +from ..utils.orderedtype import OrderedType + + +class UnmountedType(OrderedType): + """ + This class acts as a proxy for a Graphene Type, so it can be mounted + dynamically as Field, InputField or Argument.
+ + Instead of writing: + + .. code:: python + + from graphene import ObjectType, Field, String + + class MyObjectType(ObjectType): + my_field = Field(String, description='Description here') + + It lets you write: + + .. code:: python + + from graphene import ObjectType, String + + class MyObjectType(ObjectType): + my_field = String(description='Description here') + + It is not used directly, but is inherited by other types and streamlines their use in + different contexts: + + - Object Type + - Scalar Type + - Enum + - Interface + - Union + + An unmounted type will accept arguments based upon its context (ObjectType, Field or + InputObjectType) and pass them on to the appropriate MountedType (Field, Argument or InputField). + + See each Mounted type reference for more information about valid parameters. + """ + + def __init__(self, *args, **kwargs): + super(UnmountedType, self).__init__() + self.args = args + self.kwargs = kwargs + + def get_type(self): + """ + This function is called when the UnmountedType instance + is mounted (as a Field, InputField or Argument) + """ + raise NotImplementedError(f"get_type not implemented in {self}") + + def mount_as(self, _as): + return _as.mounted(self) + + def Field(self): # noqa: N802 + """ + Mount the UnmountedType as Field + """ + from .field import Field + + return self.mount_as(Field) + + def InputField(self): # noqa: N802 + """ + Mount the UnmountedType as InputField + """ + from .inputfield import InputField + + return self.mount_as(InputField) + + def Argument(self): # noqa: N802 + """ + Mount the UnmountedType as Argument + """ + from .argument import Argument + + return self.mount_as(Argument) + + def __eq__(self, other): + return self is other or ( + isinstance(other, UnmountedType) + and self.get_type() == other.get_type() + and self.args == other.args + and self.kwargs == other.kwargs + ) diff --git a/testbed/graphql-python__graphene/graphene/types/utils.py b/testbed/graphql-python__graphene/graphene/types/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1976448aa98452d081e2d4a0b498f9c1ca92cd42 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/utils.py @@ -0,0 +1,50 @@ +import inspect +from functools import partial + +from ..utils.module_loading import import_string +from .mountedtype import MountedType +from .unmountedtype import UnmountedType + + +def get_field_as(value, _as=None): + """ + Get type mounted + """ + if isinstance(value, MountedType): + return value + elif isinstance(value, UnmountedType): + if _as is None: + return value + return _as.mounted(value) + + +def yank_fields_from_attrs(attrs, _as=None, sort=True): + """ + Extract all the fields in the given attributes (dict) + and return them ordered + """ + fields_with_names = [] + for attname, value in list(attrs.items()): + field = get_field_as(value, _as) + if not field: + continue + fields_with_names.append((attname, field)) + + if sort: + fields_with_names = sorted(fields_with_names, key=lambda f: f[1]) + return dict(fields_with_names) + + +def get_type(_type): + if isinstance(_type, str): + return import_string(_type) + if inspect.isfunction(_type) or isinstance(_type, partial): + return _type() + return _type + + +def get_underlying_type(_type): + """Get the underlying type even if it is wrapped in structures like NonNull""" + while hasattr(_type, "of_type"): + _type = _type.of_type + return _type diff --git a/testbed/graphql-python__graphene/graphene/types/uuid.py b/testbed/graphql-python__graphene/graphene/types/uuid.py new
file mode 100644 index 0000000000000000000000000000000000000000..f2ba1fcb31cbd85b72b634ae8fd3fc320f5e7783 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/types/uuid.py @@ -0,0 +1,32 @@ +from __future__ import absolute_import +from uuid import UUID as _UUID + +from graphql.language.ast import StringValueNode +from graphql import Undefined + +from .scalars import Scalar + + +class UUID(Scalar): + """ + Leverages the internal Python implementation of UUID (uuid.UUID) to provide native UUID objects + in fields, resolvers and input. + """ + + @staticmethod + def serialize(uuid): + if isinstance(uuid, str): + uuid = _UUID(uuid) + + assert isinstance(uuid, _UUID), f"Expected UUID instance, received {uuid}" + return str(uuid) + + @staticmethod + def parse_literal(node, _variables=None): + if isinstance(node, StringValueNode): + return _UUID(node.value) + return Undefined + + @staticmethod + def parse_value(value): + return _UUID(value) diff --git a/testbed/graphql-python__graphene/graphene/utils/__init__.py b/testbed/graphql-python__graphene/graphene/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/utils/crunch.py b/testbed/graphql-python__graphene/graphene/utils/crunch.py new file mode 100644 index 0000000000000000000000000000000000000000..b27d3718e623da14d21836545f5705859b551bd6 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/crunch.py @@ -0,0 +1,35 @@ +import json +from collections.abc import Mapping + + +def to_key(value): + return json.dumps(value) + + +def insert(value, index, values): + key = to_key(value) + + if key not in index: + index[key] = len(values) + values.append(value) + return len(values) - 1 + + return index.get(key) + + +def flatten(data, index, values): + if isinstance(data, (list, tuple)): + flattened = [flatten(child, index, values) for child in data] + elif isinstance(data, Mapping): + flattened = {key: flatten(child, index, values) for key, child in data.items()} + else: + flattened = data + return insert(flattened, index, values) + + +def crunch(data): + index = {} + values = [] + + flatten(data, index, values) + return values diff --git a/testbed/graphql-python__graphene/graphene/utils/dataloader.py b/testbed/graphql-python__graphene/graphene/utils/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..143558aa2aa777827eee83c0112c5a29d1a5fabb --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/dataloader.py @@ -0,0 +1,281 @@ +from asyncio import ( + gather, + ensure_future, + get_event_loop, + iscoroutine, + iscoroutinefunction, +) +from collections import namedtuple +from collections.abc import Iterable +from functools import partial + +from typing import List # flake8: noqa + +Loader = namedtuple("Loader", "key,future") + + +def iscoroutinefunctionorpartial(fn): + return iscoroutinefunction(fn.func if isinstance(fn, partial) else fn) + + +class DataLoader(object): + batch = True + max_batch_size = None # type: int + cache = True + + def __init__( + self, + batch_load_fn=None, + batch=None, + max_batch_size=None, + cache=None, + get_cache_key=None, + cache_map=None, + loop=None, + ): + + self._loop = loop + + if batch_load_fn is not None: + self.batch_load_fn = batch_load_fn + + assert iscoroutinefunctionorpartial( + self.batch_load_fn + ), "batch_load_fn must be a coroutine. 
Received: {}".format(self.batch_load_fn) + + if not callable(self.batch_load_fn): + raise TypeError( # pragma: no cover + ( + "DataLoader must be have a batch_load_fn which accepts " + "Iterable and returns Future>, but got: {}." + ).format(batch_load_fn) + ) + + if batch is not None: + self.batch = batch # pragma: no cover + + if max_batch_size is not None: + self.max_batch_size = max_batch_size + + if cache is not None: + self.cache = cache # pragma: no cover + + self.get_cache_key = get_cache_key or (lambda x: x) + + self._cache = cache_map if cache_map is not None else {} + self._queue = [] # type: List[Loader] + + @property + def loop(self): + if not self._loop: + self._loop = get_event_loop() + + return self._loop + + def load(self, key=None): + """ + Loads a key, returning a `Future` for the value represented by that key. + """ + if key is None: + raise TypeError( # pragma: no cover + ( + "The loader.load() function must be called with a value, " + "but got: {}." + ).format(key) + ) + + cache_key = self.get_cache_key(key) + + # If caching and there is a cache-hit, return cached Future. + if self.cache: + cached_result = self._cache.get(cache_key) + if cached_result: + return cached_result + + # Otherwise, produce a new Future for this value. + future = self.loop.create_future() + # If caching, cache this Future. + if self.cache: + self._cache[cache_key] = future + + self.do_resolve_reject(key, future) + return future + + def do_resolve_reject(self, key, future): + # Enqueue this Future to be dispatched. + self._queue.append(Loader(key=key, future=future)) + # Determine if a dispatch of this queue should be scheduled. + # A single dispatch should be scheduled per queue at the time when the + # queue changes from "empty" to "full". + if len(self._queue) == 1: + if self.batch: + # If batching, schedule a task to dispatch the queue. + enqueue_post_future_job(self.loop, self) + else: + # Otherwise dispatch the (queue of one) immediately. + dispatch_queue(self) # pragma: no cover + + def load_many(self, keys): + """ + Loads multiple keys, returning a list of values + + >>> a, b = await my_loader.load_many([ 'a', 'b' ]) + + This is equivalent to the more verbose: + + >>> a, b = await gather( + >>> my_loader.load('a'), + >>> my_loader.load('b') + >>> ) + """ + if not isinstance(keys, Iterable): + raise TypeError( # pragma: no cover + ( + "The loader.load_many() function must be called with Iterable " + "but got: {}." + ).format(keys) + ) + + return gather(*[self.load(key) for key in keys]) + + def clear(self, key): + """ + Clears the value at `key` from the cache, if it exists. Returns itself for + method chaining. + """ + cache_key = self.get_cache_key(key) + self._cache.pop(cache_key, None) + return self + + def clear_all(self): + """ + Clears the entire cache. To be used when some event results in unknown + invalidations across this particular `DataLoader`. Returns itself for + method chaining. + """ + self._cache.clear() + return self + + def prime(self, key, value): + """ + Adds the provied key and value to the cache. If the key already exists, no + change is made. Returns itself for method chaining. + """ + cache_key = self.get_cache_key(key) + + # Only add the key if it does not already exist. + if cache_key not in self._cache: + # Cache a rejected future if the value is an Error, in order to match + # the behavior of load(key). 
+ future = self.loop.create_future() + if isinstance(value, Exception): + future.set_exception(value) + else: + future.set_result(value) + + self._cache[cache_key] = future + + return self + + +def enqueue_post_future_job(loop, loader): + async def dispatch(): + dispatch_queue(loader) + + loop.call_soon(ensure_future, dispatch()) + + +def get_chunks(iterable_obj, chunk_size=1): + chunk_size = max(1, chunk_size) + return ( + iterable_obj[i : i + chunk_size] + for i in range(0, len(iterable_obj), chunk_size) + ) + + +def dispatch_queue(loader): + """ + Given the current state of a Loader instance, perform a batch load + from its current queue. + """ + # Take the current loader queue, replacing it with an empty queue. + queue = loader._queue + loader._queue = [] + + # If a max_batch_size was provided and the queue is longer, then segment the + # queue into multiple batches, otherwise treat the queue as a single batch. + max_batch_size = loader.max_batch_size + + if max_batch_size and max_batch_size < len(queue): + chunks = get_chunks(queue, max_batch_size) + for chunk in chunks: + ensure_future(dispatch_queue_batch(loader, chunk)) + else: + ensure_future(dispatch_queue_batch(loader, queue)) + + +async def dispatch_queue_batch(loader, queue): + # Collect all keys to be loaded in this dispatch + keys = [loaded.key for loaded in queue] + + # Call the provided batch_load_fn for this loader with the loader queue's keys. + batch_future = loader.batch_load_fn(keys) + + # Assert the expected response from batch_load_fn + if not batch_future or not iscoroutine(batch_future): + return failed_dispatch( # pragma: no cover + loader, + queue, + TypeError( + ( + "DataLoader must be constructed with a function which accepts " + "Iterable<keys> and returns Future<Iterable<values>>, but the " + "function did not return a Coroutine: {}." + ).format(batch_future) + ), + ) + + try: + values = await batch_future + if not isinstance(values, Iterable): + raise TypeError( # pragma: no cover + ( + "DataLoader must be constructed with a function which accepts " + "Iterable<keys> and returns Future<Iterable<values>>, but the " + "function did not return a Future of an Iterable: {}." + ).format(values) + ) + + values = list(values) + if len(values) != len(keys): + raise TypeError( # pragma: no cover + ( + "DataLoader must be constructed with a function which accepts " + "Iterable<keys> and returns Future<Iterable<values>>, but the " + "function did not return a Future of an Iterable with the same " + "length as the Iterable of keys." + "\n\nKeys:\n{}" + "\n\nValues:\n{}" + ).format(keys, values) + ) + + # Step through the values, resolving or rejecting each Future in the + # loaded queue. + for loaded, value in zip(queue, values): + if isinstance(value, Exception): + loaded.future.set_exception(value) + else: + loaded.future.set_result(value) + + except Exception as e: + return failed_dispatch(loader, queue, e) + + +def failed_dispatch(loader, queue, error): + """ + Do not cache individual loads if the entire batch dispatch fails, + but still reject each request so they do not hang.
+ """ + for loaded in queue: + loader.clear(loaded.key) + loaded.future.set_exception(error) diff --git a/testbed/graphql-python__graphene/graphene/utils/deduplicator.py b/testbed/graphql-python__graphene/graphene/utils/deduplicator.py new file mode 100644 index 0000000000000000000000000000000000000000..3fbf139d1da26084769ca6855d7b39ec7a9985bb --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/deduplicator.py @@ -0,0 +1,32 @@ +from collections.abc import Mapping + + +def deflate(node, index=None, path=None): + if index is None: + index = {} + if path is None: + path = [] + + if node and "id" in node and "__typename" in node: + route = ",".join(path) + cache_key = ":".join([route, str(node["__typename"]), str(node["id"])]) + + if index.get(cache_key) is True: + return {"__typename": node["__typename"], "id": node["id"]} + else: + index[cache_key] = True + + result = {} + + for field_name in node: + value = node[field_name] + + new_path = path + [field_name] + if isinstance(value, (list, tuple)): + result[field_name] = [deflate(child, index, new_path) for child in value] + elif isinstance(value, Mapping): + result[field_name] = deflate(value, index, new_path) + else: + result[field_name] = value + + return result diff --git a/testbed/graphql-python__graphene/graphene/utils/deprecated.py b/testbed/graphql-python__graphene/graphene/utils/deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..71a5bb404e9dbeb3d829f10cb6a3e62cdc9feed8 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/deprecated.py @@ -0,0 +1,70 @@ +import functools +import inspect +import warnings + +string_types = (type(b""), type("")) + + +def warn_deprecation(text): + warnings.warn(text, category=DeprecationWarning, stacklevel=2) + + +def deprecated(reason): + """ + This is a decorator which can be used to mark functions + as deprecated. It will result in a warning being emitted + when the function is used. + """ + + if isinstance(reason, string_types): + + # The @deprecated is used with a 'reason'. + # + # .. code-block:: python + # + # @deprecated("please, use another function") + # def old_function(x, y): + # pass + + def decorator(func1): + + if inspect.isclass(func1): + fmt1 = f"Call to deprecated class {func1.__name__} ({reason})." + else: + fmt1 = f"Call to deprecated function {func1.__name__} ({reason})." + + @functools.wraps(func1) + def new_func1(*args, **kwargs): + warn_deprecation(fmt1) + return func1(*args, **kwargs) + + return new_func1 + + return decorator + + elif inspect.isclass(reason) or inspect.isfunction(reason): + + # The @deprecated is used without any 'reason'. + # + # .. code-block:: python + # + # @deprecated + # def old_function(x, y): + # pass + + func2 = reason + + if inspect.isclass(func2): + fmt2 = f"Call to deprecated class {func2.__name__}." + else: + fmt2 = f"Call to deprecated function {func2.__name__}." 
+ + @functools.wraps(func2) + def new_func2(*args, **kwargs): + warn_deprecation(fmt2) + return func2(*args, **kwargs) + + return new_func2 + + else: + raise TypeError(repr(type(reason))) diff --git a/testbed/graphql-python__graphene/graphene/utils/get_unbound_function.py b/testbed/graphql-python__graphene/graphene/utils/get_unbound_function.py new file mode 100644 index 0000000000000000000000000000000000000000..bd311e34a51b8331ac1902795f3d9a9790e2962f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/get_unbound_function.py @@ -0,0 +1,4 @@ +def get_unbound_function(func): + if not getattr(func, "__self__", True): + return func.__func__ + return func diff --git a/testbed/graphql-python__graphene/graphene/utils/is_introspection_key.py b/testbed/graphql-python__graphene/graphene/utils/is_introspection_key.py new file mode 100644 index 0000000000000000000000000000000000000000..59d72b24c3fade0e427c1382396bfff5996a689f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/is_introspection_key.py @@ -0,0 +1,6 @@ +def is_introspection_key(key): + # from: https://spec.graphql.org/June2018/#sec-Schema + # > All types and directives defined within a schema must not have a name which + # > begins with "__" (two underscores), as this is used exclusively + # > by GraphQL’s introspection system. + return str(key).startswith("__") diff --git a/testbed/graphql-python__graphene/graphene/utils/module_loading.py b/testbed/graphql-python__graphene/graphene/utils/module_loading.py new file mode 100644 index 0000000000000000000000000000000000000000..d9095d0ac2f35c7bed14f97b49a596bb78d55bc8 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/module_loading.py @@ -0,0 +1,45 @@ +from functools import partial +from importlib import import_module + + +def import_string(dotted_path, dotted_attributes=None): + """ + Import a dotted module path and return the attribute/class designated by the + last name in the path. When a dotted attribute path is also provided, it is + applied to the attribute/class retrieved in the first step, and the + corresponding value designated by the attribute path is returned. + Raise ImportError if the import failed.
+ """ + try: + module_path, class_name = dotted_path.rsplit(".", 1) + except ValueError: + raise ImportError("%s doesn't look like a module path" % dotted_path) + + module = import_module(module_path) + + try: + result = getattr(module, class_name) + except AttributeError: + raise ImportError( + 'Module "%s" does not define a "%s" attribute/class' + % (module_path, class_name) + ) + + if not dotted_attributes: + return result + attributes = dotted_attributes.split(".") + traveled_attributes = [] + try: + for attribute in attributes: + traveled_attributes.append(attribute) + result = getattr(result, attribute) + return result + except AttributeError: + raise ImportError( + 'Module "%s" does not define a "%s" attribute inside attribute/class "%s"' + % (module_path, ".".join(traveled_attributes), class_name) + ) + + +def lazy_import(dotted_path, dotted_attributes=None): + return partial(import_string, dotted_path, dotted_attributes) diff --git a/testbed/graphql-python__graphene/graphene/utils/orderedtype.py b/testbed/graphql-python__graphene/graphene/utils/orderedtype.py new file mode 100644 index 0000000000000000000000000000000000000000..294ad54e7ab7e0af8514f6d37e792fc9039b7ae0 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/orderedtype.py @@ -0,0 +1,39 @@ +from functools import total_ordering + + +@total_ordering +class OrderedType: + creation_counter = 1 + + def __init__(self, _creation_counter=None): + self.creation_counter = _creation_counter or self.gen_counter() + + @staticmethod + def gen_counter(): + counter = OrderedType.creation_counter + OrderedType.creation_counter += 1 + return counter + + def reset_counter(self): + self.creation_counter = self.gen_counter() + + def __eq__(self, other): + # Needed for @total_ordering + if isinstance(self, type(other)): + return self.creation_counter == other.creation_counter + return NotImplemented + + def __lt__(self, other): + # This is needed because bisect does not take a comparison function. + if isinstance(other, OrderedType): + return self.creation_counter < other.creation_counter + return NotImplemented + + def __gt__(self, other): + # This is needed because bisect does not take a comparison function. 
+ if isinstance(other, OrderedType): + return self.creation_counter > other.creation_counter + return NotImplemented + + def __hash__(self): + return hash(self.creation_counter) diff --git a/testbed/graphql-python__graphene/graphene/utils/props.py b/testbed/graphql-python__graphene/graphene/utils/props.py new file mode 100644 index 0000000000000000000000000000000000000000..26c697eca1cf85b35ad8149151849ba43acaa7b9 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/props.py @@ -0,0 +1,15 @@ +class _OldClass: + pass + + +class _NewClass: + pass + + +_all_vars = set(dir(_OldClass) + dir(_NewClass)) + + +def props(x): + return { + key: vars(x).get(key, getattr(x, key)) for key in dir(x) if key not in _all_vars + } diff --git a/testbed/graphql-python__graphene/graphene/utils/resolve_only_args.py b/testbed/graphql-python__graphene/graphene/utils/resolve_only_args.py new file mode 100644 index 0000000000000000000000000000000000000000..5efff2edcfa155ea628ed3224c54c88681b7f193 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/resolve_only_args.py @@ -0,0 +1,12 @@ +from functools import wraps + +from .deprecated import deprecated + + +@deprecated("This function is deprecated") +def resolve_only_args(func): + @wraps(func) + def wrapped_func(root, info, **args): + return func(root, **args) + + return wrapped_func diff --git a/testbed/graphql-python__graphene/graphene/utils/str_converters.py b/testbed/graphql-python__graphene/graphene/utils/str_converters.py new file mode 100644 index 0000000000000000000000000000000000000000..2a214f0642e325a765f32194cdf72cea76d7856e --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/str_converters.py @@ -0,0 +1,17 @@ +import re + + +# Adapted from this response on Stack Overflow +# http://stackoverflow.com/a/19053800/1072990 +def to_camel_case(snake_str): + components = snake_str.split("_") + # We capitalize the first letter of each component except the first one + # with the 'capitalize' method and join them together. + return components[0] + "".join(x.capitalize() if x else "_" for x in components[1:]) + + +# From this response on Stack Overflow +# http://stackoverflow.com/a/1176023/1072990 +def to_snake_case(name): + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() diff --git a/testbed/graphql-python__graphene/graphene/utils/subclass_with_meta.py b/testbed/graphql-python__graphene/graphene/utils/subclass_with_meta.py new file mode 100644 index 0000000000000000000000000000000000000000..c4ee11d74f34d6c015fce168d0a8173ba600a76a --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/subclass_with_meta.py @@ -0,0 +1,50 @@ +from inspect import isclass + +from .props import props + + +class SubclassWithMeta_Meta(type): + _meta = None + + def __str__(cls): + if cls._meta: + return cls._meta.name + return cls.__name__ + + def __repr__(cls): + return f"<{cls.__name__} meta={repr(cls._meta)}>" + + +class SubclassWithMeta(metaclass=SubclassWithMeta_Meta): + """This class improves __init_subclass__ to automatically receive the options from Meta""" + + def __init_subclass__(cls, **meta_options): + """This method just terminates the super() chain""" + _Meta = getattr(cls, "Meta", None) + _meta_props = {} + if _Meta: + if isinstance(_Meta, dict): + _meta_props = _Meta + elif isclass(_Meta): + _meta_props = props(_Meta) + else: + raise Exception( + f"Meta must be either a class or a dict. 
Received {_Meta}" + ) + delattr(cls, "Meta") + options = dict(meta_options, **_meta_props) + + abstract = options.pop("abstract", False) + if abstract: + assert not options, ( + "Abstract types can only contain the abstract attribute. " + f"Received: abstract, {', '.join(options)}" + ) + else: + super_class = super(cls, cls) + if hasattr(super_class, "__init_subclass_with_meta__"): + super_class.__init_subclass_with_meta__(**options) + + @classmethod + def __init_subclass_with_meta__(cls, **meta_options): + """This method just terminates the super() chain""" diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/__init__.py b/testbed/graphql-python__graphene/graphene/utils/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_crunch.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_crunch.py new file mode 100644 index 0000000000000000000000000000000000000000..92d0b1b04ad5cee72f3f115f2d6e2bd063452f0d --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_crunch.py @@ -0,0 +1,51 @@ +from pytest import mark + +from ..crunch import crunch + + +@mark.parametrize( + "description,uncrunched,crunched", + [ + ["number primitive", 0, [0]], + ["boolean primitive", True, [True]], + ["string primitive", "string", ["string"]], + ["empty array", [], [[]]], + ["single-item array", [None], [None, [0]]], + [ + "multi-primitive all distinct array", + [None, 0, True, "string"], + [None, 0, True, "string", [0, 1, 2, 3]], + ], + [ + "multi-primitive repeated array", + [True, True, True, True], + [True, [0, 0, 0, 0]], + ], + ["one-level nested array", [[1, 2, 3]], [1, 2, 3, [0, 1, 2], [3]]], + ["two-level nested array", [[[1, 2, 3]]], [1, 2, 3, [0, 1, 2], [3], [4]]], + ["empty object", {}, [{}]], + ["single-item object", {"a": None}, [None, {"a": 0}]], + [ + "multi-item all distinct object", + {"a": None, "b": 0, "c": True, "d": "string"}, + [None, 0, True, "string", {"a": 0, "b": 1, "c": 2, "d": 3}], + ], + [ + "multi-item repeated object", + {"a": True, "b": True, "c": True, "d": True}, + [True, {"a": 0, "b": 0, "c": 0, "d": 0}], + ], + [ + "complex array", + [{"a": True, "b": [1, 2, 3]}, [1, 2, 3]], + [True, 1, 2, 3, [1, 2, 3], {"a": 0, "b": 4}, [5, 4]], + ], + [ + "complex object", + {"a": True, "b": [1, 2, 3], "c": {"a": True, "b": [1, 2, 3]}}, + [True, 1, 2, 3, [1, 2, 3], {"a": 0, "b": 4}, {"a": 0, "b": 4, "c": 5}], + ], + ], +) +def test_crunch(description, uncrunched, crunched): + assert crunch(uncrunched) == crunched diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_dataloader.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..257f6b4db0a74670a763a2334c8e89f194d287d6 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_dataloader.py @@ -0,0 +1,452 @@ +from asyncio import gather +from collections import namedtuple +from functools import partial +from unittest.mock import Mock + +from graphene.utils.dataloader import DataLoader +from pytest import mark, raises + +from graphene import ObjectType, String, Schema, Field, List + +CHARACTERS = { + "1": {"name": "Luke Skywalker", "sibling": "3"}, + "2": {"name": "Darth Vader", "sibling": None}, + "3": {"name": "Leia Organa", "sibling": "1"}, +} + +get_character = Mock(side_effect=lambda character_id: CHARACTERS[character_id]) + 
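+# The loader below should batch the per-character lookups: the first test +# asserts that one query producing three sibling loads results in exactly one +# batch_load_fn call (and three get_character calls inside it).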
+ +class CharacterType(ObjectType): + name = String() + sibling = Field(lambda: CharacterType) + + async def resolve_sibling(character, info): + if character["sibling"]: + return await info.context.character_loader.load(character["sibling"]) + return None + + +class Query(ObjectType): + skywalker_family = List(CharacterType) + + async def resolve_skywalker_family(_, info): + return await info.context.character_loader.load_many(["1", "2", "3"]) + + +mock_batch_load_fn = Mock( + side_effect=lambda character_ids: [get_character(id) for id in character_ids] +) + + +class CharacterLoader(DataLoader): + async def batch_load_fn(self, character_ids): + return mock_batch_load_fn(character_ids) + + +Context = namedtuple("Context", "character_loader") + + +@mark.asyncio +async def test_basic_dataloader(): + schema = Schema(query=Query) + + character_loader = CharacterLoader() + context = Context(character_loader=character_loader) + + query = """ + { + skywalkerFamily { + name + sibling { + name + } + } + } + """ + + result = await schema.execute_async(query, context=context) + + assert not result.errors + assert result.data == { + "skywalkerFamily": [ + {"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}}, + {"name": "Darth Vader", "sibling": None}, + {"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}}, + ] + } + + assert mock_batch_load_fn.call_count == 1 + assert get_character.call_count == 3 + + +def id_loader(**options): + load_calls = [] + + async def default_resolve(x): + return x + + resolve = options.pop("resolve", default_resolve) + + async def fn(keys): + load_calls.append(keys) + return await resolve(keys) + # return keys + + identity_loader = DataLoader(fn, **options) + return identity_loader, load_calls + + +@mark.asyncio +async def test_build_a_simple_data_loader(): + async def call_fn(keys): + return keys + + identity_loader = DataLoader(call_fn) + + promise1 = identity_loader.load(1) + + value1 = await promise1 + assert value1 == 1 + + +@mark.asyncio +async def test_can_build_a_data_loader_from_a_partial(): + value_map = {1: "one"} + + async def call_fn(context, keys): + return [context.get(key) for key in keys] + + partial_fn = partial(call_fn, value_map) + identity_loader = DataLoader(partial_fn) + + promise1 = identity_loader.load(1) + + value1 = await promise1 + assert value1 == "one" + + +@mark.asyncio +async def test_supports_loading_multiple_keys_in_one_call(): + async def call_fn(keys): + return keys + + identity_loader = DataLoader(call_fn) + + promise_all = identity_loader.load_many([1, 2]) + + values = await promise_all + assert values == [1, 2] + + promise_all = identity_loader.load_many([]) + + values = await promise_all + assert values == [] + + +@mark.asyncio +async def test_batches_multiple_requests(): + identity_loader, load_calls = id_loader() + + promise1 = identity_loader.load(1) + promise2 = identity_loader.load(2) + + p = gather(promise1, promise2) + + value1, value2 = await p + + assert value1 == 1 + assert value2 == 2 + + assert load_calls == [[1, 2]] + + +@mark.asyncio +async def test_batches_multiple_requests_with_max_batch_sizes(): + identity_loader, load_calls = id_loader(max_batch_size=2) + + promise1 = identity_loader.load(1) + promise2 = identity_loader.load(2) + promise3 = identity_loader.load(3) + + p = gather(promise1, promise2, promise3) + + value1, value2, value3 = await p + + assert value1 == 1 + assert value2 == 2 + assert value3 == 3 + + assert load_calls == [[1, 2], [3]] + + +@mark.asyncio +async def 
test_coalesces_identical_requests(): + identity_loader, load_calls = id_loader() + + promise1 = identity_loader.load(1) + promise2 = identity_loader.load(1) + + assert promise1 == promise2 + p = gather(promise1, promise2) + + value1, value2 = await p + + assert value1 == 1 + assert value2 == 1 + + assert load_calls == [[1]] + + +@mark.asyncio +async def test_caches_repeated_requests(): + identity_loader, load_calls = id_loader() + + a, b = await gather(identity_loader.load("A"), identity_loader.load("B")) + + assert a == "A" + assert b == "B" + + assert load_calls == [["A", "B"]] + + a2, c = await gather(identity_loader.load("A"), identity_loader.load("C")) + + assert a2 == "A" + assert c == "C" + + assert load_calls == [["A", "B"], ["C"]] + + a3, b2, c2 = await gather( + identity_loader.load("A"), identity_loader.load("B"), identity_loader.load("C") + ) + + assert a3 == "A" + assert b2 == "B" + assert c2 == "C" + + assert load_calls == [["A", "B"], ["C"]] + + +@mark.asyncio +async def test_clears_single_value_in_loader(): + identity_loader, load_calls = id_loader() + + a, b = await gather(identity_loader.load("A"), identity_loader.load("B")) + + assert a == "A" + assert b == "B" + + assert load_calls == [["A", "B"]] + + identity_loader.clear("A") + + a2, b2 = await gather(identity_loader.load("A"), identity_loader.load("B")) + + assert a2 == "A" + assert b2 == "B" + + assert load_calls == [["A", "B"], ["A"]] + + +@mark.asyncio +async def test_clears_all_values_in_loader(): + identity_loader, load_calls = id_loader() + + a, b = await gather(identity_loader.load("A"), identity_loader.load("B")) + + assert a == "A" + assert b == "B" + + assert load_calls == [["A", "B"]] + + identity_loader.clear_all() + + a2, b2 = await gather(identity_loader.load("A"), identity_loader.load("B")) + + assert a2 == "A" + assert b2 == "B" + + assert load_calls == [["A", "B"], ["A", "B"]] + + +@mark.asyncio +async def test_allows_priming_the_cache(): + identity_loader, load_calls = id_loader() + + identity_loader.prime("A", "A") + + a, b = await gather(identity_loader.load("A"), identity_loader.load("B")) + + assert a == "A" + assert b == "B" + + assert load_calls == [["B"]] + + +@mark.asyncio +async def test_does_not_prime_keys_that_already_exist(): + identity_loader, load_calls = id_loader() + + identity_loader.prime("A", "X") + + a1 = await identity_loader.load("A") + b1 = await identity_loader.load("B") + + assert a1 == "X" + assert b1 == "B" + + identity_loader.prime("A", "Y") + identity_loader.prime("B", "Y") + + a2 = await identity_loader.load("A") + b2 = await identity_loader.load("B") + + assert a2 == "X" + assert b2 == "B" + + assert load_calls == [["B"]] + + +# # Represents Errors +@mark.asyncio +async def test_resolves_to_error_to_indicate_failure(): + async def resolve(keys): + mapped_keys = [ + key if key % 2 == 0 else Exception("Odd: {}".format(key)) for key in keys + ] + return mapped_keys + + even_loader, load_calls = id_loader(resolve=resolve) + + with raises(Exception) as exc_info: + await even_loader.load(1) + + assert str(exc_info.value) == "Odd: 1" + + value2 = await even_loader.load(2) + assert value2 == 2 + assert load_calls == [[1], [2]] + + +@mark.asyncio +async def test_can_represent_failures_and_successes_simultaneously(): + async def resolve(keys): + mapped_keys = [ + key if key % 2 == 0 else Exception("Odd: {}".format(key)) for key in keys + ] + return mapped_keys + + even_loader, load_calls = id_loader(resolve=resolve) + + promise1 = even_loader.load(1) + promise2 = 
even_loader.load(2) + + with raises(Exception) as exc_info: + await promise1 + + assert str(exc_info.value) == "Odd: 1" + value2 = await promise2 + assert value2 == 2 + assert load_calls == [[1, 2]] + + +@mark.asyncio +async def test_caches_failed_fetches(): + async def resolve(keys): + mapped_keys = [Exception("Error: {}".format(key)) for key in keys] + return mapped_keys + + error_loader, load_calls = id_loader(resolve=resolve) + + with raises(Exception) as exc_info: + await error_loader.load(1) + + assert str(exc_info.value) == "Error: 1" + + with raises(Exception) as exc_info: + await error_loader.load(1) + + assert str(exc_info.value) == "Error: 1" + + assert load_calls == [[1]] + + +@mark.asyncio +async def test_caches_failed_fetches_2(): + identity_loader, load_calls = id_loader() + + identity_loader.prime(1, Exception("Error: 1")) + + with raises(Exception) as _: + await identity_loader.load(1) + + assert load_calls == [] + + +# It is resilient to job queue ordering +@mark.asyncio +async def test_batches_loads_occuring_within_promises(): + identity_loader, load_calls = id_loader() + + async def load_b_1(): + return await load_b_2() + + async def load_b_2(): + return await identity_loader.load("B") + + values = await gather(identity_loader.load("A"), load_b_1()) + + assert values == ["A", "B"] + + assert load_calls == [["A", "B"]] + + +@mark.asyncio +async def test_catches_error_if_loader_resolver_fails(): + exc = Exception("AOH!") + + def do_resolve(x): + raise exc + + a_loader, a_load_calls = id_loader(resolve=do_resolve) + + with raises(Exception) as exc_info: + await a_loader.load("A1") + + assert exc_info.value == exc + + +@mark.asyncio +async def test_can_call_a_loader_from_a_loader(): + deep_loader, deep_load_calls = id_loader() + a_loader, a_load_calls = id_loader( + resolve=lambda keys: deep_loader.load(tuple(keys)) + ) + b_loader, b_load_calls = id_loader( + resolve=lambda keys: deep_loader.load(tuple(keys)) + ) + + a1, b1, a2, b2 = await gather( + a_loader.load("A1"), + b_loader.load("B1"), + a_loader.load("A2"), + b_loader.load("B2"), + ) + + assert a1 == "A1" + assert b1 == "B1" + assert a2 == "A2" + assert b2 == "B2" + + assert a_load_calls == [["A1", "A2"]] + assert b_load_calls == [["B1", "B2"]] + assert deep_load_calls == [[("A1", "A2"), ("B1", "B2")]] + + +@mark.asyncio +async def test_dataloader_clear_with_missing_key_works(): + async def do_resolve(x): + return x + + a_loader, a_load_calls = id_loader(resolve=do_resolve) + assert a_loader.clear("A1") == a_loader diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_deduplicator.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_deduplicator.py new file mode 100644 index 0000000000000000000000000000000000000000..95a70e7461998ff1fadd9ff0b7b7875fd2e940d2 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_deduplicator.py @@ -0,0 +1,179 @@ +import datetime +import graphene +from graphene import relay +from graphene.types.resolver import dict_resolver + +from ..deduplicator import deflate + + +def test_does_not_modify_object_without_typename_and_id(): + response = {"foo": "bar"} + + deflated_response = deflate(response) + assert deflated_response == {"foo": "bar"} + + +def test_does_not_modify_first_instance_of_an_object(): + response = { + "data": [ + {"__typename": "foo", "id": 1, "name": "foo"}, + {"__typename": "foo", "id": 1, "name": "foo"}, + ] + } + + deflated_response = deflate(response) + + assert deflated_response == { + "data": [ + {"__typename": 
"foo", "id": 1, "name": "foo"}, + {"__typename": "foo", "id": 1}, + ] + } + + +def test_does_not_modify_first_instance_of_an_object_nested(): + response = { + "data": [ + { + "__typename": "foo", + "bar1": {"__typename": "bar", "id": 1, "name": "bar"}, + "bar2": {"__typename": "bar", "id": 1, "name": "bar"}, + "id": 1, + }, + { + "__typename": "foo", + "bar1": {"__typename": "bar", "id": 1, "name": "bar"}, + "bar2": {"__typename": "bar", "id": 1, "name": "bar"}, + "id": 2, + }, + ] + } + + deflated_response = deflate(response) + + assert deflated_response == { + "data": [ + { + "__typename": "foo", + "bar1": {"__typename": "bar", "id": 1, "name": "bar"}, + "bar2": {"__typename": "bar", "id": 1, "name": "bar"}, + "id": 1, + }, + { + "__typename": "foo", + "bar1": {"__typename": "bar", "id": 1}, + "bar2": {"__typename": "bar", "id": 1}, + "id": 2, + }, + ] + } + + +def test_does_not_modify_input(): + response = { + "data": [ + {"__typename": "foo", "id": 1, "name": "foo"}, + {"__typename": "foo", "id": 1, "name": "foo"}, + ] + } + + deflate(response) + + assert response == { + "data": [ + {"__typename": "foo", "id": 1, "name": "foo"}, + {"__typename": "foo", "id": 1, "name": "foo"}, + ] + } + + +TEST_DATA = { + "events": [ + {"id": "568", "date": datetime.date(2017, 5, 19), "movie": "1198359"}, + {"id": "234", "date": datetime.date(2017, 5, 20), "movie": "1198359"}, + ], + "movies": { + "1198359": { + "id": "1198359", + "name": "King Arthur: Legend of the Sword", + "synopsis": ( + "When the child Arthur's father is murdered, Vortigern, " + "Arthur's uncle, seizes the crown. Robbed of his birthright and " + "with no idea who he truly is..." + ), + } + }, +} + + +def test_example_end_to_end(): + class Movie(graphene.ObjectType): + class Meta: + interfaces = (relay.Node,) + default_resolver = dict_resolver + + name = graphene.String(required=True) + synopsis = graphene.String(required=True) + + class Event(graphene.ObjectType): + class Meta: + interfaces = (relay.Node,) + default_resolver = dict_resolver + + movie = graphene.Field(Movie, required=True) + date = graphene.types.datetime.Date(required=True) + + def resolve_movie(event, info): + return TEST_DATA["movies"][event["movie"]] + + class Query(graphene.ObjectType): + events = graphene.List(graphene.NonNull(Event), required=True) + + def resolve_events(_, info): + return TEST_DATA["events"] + + schema = graphene.Schema(query=Query) + query = """\ + { + events { + __typename + id + date + movie { + __typename + id + name + synopsis + } + } + } + """ + result = schema.execute(query) + assert not result.errors + + data = deflate(result.data) + assert data == { + "events": [ + { + "__typename": "Event", + "id": "RXZlbnQ6NTY4", + "date": "2017-05-19", + "movie": { + "__typename": "Movie", + "id": "TW92aWU6MTE5ODM1OQ==", + "name": "King Arthur: Legend of the Sword", + "synopsis": ( + "When the child Arthur's father is murdered, Vortigern, " + "Arthur's uncle, seizes the crown. Robbed of his birthright and " + "with no idea who he truly is..." 
+ ), + }, + }, + { + "__typename": "Event", + "id": "RXZlbnQ6MjM0", + "date": "2017-05-20", + "movie": {"__typename": "Movie", "id": "TW92aWU6MTE5ODM1OQ=="}, + }, + ] + } diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_deprecated.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..8a14434b649a0ff740948796282d0608ce387eef --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_deprecated.py @@ -0,0 +1,75 @@ +from pytest import raises + +from .. import deprecated +from ..deprecated import deprecated as deprecated_decorator +from ..deprecated import warn_deprecation + + +def test_warn_deprecation(mocker): + mocker.patch.object(deprecated.warnings, "warn") + + warn_deprecation("OH!") + deprecated.warnings.warn.assert_called_with( + "OH!", stacklevel=2, category=DeprecationWarning + ) + + +def test_deprecated_decorator(mocker): + mocker.patch.object(deprecated, "warn_deprecation") + + @deprecated_decorator + def my_func(): + return True + + result = my_func() + assert result + deprecated.warn_deprecation.assert_called_with( + "Call to deprecated function my_func." + ) + + +def test_deprecated_class(mocker): + mocker.patch.object(deprecated, "warn_deprecation") + + @deprecated_decorator + class X: + pass + + result = X() + assert result + deprecated.warn_deprecation.assert_called_with("Call to deprecated class X.") + + +def test_deprecated_decorator_text(mocker): + mocker.patch.object(deprecated, "warn_deprecation") + + @deprecated_decorator("Deprecation text") + def my_func(): + return True + + result = my_func() + assert result + deprecated.warn_deprecation.assert_called_with( + "Call to deprecated function my_func (Deprecation text)." + ) + + +def test_deprecated_class_text(mocker): + mocker.patch.object(deprecated, "warn_deprecation") + + @deprecated_decorator("Deprecation text") + class X: + pass + + result = X() + assert result + deprecated.warn_deprecation.assert_called_with( + "Call to deprecated class X (Deprecation text)." 
+ ) + + +def test_deprecated_other_object(mocker): + mocker.patch.object(deprecated, "warn_deprecation") + + with raises(TypeError): + deprecated_decorator({}) diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_module_loading.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_module_loading.py new file mode 100644 index 0000000000000000000000000000000000000000..01e89bf30d5e0307ac0251f5498a6663889f4760 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_module_loading.py @@ -0,0 +1,69 @@ +from pytest import raises + +from graphene import ObjectType, String + +from ..module_loading import import_string, lazy_import + + +def test_import_string(): + MyString = import_string("graphene.String") + assert MyString == String + + MyObjectTypeMeta = import_string("graphene.ObjectType", "__doc__") + assert MyObjectTypeMeta == ObjectType.__doc__ + + +def test_import_string_module(): + with raises(Exception) as exc_info: + import_string("graphenea") + + assert str(exc_info.value) == "graphenea doesn't look like a module path" + + +def test_import_string_class(): + with raises(Exception) as exc_info: + import_string("graphene.Stringa") + + assert ( + str(exc_info.value) + == 'Module "graphene" does not define a "Stringa" attribute/class' + ) + + +def test_import_string_attributes(): + with raises(Exception) as exc_info: + import_string("graphene.String", "length") + + assert ( + str(exc_info.value) + == 'Module "graphene" does not define a "length" attribute inside attribute/class ' + '"String"' + ) + + with raises(Exception) as exc_info: + import_string("graphene.ObjectType", "__class__.length") + + assert ( + str(exc_info.value) + == 'Module "graphene" does not define a "__class__.length" attribute inside ' + 'attribute/class "ObjectType"' + ) + + with raises(Exception) as exc_info: + import_string("graphene.ObjectType", "__classa__.__base__") + + assert ( + str(exc_info.value) + == 'Module "graphene" does not define a "__classa__" attribute inside attribute/class ' + '"ObjectType"' + ) + + +def test_lazy_import(): + f = lazy_import("graphene.String") + MyString = f() + assert MyString == String + + f = lazy_import("graphene.ObjectType", "__doc__") + MyObjectTypeMeta = f() + assert MyObjectTypeMeta == ObjectType.__doc__ diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_orderedtype.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_orderedtype.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5bd77a37f7d10dd6b635b199247c0e9b9b83fa --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_orderedtype.py @@ -0,0 +1,41 @@ +from ..orderedtype import OrderedType + + +def test_orderedtype(): + one = OrderedType() + two = OrderedType() + three = OrderedType() + + assert one < two < three + + +def test_orderedtype_eq(): + one = OrderedType() + two = OrderedType() + + assert one == one + assert one != two + + +def test_orderedtype_hash(): + one = OrderedType() + two = OrderedType() + + assert hash(one) == hash(one) + assert hash(one) != hash(two) + + +def test_orderedtype_resetcounter(): + one = OrderedType() + two = OrderedType() + one.reset_counter() + + assert one > two + + +def test_orderedtype_non_orderabletypes(): + one = OrderedType() + + assert one.__lt__(1) == NotImplemented + assert one.__gt__(1) == NotImplemented + assert one != 1 diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_resolve_only_args.py 
b/testbed/graphql-python__graphene/graphene/utils/tests/test_resolve_only_args.py new file mode 100644 index 0000000000000000000000000000000000000000..4e44be9f6572b721fbb911d0d354a7d8199fd72b --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_resolve_only_args.py @@ -0,0 +1,14 @@ +from .. import deprecated +from ..resolve_only_args import resolve_only_args + + +def test_resolve_only_args(mocker): + mocker.patch.object(deprecated, "warn_deprecation") + + def resolver(root, **args): + return root, args + + wrapped_resolver = resolve_only_args(resolver) + assert deprecated.warn_deprecation.called + result = wrapped_resolver(1, 2, a=3) + assert result == (1, {"a": 3}) diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_resolver_from_annotations.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_resolver_from_annotations.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_str_converters.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_str_converters.py new file mode 100644 index 0000000000000000000000000000000000000000..307d68771a1b7d284b37614eef69c8c5dad1796f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_str_converters.py @@ -0,0 +1,19 @@ +# coding: utf-8 +from ..str_converters import to_camel_case, to_snake_case + + +def test_snake_case(): + assert to_snake_case("snakesOnAPlane") == "snakes_on_a_plane" + assert to_snake_case("SnakesOnAPlane") == "snakes_on_a_plane" + assert to_snake_case("SnakesOnA_Plane") == "snakes_on_a__plane" + assert to_snake_case("snakes_on_a_plane") == "snakes_on_a_plane" + assert to_snake_case("snakes_on_a__plane") == "snakes_on_a__plane" + assert to_snake_case("IPhoneHysteria") == "i_phone_hysteria" + assert to_snake_case("iPhoneHysteria") == "i_phone_hysteria" + + +def test_camel_case(): + assert to_camel_case("snakes_on_a_plane") == "snakesOnAPlane" + assert to_camel_case("snakes_on_a__plane") == "snakesOnA_Plane" + assert to_camel_case("i_phone_hysteria") == "iPhoneHysteria" + assert to_camel_case("field_i18n") == "fieldI18n" diff --git a/testbed/graphql-python__graphene/graphene/utils/tests/test_trim_docstring.py b/testbed/graphql-python__graphene/graphene/utils/tests/test_trim_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..232836d1f115bd66c670dfa507ee504de81f3f96 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/tests/test_trim_docstring.py @@ -0,0 +1,22 @@ +from ..trim_docstring import trim_docstring + + +def test_trim_docstring(): + class WellDocumentedObject: + """ + This object is very well-documented. It has multiple lines in its + description. + + Multiple paragraphs too + """ + + assert ( + trim_docstring(WellDocumentedObject.__doc__) + == "This object is very well-documented. It has multiple lines in its\n" + "description.\n\nMultiple paragraphs too" + ) + + class UndocumentedObject: + pass + + assert trim_docstring(UndocumentedObject.__doc__) is None diff --git a/testbed/graphql-python__graphene/graphene/utils/thenables.py b/testbed/graphql-python__graphene/graphene/utils/thenables.py new file mode 100644 index 0000000000000000000000000000000000000000..96286992ec69d584cbe63230432dc35d877fc327 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/thenables.py @@ -0,0 +1,25 @@ +""" +This file is used mainly as a bridge for thenable abstractions. 
+""" + +from inspect import isawaitable + + +def await_and_execute(obj, on_resolve): + async def build_resolve_async(): + return on_resolve(await obj) + + return build_resolve_async() + + +def maybe_thenable(obj, on_resolve): + """ + Execute a on_resolve function once the thenable is resolved, + returning the same type of object inputed. + If the object is not thenable, it should return on_resolve(obj) + """ + if isawaitable(obj): + return await_and_execute(obj, on_resolve) + + # If it's not awaitable, return the function executed over the object + return on_resolve(obj) diff --git a/testbed/graphql-python__graphene/graphene/utils/trim_docstring.py b/testbed/graphql-python__graphene/graphene/utils/trim_docstring.py new file mode 100644 index 0000000000000000000000000000000000000000..a23c7e7d71a63efd324998038163fadf39b374e7 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/utils/trim_docstring.py @@ -0,0 +1,9 @@ +import inspect + + +def trim_docstring(docstring): + # Cleans up whitespaces from an indented docstring + # + # See https://www.python.org/dev/peps/pep-0257/ + # and https://docs.python.org/2/library/inspect.html#inspect.cleandoc + return inspect.cleandoc(docstring) if docstring else None diff --git a/testbed/graphql-python__graphene/graphene/validation/__init__.py b/testbed/graphql-python__graphene/graphene/validation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b592a2cd863f885f04209e0e5bb8937ed841b2f --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/validation/__init__.py @@ -0,0 +1,5 @@ +from .depth_limit import depth_limit_validator +from .disable_introspection import DisableIntrospection + + +__all__ = ["DisableIntrospection", "depth_limit_validator"] diff --git a/testbed/graphql-python__graphene/graphene/validation/depth_limit.py b/testbed/graphql-python__graphene/graphene/validation/depth_limit.py new file mode 100644 index 0000000000000000000000000000000000000000..b4599e6608fa0c4c4537527402b76b33fea4e2e9 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/validation/depth_limit.py @@ -0,0 +1,195 @@ +# This is a Python port of https://github.com/stems/graphql-depth-limit +# which is licensed under the terms of the MIT license, reproduced below. +# +# ----------- +# +# MIT License +# +# Copyright (c) 2017 Stem +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +try: + from re import Pattern +except ImportError: + # backwards compatibility for v3.6 + from typing import Pattern +from typing import Callable, Dict, List, Optional, Union + +from graphql import GraphQLError +from graphql.validation import ValidationContext, ValidationRule +from graphql.language import ( + DefinitionNode, + FieldNode, + FragmentDefinitionNode, + FragmentSpreadNode, + InlineFragmentNode, + Node, + OperationDefinitionNode, +) + +from ..utils.is_introspection_key import is_introspection_key + + +IgnoreType = Union[Callable[[str], bool], Pattern, str] + + +def depth_limit_validator( + max_depth: int, + ignore: Optional[List[IgnoreType]] = None, + callback: Optional[Callable[[Dict[str, int]], None]] = None, +): + class DepthLimitValidator(ValidationRule): + def __init__(self, validation_context: ValidationContext): + document = validation_context.document + definitions = document.definitions + + fragments = get_fragments(definitions) + queries = get_queries_and_mutations(definitions) + query_depths = {} + + for name in queries: + query_depths[name] = determine_depth( + node=queries[name], + fragments=fragments, + depth_so_far=0, + max_depth=max_depth, + context=validation_context, + operation_name=name, + ignore=ignore, + ) + if callable(callback): + callback(query_depths) + super().__init__(validation_context) + + return DepthLimitValidator + + +def get_fragments( + definitions: List[DefinitionNode], +) -> Dict[str, FragmentDefinitionNode]: + fragments = {} + for definition in definitions: + if isinstance(definition, FragmentDefinitionNode): + fragments[definition.name.value] = definition + return fragments + + +# This will actually get both queries and mutations. +# We can basically treat those the same +def get_queries_and_mutations( + definitions: List[DefinitionNode], +) -> Dict[str, OperationDefinitionNode]: + operations = {} + + for definition in definitions: + if isinstance(definition, OperationDefinitionNode): + operation = definition.name.value if definition.name else "anonymous" + operations[operation] = definition + return operations + + +def determine_depth( + node: Node, + fragments: Dict[str, FragmentDefinitionNode], + depth_so_far: int, + max_depth: int, + context: ValidationContext, + operation_name: str, + ignore: Optional[List[IgnoreType]] = None, +) -> int: + if depth_so_far > max_depth: + context.report_error( + GraphQLError( + f"'{operation_name}' exceeds maximum operation depth of {max_depth}.", + [node], + ) + ) + return depth_so_far + if isinstance(node, FieldNode): + should_ignore = is_introspection_key(node.name.value) or is_ignored( + node, ignore + ) + + if should_ignore or not node.selection_set: + return 0 + return 1 + max( + map( + lambda selection: determine_depth( + node=selection, + fragments=fragments, + depth_so_far=depth_so_far + 1, + max_depth=max_depth, + context=context, + operation_name=operation_name, + ignore=ignore, + ), + node.selection_set.selections, + ) + ) + elif isinstance(node, FragmentSpreadNode): + return determine_depth( + node=fragments[node.name.value], + fragments=fragments, + depth_so_far=depth_so_far, + max_depth=max_depth, + context=context, + operation_name=operation_name, + ignore=ignore, + ) + elif isinstance( + node, (InlineFragmentNode, FragmentDefinitionNode, OperationDefinitionNode) + ): + return max( + map( + lambda selection: determine_depth( + node=selection, + fragments=fragments, + depth_so_far=depth_so_far, + max_depth=max_depth, + context=context, + operation_name=operation_name, + ignore=ignore, + 
), + node.selection_set.selections, + ) + ) + else: + raise Exception( + f"Depth crawler cannot handle: {node.kind}." + ) # pragma: no cover + + +def is_ignored(node: FieldNode, ignore: Optional[List[IgnoreType]] = None) -> bool: + if ignore is None: + return False + for rule in ignore: + field_name = node.name.value + if isinstance(rule, str): + if field_name == rule: + return True + elif isinstance(rule, Pattern): + if rule.match(field_name): + return True + elif callable(rule): + if rule(field_name): + return True + else: + raise ValueError(f"Invalid ignore option: {rule}.") + return False diff --git a/testbed/graphql-python__graphene/graphene/validation/disable_introspection.py b/testbed/graphql-python__graphene/graphene/validation/disable_introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..49a7d6073d8756217dede67ef331bb43d6e50a70 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/validation/disable_introspection.py @@ -0,0 +1,16 @@ +from graphql import GraphQLError +from graphql.language import FieldNode +from graphql.validation import ValidationRule + +from ..utils.is_introspection_key import is_introspection_key + + +class DisableIntrospection(ValidationRule): + def enter_field(self, node: FieldNode, *_args): + field_name = node.name.value + if is_introspection_key(field_name): + self.report_error( + GraphQLError( + f"Cannot query '{field_name}': introspection is disabled.", node + ) + ) diff --git a/testbed/graphql-python__graphene/graphene/validation/tests/__init__.py b/testbed/graphql-python__graphene/graphene/validation/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/graphql-python__graphene/graphene/validation/tests/test_depth_limit_validator.py b/testbed/graphql-python__graphene/graphene/validation/tests/test_depth_limit_validator.py new file mode 100644 index 0000000000000000000000000000000000000000..29c1508c4b889edc26da2a8a7e2840f0067ccc57 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/validation/tests/test_depth_limit_validator.py @@ -0,0 +1,254 @@ +import re + +from pytest import raises +from graphql import parse, get_introspection_query, validate + +from ...types import Schema, ObjectType, Interface +from ...types import String, Int, List, Field +from ..depth_limit import depth_limit_validator + + +class PetType(Interface): + name = String(required=True) + + class meta: + name = "Pet" + + +class CatType(ObjectType): + class meta: + name = "Cat" + interfaces = (PetType,) + + +class DogType(ObjectType): + class meta: + name = "Dog" + interfaces = (PetType,) + + +class AddressType(ObjectType): + street = String(required=True) + number = Int(required=True) + city = String(required=True) + country = String(required=True) + + class Meta: + name = "Address" + + +class HumanType(ObjectType): + name = String(required=True) + email = String(required=True) + address = Field(AddressType, required=True) + pets = List(PetType, required=True) + + class Meta: + name = "Human" + + +class Query(ObjectType): + user = Field(HumanType, required=True, name=String()) + version = String(required=True) + user1 = Field(HumanType, required=True) + user2 = Field(HumanType, required=True) + user3 = Field(HumanType, required=True) + + @staticmethod + def resolve_user(root, info, name=None): + pass + + +schema = Schema(query=Query) + + +def run_query(query: str, max_depth: int, ignore=None): + document = parse(query) + + result = None + + def 
callback(query_depths): + nonlocal result + result = query_depths + + errors = validate( + schema=schema.graphql_schema, + document_ast=document, + rules=( + depth_limit_validator( + max_depth=max_depth, ignore=ignore, callback=callback + ), + ), + ) + + return errors, result + + +def test_should_count_depth_without_fragment(): + query = """ + query read0 { + version + } + query read1 { + version + user { + name + } + } + query read2 { + matt: user(name: "matt") { + email + } + andy: user(name: "andy") { + email + address { + city + } + } + } + query read3 { + matt: user(name: "matt") { + email + } + andy: user(name: "andy") { + email + address { + city + } + pets { + name + owner { + name + } + } + } + } + """ + + expected = {"read0": 0, "read1": 1, "read2": 2, "read3": 3} + + errors, result = run_query(query, 10) + assert not errors + assert result == expected + + +def test_should_count_with_fragments(): + query = """ + query read0 { + ... on Query { + version + } + } + query read1 { + version + user { + ... on Human { + name + } + } + } + fragment humanInfo on Human { + email + } + fragment petInfo on Pet { + name + owner { + name + } + } + query read2 { + matt: user(name: "matt") { + ...humanInfo + } + andy: user(name: "andy") { + ...humanInfo + address { + city + } + } + } + query read3 { + matt: user(name: "matt") { + ...humanInfo + } + andy: user(name: "andy") { + ... on Human { + email + } + address { + city + } + pets { + ...petInfo + } + } + } + """ + + expected = {"read0": 0, "read1": 1, "read2": 2, "read3": 3} + + errors, result = run_query(query, 10) + assert not errors + assert result == expected + + +def test_should_ignore_the_introspection_query(): + errors, result = run_query(get_introspection_query(), 10) + assert not errors + assert result == {"IntrospectionQuery": 0} + + +def test_should_catch_very_deep_query(): + query = """{ + user { + pets { + owner { + pets { + owner { + pets { + name + } + } + } + } + } + } + } + """ + errors, result = run_query(query, 4) + + assert len(errors) == 1 + assert errors[0].message == "'anonymous' exceeds maximum operation depth of 4." + + +def test_should_ignore_field(): + query = """ + query read1 { + user { address { city } } + } + query read2 { + user1 { address { city } } + user2 { address { city } } + user3 { address { city } } + } + """ + + errors, result = run_query( + query, + 10, + ignore=["user1", re.compile("user2"), lambda field_name: field_name == "user3"], + ) + + expected = {"read1": 2, "read2": 0} + assert not errors + assert result == expected + + +def test_should_raise_invalid_ignore(): + query = """ + query read1 { + user { address { city } } + } + """ + with raises(ValueError, match="Invalid ignore option:"): + run_query(query, 10, ignore=[True]) diff --git a/testbed/graphql-python__graphene/graphene/validation/tests/test_disable_introspection.py b/testbed/graphql-python__graphene/graphene/validation/tests/test_disable_introspection.py new file mode 100644 index 0000000000000000000000000000000000000000..149ac628d95c0c3ffa41d5325c8922385499fbc1 --- /dev/null +++ b/testbed/graphql-python__graphene/graphene/validation/tests/test_disable_introspection.py @@ -0,0 +1,37 @@ +from graphql import parse, validate + +from ...types import Schema, ObjectType, String +from ..disable_introspection import DisableIntrospection + + +class Query(ObjectType): + name = String(required=True) + + @staticmethod + def resolve_name(root, info): + return "Hello world!" 
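Both validation rules rely on `graphene.utils.is_introspection_key`, whose source is not part of this diff. Per the GraphQL spec, names beginning with two underscores are reserved for introspection, so the helper presumably reduces to something like the following approximation (an inference, not the library code):

```python
def is_introspection_key_like(key) -> bool:
    # The GraphQL spec reserves names beginning with "__"
    # for the introspection system: __schema, __type, __typename, ...
    return str(key).startswith("__")


assert is_introspection_key_like("__schema")
assert not is_introspection_key_like("name")
```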
+ + +schema = Schema(query=Query) + + +def run_query(query: str): + document = parse(query) + + return validate( + schema=schema.graphql_schema, + document_ast=document, + rules=(DisableIntrospection,), + ) + + +def test_disallows_introspection_queries(): + errors = run_query("{ __schema { queryType { name } } }") + + assert len(errors) == 1 + assert errors[0].message == "Cannot query '__schema': introspection is disabled." + + +def test_allows_non_introspection_queries(): + errors = run_query("{ name }") + assert len(errors) == 0 diff --git a/testbed/graphql-python__graphene/mypy.ini b/testbed/graphql-python__graphene/mypy.ini new file mode 100644 index 0000000000000000000000000000000000000000..bbb37b7759c0d1805a096a39f93b9342e0d23d8e --- /dev/null +++ b/testbed/graphql-python__graphene/mypy.ini @@ -0,0 +1,17 @@ +[mypy] +ignore_missing_imports = True + +[mypy-graphene.pyutils.*] +ignore_errors = True + +[mypy-graphene.types.scalars] +ignore_errors = True + +[mypy-graphene.types.generic] +ignore_errors = True + +[mypy-graphene.types.tests.*] +ignore_errors = True + +[mypy-graphene.relay.tests.*] +ignore_errors = True diff --git a/testbed/graphql-python__graphene/setup.cfg b/testbed/graphql-python__graphene/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..db1ff1345018021e79e34e8d2209805f7f403c91 --- /dev/null +++ b/testbed/graphql-python__graphene/setup.cfg @@ -0,0 +1,16 @@ +[flake8] +exclude = setup.py,docs/*,*/examples/*,graphene/pyutils/*,tests +max-line-length = 120 + +# This is a specific ignore for Black+Flake8 +# source: https://black.readthedocs.io/en/stable/guides/using_black_with_other_tools.html#id1 +extend-ignore = E203 + +[coverage:run] +omit = graphene/pyutils/*,*/tests/*,graphene/types/scalars.py + +[isort] +known_first_party=graphene + +[bdist_wheel] +universal=1 diff --git a/testbed/graphql-python__graphene/tox.ini b/testbed/graphql-python__graphene/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..65fceadd808c8253cce47b5475218ac9f6e627b8 --- /dev/null +++ b/testbed/graphql-python__graphene/tox.ini @@ -0,0 +1,29 @@ +[tox] +envlist = py3{6,7,8,9,10}, mypy, pre-commit +skipsdist = true + +[testenv] +deps = + .[test] +setenv = + PYTHONPATH = .:{envdir} +commands = + py{36,37,38,39,310}: pytest --cov=graphene graphene --cov-report=term --cov-report=xml examples {posargs} + +[testenv:pre-commit] +basepython = python3.10 +deps = + pre-commit>=2.16,<3 +setenv = + LC_CTYPE=en_US.UTF-8 +commands = + pre-commit run --all-files --show-diff-on-failure + +[testenv:mypy] +basepython = python3.10 +deps = + mypy>=0.950,<1 +commands = + mypy graphene + +[pytest] diff --git a/testbed/huggingface__accelerate/.github/workflows/build_docker_images.yml b/testbed/huggingface__accelerate/.github/workflows/build_docker_images.yml new file mode 100644 index 0000000000000000000000000000000000000000..c207f5515b6d02e042ee94114d87084214ff400e --- /dev/null +++ b/testbed/huggingface__accelerate/.github/workflows/build_docker_images.yml @@ -0,0 +1,54 @@ +name: Build Docker images (scheduled) + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "0 1 * * *" + +concurrency: + group: docker-image-builds + cancel-in-progress: false + +jobs: + latest-cpu: + name: "Latest Accelerate CPU [dev]" + runs-on: ubuntu-latest + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Check out code + uses: actions/checkout@v2 + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ 
secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+      - name: Build and Push CPU
+        uses: docker/build-push-action@v2
+        with:
+          context: ./docker/accelerate-cpu
+          push: true
+          tags: huggingface/accelerate-cpu
+
+  latest-cuda:
+    name: "Latest Accelerate GPU [dev]"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Check out code
+        uses: actions/checkout@v2
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+      - name: Build and Push GPU
+        uses: docker/build-push-action@v2
+        with:
+          context: ./docker/accelerate-gpu
+          push: true
+          tags: huggingface/accelerate-gpu
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/docs/source/basic_tutorials/install.mdx b/testbed/huggingface__accelerate/docs/source/basic_tutorials/install.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..19630f575855a914bc7bb33693a97a272103d2f2
--- /dev/null
+++ b/testbed/huggingface__accelerate/docs/source/basic_tutorials/install.mdx
@@ -0,0 +1,99 @@
+
+
+# Installation and Configuration
+
+Before you start, you will need to set up your environment, install the appropriate packages, and configure 🤗 Accelerate. 🤗 Accelerate is tested on **Python 3.7+**.
+
+## Installing 🤗 Accelerate
+
+🤗 Accelerate is available on PyPI and conda, as well as on GitHub. Details on how to install from each are below:
+
+### pip
+
+To install 🤗 Accelerate from PyPI, run:
+
+```bash
+pip install accelerate
+```
+
+### conda
+
+🤗 Accelerate can also be installed with conda with:
+
+```bash
+conda install -c conda-forge accelerate
+```
+
+### Source
+
+New features are added every day that haven't been released yet. To try them out yourself, install
+from the GitHub repository:
+
+```bash
+pip install git+https://github.com/huggingface/accelerate
+```
+
+If you're working on contributing to the library or wish to play with the source code and see live
+results as you run the code, an editable version can be installed from a locally-cloned version of the
+repository:
+
+```bash
+git clone https://github.com/huggingface/accelerate
+cd accelerate
+pip install -e .
+```
+
+## Configuring 🤗 Accelerate
+
+After installing, you need to configure 🤗 Accelerate for how the current system is set up for training.
+To do so, run the following and answer the questions prompted to you:
+
+```bash
+accelerate config
+```
+
+To write a barebones configuration that doesn't include options such as DeepSpeed configuration or running on TPUs, you can quickly run:
+
+```bash
+python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')"
+```
+🤗 Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.
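The same barebones configuration can also be written from a regular Python script instead of a `python -c` one-liner; a minimal sketch, assuming no options beyond the ones shown above:

```python
from accelerate.utils import write_basic_config

# Writes a default config file so `accelerate launch` can be used
# without first running the interactive `accelerate config` questionnaire.
write_basic_config(mixed_precision="fp16")
```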
+ +To check that your configuration looks fine, run: + +```bash +accelerate env +``` + +An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used: + +```bash +- `Accelerate` version: 0.11.0.dev0 +- Platform: Linux-5.10.0-15-cloud-amd64-x86_64-with-debian-11.3 +- Python version: 3.7.12 +- Numpy version: 1.19.5 +- PyTorch version (GPU?): 1.12.0+cu102 (True) +- `Accelerate` default config: + - compute_environment: LOCAL_MACHINE + - distributed_type: MULTI_GPU + - mixed_precision: no + - use_cpu: False + - num_processes: 2 + - machine_rank: 0 + - num_machines: 1 + - main_process_ip: None + - main_process_port: None + - main_training_function: main + - deepspeed_config: {} + - fsdp_config: {} +``` \ No newline at end of file diff --git a/testbed/huggingface__accelerate/docs/source/basic_tutorials/launch.mdx b/testbed/huggingface__accelerate/docs/source/basic_tutorials/launch.mdx new file mode 100644 index 0000000000000000000000000000000000000000..741920f26230194e05d3a620e2fcfdcca867612e --- /dev/null +++ b/testbed/huggingface__accelerate/docs/source/basic_tutorials/launch.mdx @@ -0,0 +1,178 @@ + + +# Launching your 🤗 Accelerate scripts + +In the previous tutorial, you were introduced to how to modify your current training script to use 🤗 Accelerate. +The final version of that code is shown below: + +```python +from accelerate import Accelerator + +accelerator = Accelerator() + +model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler +) + +for batch in training_dataloader: + optimizer.zero_grad() + inputs, targets = batch + outputs = model(inputs) + loss = loss_function(outputs, targets) + accelerator.backward(loss) + optimizer.step() + scheduler.step() +``` + +But how do you run this code and have it utilize the special hardware available to it? + +First you should rewrite the above code into a function, and make it callable as a script. For example: + +```diff + from accelerate import Accelerator + ++ def main(): + accelerator = Accelerator() + + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) + + for batch in training_dataloader: + optimizer.zero_grad() + inputs, targets = batch + outputs = model(inputs) + loss = loss_function(outputs, targets) + accelerator.backward(loss) + optimizer.step() + scheduler.step() + ++ if __name__ == "__main__": ++ main() +``` + +Next you need to launch it with `accelerate launch`. + + + + It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking. + Otherwise 🤗 Accelerate will use very basic defaults depending on your system setup. + + + + +## Using accelerate launch + +🤗 Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`. +This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them are. + + + + If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`. + + + +You can launch your script quickly by using: + +```bash +accelerate launch {script_name.py} --arg1 --arg2 ... +``` + +Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterwards like normal! 
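To make that concrete, here is a toy but self-contained script in the same shape that could be launched this way; the tiny linear model and random data are illustrative placeholders, and `torch` is assumed to be installed:

```python
# train.py -- run with: accelerate launch train.py
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator


def main():
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    dataset = TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
    dataloader = DataLoader(dataset, batch_size=8)

    # prepare() moves everything to the right device(s) and wraps the
    # dataloader so each process sees its own shard of the batches.
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

    for inputs, targets in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()


if __name__ == "__main__":
    main()
```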
+
+Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well.
+For example, here is how to use `accelerate launch` with a single GPU:
+
+```bash
+CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ...
+```
+
+You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters.
+In this case, 🤗 Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without mixed precision.
+Here is how you would use all GPUs and train with mixed precision disabled:
+
+```bash
+accelerate launch --multi_gpu {script_name.py} {--arg1} {--arg2} ...
+```
+
+To get more specific, you should pass in the needed parameters yourself. For instance, here is how you
+would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings:
+
+```bash
+accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} {--arg1} {--arg2} ...
+```
+
+For a complete list of parameters you can pass in, run:
+
+```bash
+accelerate launch -h
+```
+
+
+
+ Even if you are not using 🤗 Accelerate in your code, you can still use the launcher for starting your scripts!
+
+
+
+For a sense of the difference, here is how that earlier multi-GPU `accelerate launch` command would look with `torchrun`:
+
+```bash
+MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --num_machines=1 {script_name.py} {--arg1} {--arg2} ...
+```
+
+## Why you should always use `accelerate config`
+
+Why is it useful to the point you should **always** run `accelerate config`?
+
+Remember that earlier call to `accelerate launch` as well as `torchrun`?
+Once configured, to run that script with the needed parts you just need to use `accelerate launch` outright, without passing anything else in:
+
+```bash
+accelerate launch {script_name.py} {--arg1} {--arg2} ...
+```
+
+
+## Custom Configurations
+
+As briefly mentioned earlier, `accelerate launch` should mostly be used by combining configurations
+set with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for 🤗 Accelerate.
+This cache folder is located at (with decreasing order of priority):
+
+- The content of your environment variable `HF_HOME` suffixed with `accelerate`.
+- If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with
+  `huggingface/accelerate`.
+- If this does not exist either, the folder `~/.cache/huggingface/accelerate`.
+
+To have multiple configurations, the flag `--config_file` can be passed to the `accelerate launch` command paired
+with the location of the custom yaml.
+
+An example yaml may look something like the following for two GPUs on a single machine using `fp16` for mixed precision:
+```yaml
+compute_environment: LOCAL_MACHINE
+deepspeed_config: {}
+distributed_type: MULTI_GPU
+fsdp_config: {}
+machine_rank: 0
+main_process_ip: null
+main_process_port: null
+main_training_function: main
+mixed_precision: fp16
+num_machines: 1
+num_processes: 2
+use_cpu: false
+```
+
+Launching a script from the location of that custom yaml file looks like the following:
+```bash
+accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ...
+```
\ No newline at end of file
diff --git a/testbed/huggingface__accelerate/setup.cfg b/testbed/huggingface__accelerate/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..37cf34791bfde0d8cd4c4cfdae4c1075c82e400d
--- /dev/null
+++ b/testbed/huggingface__accelerate/setup.cfg
@@ -0,0 +1,19 @@
+[isort]
+default_section = FIRSTPARTY
+ensure_newline_before_comments = True
+force_grid_wrap = 0
+include_trailing_comma = True
+known_first_party = accelerate
+known_third_party =
+    numpy
+    torch
+    torch_xla
+
+line_length = 119
+lines_after_imports = 2
+multi_line_output = 3
+use_parentheses = True
+
+[flake8]
+ignore = E203, E722, E501, E741, W503, W605
+max-line-length = 119
diff --git a/testbed/huggingface__datasets/tests/README.md b/testbed/huggingface__datasets/tests/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..89955ad6b06aa199e55d8eb18a99adba12109131
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/README.md
@@ -0,0 +1,32 @@
+## Add Dummy data test
+
+**Important** In order to pass the `load_dataset_` test, dummy data is required for all possible config names.
+
+First we distinguish between dataset scripts that
+- A) have no config class and
+- B) have a config class
+
+For A) the dummy data folder structure will always look as follows:
+- ``dummy/<version>/dummy_data.zip``, *e.g.* ``cosmos_qa/dummy/0.1.0/dummy_data.zip``.
+For B) the dummy data folder structure will always look as follows:
+- ``dummy/<config_name>/<version>/dummy_data.zip``, *e.g.* ``squad/dummy/plain-text/1.0.0/dummy_data.zip``.
+
+
+Now the difficult part is to create the correct `dummy_data.zip` file.
+
+**Important** When checking the dummy folder structure of already added datasets, always unzip ``dummy_data.zip``. If a folder ``dummy_data`` is found next to ``dummy_data.zip``, it is probably an old version and should be deleted. The tests only take the ``dummy_data.zip`` file into account.
+
+Here we have to pay close attention to the ``_split_generators(self, dl_manager)`` function of the dataset script in question.
+There are three general possibilities:
+
+1) The ``dl_manager.download_and_extract()`` is given a **single path variable** of type `str` as its argument. In this case the file `dummy_data.zip` should unzip to the following structure:
+``os.path.join("dummy_data", <additional_paths_to_files>)`` *e.g.* for ``sentiment140``, the unzipped ``dummy_data.zip`` has the following dir structure ``dummy_data/testdata.manual.2009.06.14.csv`` and ``dummy_data/training.1600000.processed.noemoticon.csv``.
+
+**Note** if there are no ``<additional_paths_to_files>``, then ``dummy_data`` should be the name of the single file. An example for this is the ``crime-and-punishment`` dataset script.
+
+2) The ``dl_manager.download_and_extract()`` is given a **dictionary of paths** of type `str` as its argument. In this case the file `dummy_data.zip` should unzip to the following structure:
+``os.path.join("dummy_data", <url>.split('/')[-1], <additional_paths_to_files>)`` *e.g.* for ``squad``, the unzipped ``dummy_data.zip`` has the following dir structure ``dummy_data/dev-v1.1.json``, etc...
+
+**Note** if ``<url>`` is a zipped file then the dummy data folder structure should contain the exact name of the zipped file and the following extracted folder structure. The file `dummy_data.zip` should **never** itself contain a zipped file since the dummy data is not unzipped by the ``MockDownloadManager`` during testing. *E.g.* check the dummy folder structure of ``hansards`` where the folders have to be named ``*.tar`` or the structure of ``wiki_split`` where the folders have to be named ``*.zip``.
+
+3) The ``dl_manager.download_and_extract()`` is given a **dictionary of lists of paths** of type `str` as its argument. This is a very special case and has been seen only for the dataset ``ensli``. In this case the values are simply flattened and the dummy folder structure is the same as in 2).
diff --git a/testbed/huggingface__datasets/tests/__init__.py b/testbed/huggingface__datasets/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testbed/huggingface__datasets/tests/commands/test_test.py b/testbed/huggingface__datasets/tests/commands/test_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c4bf68ae786d3655b257ebb8c99439692051778
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/commands/test_test.py
@@ -0,0 +1,82 @@
+import os
+from collections import namedtuple
+
+import pytest
+
+from datasets import ClassLabel, Features, Sequence, Value
+from datasets.commands.test import TestCommand
+from datasets.info import DatasetInfo, DatasetInfosDict
+
+
+_TestCommandArgs = namedtuple(
+    "_TestCommandArgs",
+    [
+        "dataset",
+        "name",
+        "cache_dir",
+        "data_dir",
+        "all_configs",
+        "save_infos",
+        "ignore_verifications",
+        "force_redownload",
+        "clear_cache",
+    ],
+    defaults=[None, None, None, False, False, False, False, False],
+)
+
+
+def is_1percent_close(source, target):
+    return (abs(source - target) / target) < 0.01
+
+
+@pytest.mark.integration
+def test_test_command(dataset_loading_script_dir):
+    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
+    test_command = TestCommand(*args)
+    test_command.run()
+    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
+    assert os.path.exists(dataset_readme_path)
+    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
+    expected_dataset_infos = DatasetInfosDict(
+        {
+            "default": DatasetInfo(
+                features=Features(
+                    {
+                        "tokens": Sequence(Value("string")),
+                        "ner_tags": Sequence(
+                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
+                        ),
+                        "langs": Sequence(Value("string")),
+                        "spans": Sequence(Value("string")),
+                    }
+                ),
+                splits=[
+                    {
+                        "name": "train",
+                        "num_bytes": 2351563,
+                        "num_examples": 10000,
+                    },
+                    {
+                        "name": "validation",
+                        "num_bytes": 238418,
+                        "num_examples": 1000,
+                    },
+                ],
+                download_size=3940680,
+                dataset_size=2589981,
+            )
+        }
+    )
+    assert dataset_infos.keys() == expected_dataset_infos.keys()
+    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
+        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
+        if key == "num_bytes":
+            assert is_1percent_close(result, expected)
+        elif key == "splits":
+            assert list(result) == list(expected)
+            for split in result:
+                assert result[split].name == expected[split].name
+                assert result[split].num_examples == expected[split].num_examples
+                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
+        else:
+            assert result == expected
diff --git a/testbed/huggingface__datasets/tests/conftest.py b/testbed/huggingface__datasets/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..171ee80364dfb87df1a6069fc15c9b7f23c400b7
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/conftest.py
@@ -0,0
+1,53 @@ +import pytest + +import datasets + + +# Import fixture modules as plugins +pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] + + +def pytest_collection_modifyitems(config, items): + # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") + for item in items: + if any(marker in item.keywords for marker in ["integration", "unit"]): + continue + item.add_marker(pytest.mark.unit) + + +def pytest_configure(config): + config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12") + + +@pytest.fixture(autouse=True) +def set_test_cache_config(tmp_path_factory, monkeypatch): + # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? + test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache" + test_hf_datasets_cache = test_hf_cache_home / "datasets" + test_hf_metrics_cache = test_hf_cache_home / "metrics" + test_hf_modules_cache = test_hf_cache_home / "modules" + monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache)) + monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache)) + monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache)) + test_downloaded_datasets_path = test_hf_datasets_cache / "downloads" + monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path)) + test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted" + monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path)) + + +@pytest.fixture(autouse=True, scope="session") +def disable_tqdm_output(): + datasets.disable_progress_bar() + + +@pytest.fixture(autouse=True) +def set_update_download_counts_to_false(monkeypatch): + # don't take tests into account when counting downloads + monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False) + + +@pytest.fixture +def set_sqlalchemy_silence_uber_warning(monkeypatch): + # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 + # To be removed once SQLAlchemy 2.0 supported + monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True) diff --git a/testbed/huggingface__datasets/tests/distributed_scripts/run_torch_distributed.py b/testbed/huggingface__datasets/tests/distributed_scripts/run_torch_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..4cfe247dd74da34bd0a531594f60688c1c95f700 --- /dev/null +++ b/testbed/huggingface__datasets/tests/distributed_scripts/run_torch_distributed.py @@ -0,0 +1,55 @@ +import os +from argparse import ArgumentParser +from typing import List + +import torch.utils.data + +from datasets import Dataset, IterableDataset +from datasets.distributed import split_dataset_by_node + + +NUM_SHARDS = 4 +NUM_ITEMS_PER_SHARD = 3 + + +class FailedTestError(RuntimeError): + pass + + +def gen(shards: List[str]): + for shard in shards: + for i in range(NUM_ITEMS_PER_SHARD): + yield {"i": i, "shard": shard} + + +def main(): + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + + parser = ArgumentParser() + parser.add_argument("--streaming", type=bool) + parser.add_argument("--local_rank", type=int) + parser.add_argument("--num_workers", type=int, default=0) + args = parser.parse_args() + streaming = args.streaming + num_workers = args.num_workers + + gen_kwargs = {"shards": 
[f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]} + ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs) + if not streaming: + ds = Dataset.from_list(list(ds)) + + ds = split_dataset_by_node(ds, rank=rank, world_size=world_size) + dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers) + + full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD + expected_local_size = full_size // world_size + expected_local_size += int(rank < (full_size % world_size)) + + local_size = sum(1 for _ in dataloader) + if local_size != expected_local_size: + raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}") + + +if __name__ == "__main__": + main() diff --git a/testbed/huggingface__datasets/tests/features/test_array_xd.py b/testbed/huggingface__datasets/tests/features/test_array_xd.py new file mode 100644 index 0000000000000000000000000000000000000000..61a0fd7650e81e1da98cb459ff30571e1b3dd9b2 --- /dev/null +++ b/testbed/huggingface__datasets/tests/features/test_array_xd.py @@ -0,0 +1,465 @@ +import os +import random +import tempfile +import unittest + +import numpy as np +import pandas as pd +import pyarrow as pa +import pytest +from absl.testing import parameterized + +import datasets +from datasets.arrow_writer import ArrowWriter +from datasets.features import Array2D, Array3D, Array4D, Array5D, Value +from datasets.features.features import Array3DExtensionType, PandasArrayExtensionDtype, _ArrayXD +from datasets.formatting.formatting import NumpyArrowExtractor, SimpleArrowExtractor + + +SHAPE_TEST_1 = (30, 487) +SHAPE_TEST_2 = (36, 1024) +SHAPE_TEST_3 = (None, 100) +SPEED_TEST_SHAPE = (100, 100) +SPEED_TEST_N_EXAMPLES = 100 + +DEFAULT_FEATURES = datasets.Features( + { + "text": Array2D(SHAPE_TEST_1, dtype="float32"), + "image": Array2D(SHAPE_TEST_2, dtype="float32"), + "dynamic": Array2D(SHAPE_TEST_3, dtype="float32"), + } +) + + +def generate_examples(features: dict, num_examples=100, seq_shapes=None): + dummy_data = [] + seq_shapes = seq_shapes or {} + for i in range(num_examples): + example = {} + for col_id, (k, v) in enumerate(features.items()): + if isinstance(v, _ArrayXD): + if k == "dynamic": + first_dim = random.randint(1, 3) + data = np.random.rand(first_dim, *v.shape[1:]).astype(v.dtype) + else: + data = np.random.rand(*v.shape).astype(v.dtype) + elif isinstance(v, datasets.Value): + data = "foo" + elif isinstance(v, datasets.Sequence): + while isinstance(v, datasets.Sequence): + v = v.feature + shape = seq_shapes[k] + data = np.random.rand(*shape).astype(v.dtype) + example[k] = data + dummy_data.append((i, example)) + + return dummy_data + + +class ExtensionTypeCompatibilityTest(unittest.TestCase): + def test_array2d_nonspecific_shape(self): + with tempfile.TemporaryDirectory() as tmp_dir: + my_features = DEFAULT_FEATURES.copy() + with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: + for key, record in generate_examples( + features=my_features, + num_examples=1, + ): + example = my_features.encode_example(record) + writer.write(example) + num_examples, num_bytes = writer.finalize() + dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow")) + dataset.set_format("numpy") + row = dataset[0] + first_shape = row["image"].shape + second_shape = row["text"].shape + self.assertTrue(first_shape is not None and second_shape is not None, "need atleast 2 different shapes") + self.assertEqual(len(first_shape), len(second_shape), "both shapes are supposed to be equal length") + 
self.assertNotEqual(first_shape, second_shape, "shapes must not be the same") + del dataset + + def test_multiple_extensions_same_row(self): + with tempfile.TemporaryDirectory() as tmp_dir: + my_features = DEFAULT_FEATURES.copy() + with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: + for key, record in generate_examples(features=my_features, num_examples=1): + example = my_features.encode_example(record) + writer.write(example) + num_examples, num_bytes = writer.finalize() + dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow")) + dataset.set_format("numpy") + row = dataset[0] + first_len = len(row["image"].shape) + second_len = len(row["text"].shape) + third_len = len(row["dynamic"].shape) + self.assertEqual(first_len, 2, "use a sequence type if dim is < 2") + self.assertEqual(second_len, 2, "use a sequence type if dim is < 2") + self.assertEqual(third_len, 2, "use a sequence type if dim is < 2") + del dataset + + def test_compatability_with_string_values(self): + with tempfile.TemporaryDirectory() as tmp_dir: + my_features = DEFAULT_FEATURES.copy() + my_features["image_id"] = datasets.Value("string") + with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: + for key, record in generate_examples(features=my_features, num_examples=1): + example = my_features.encode_example(record) + writer.write(example) + num_examples, num_bytes = writer.finalize() + dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow")) + self.assertIsInstance(dataset[0]["image_id"], str, "image id must be of type string") + del dataset + + def test_extension_indexing(self): + with tempfile.TemporaryDirectory() as tmp_dir: + my_features = DEFAULT_FEATURES.copy() + my_features["explicit_ext"] = Array2D((3, 3), dtype="float32") + with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: + for key, record in generate_examples(features=my_features, num_examples=1): + example = my_features.encode_example(record) + writer.write(example) + num_examples, num_bytes = writer.finalize() + dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow")) + dataset.set_format("numpy") + data = dataset[0]["explicit_ext"] + self.assertIsInstance(data, np.ndarray, "indexed extension must return numpy.ndarray") + del dataset + + +def get_array_feature_types(): + shape_1 = [3] * 5 + shape_2 = [3, 4, 5, 6, 7] + return [ + { + "testcase_name": f"{d}d", + "array_feature": array_feature, + "shape_1": tuple(shape_1[:d]), + "shape_2": tuple(shape_2[:d]), + } + for d, array_feature in zip(range(2, 6), [Array2D, Array3D, Array4D, Array5D]) + ] + + +@parameterized.named_parameters(get_array_feature_types()) +class ArrayXDTest(unittest.TestCase): + def get_features(self, array_feature, shape_1, shape_2): + return datasets.Features( + { + "image": array_feature(shape_1, dtype="float32"), + "source": Value("string"), + "matrix": array_feature(shape_2, dtype="float32"), + } + ) + + def get_dict_example_0(self, shape_1, shape_2): + return { + "image": np.random.rand(*shape_1).astype("float32"), + "source": "foo", + "matrix": np.random.rand(*shape_2).astype("float32"), + } + + def get_dict_example_1(self, shape_1, shape_2): + return { + "image": np.random.rand(*shape_1).astype("float32"), + "matrix": np.random.rand(*shape_2).astype("float32"), + "source": "bar", + } + + def get_dict_examples(self, shape_1, shape_2): + return { + "image": np.random.rand(2, *shape_1).astype("float32").tolist(), + 
"source": ["foo", "bar"], + "matrix": np.random.rand(2, *shape_2).astype("float32").tolist(), + } + + def _check_getitem_output_type(self, dataset, shape_1, shape_2, first_matrix): + matrix_column = dataset["matrix"] + self.assertIsInstance(matrix_column, list) + self.assertIsInstance(matrix_column[0], list) + self.assertIsInstance(matrix_column[0][0], list) + self.assertTupleEqual(np.array(matrix_column).shape, (2, *shape_2)) + + matrix_field_of_first_example = dataset[0]["matrix"] + self.assertIsInstance(matrix_field_of_first_example, list) + self.assertIsInstance(matrix_field_of_first_example, list) + self.assertEqual(np.array(matrix_field_of_first_example).shape, shape_2) + np.testing.assert_array_equal(np.array(matrix_field_of_first_example), np.array(first_matrix)) + + matrix_field_of_first_two_examples = dataset[:2]["matrix"] + self.assertIsInstance(matrix_field_of_first_two_examples, list) + self.assertIsInstance(matrix_field_of_first_two_examples[0], list) + self.assertIsInstance(matrix_field_of_first_two_examples[0][0], list) + self.assertTupleEqual(np.array(matrix_field_of_first_two_examples).shape, (2, *shape_2)) + + with dataset.formatted_as("numpy"): + self.assertTupleEqual(dataset["matrix"].shape, (2, *shape_2)) + self.assertEqual(dataset[0]["matrix"].shape, shape_2) + self.assertTupleEqual(dataset[:2]["matrix"].shape, (2, *shape_2)) + + with dataset.formatted_as("pandas"): + self.assertIsInstance(dataset["matrix"], pd.Series) + self.assertIsInstance(dataset[0]["matrix"], pd.Series) + self.assertIsInstance(dataset[:2]["matrix"], pd.Series) + self.assertTupleEqual(dataset["matrix"].to_numpy().shape, (2, *shape_2)) + self.assertTupleEqual(dataset[0]["matrix"].to_numpy().shape, (1, *shape_2)) + self.assertTupleEqual(dataset[:2]["matrix"].to_numpy().shape, (2, *shape_2)) + + def test_write(self, array_feature, shape_1, shape_2): + with tempfile.TemporaryDirectory() as tmp_dir: + my_features = self.get_features(array_feature, shape_1, shape_2) + my_examples = [ + (0, self.get_dict_example_0(shape_1, shape_2)), + (1, self.get_dict_example_1(shape_1, shape_2)), + ] + with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: + for key, record in my_examples: + example = my_features.encode_example(record) + writer.write(example) + num_examples, num_bytes = writer.finalize() + dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow")) + self._check_getitem_output_type(dataset, shape_1, shape_2, my_examples[0][1]["matrix"]) + del dataset + + def test_write_batch(self, array_feature, shape_1, shape_2): + with tempfile.TemporaryDirectory() as tmp_dir: + my_features = self.get_features(array_feature, shape_1, shape_2) + dict_examples = self.get_dict_examples(shape_1, shape_2) + dict_examples = my_features.encode_batch(dict_examples) + with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: + writer.write_batch(dict_examples) + num_examples, num_bytes = writer.finalize() + dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow")) + self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0]) + del dataset + + def test_from_dict(self, array_feature, shape_1, shape_2): + dict_examples = self.get_dict_examples(shape_1, shape_2) + dataset = datasets.Dataset.from_dict( + dict_examples, features=self.get_features(array_feature, shape_1, shape_2) + ) + self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0]) + del dataset + + +class 
ArrayXDDynamicTest(unittest.TestCase): + def get_one_col_dataset(self, first_dim_list, fixed_shape): + features = datasets.Features({"image": Array3D(shape=(None, *fixed_shape), dtype="float32")}) + dict_values = {"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list]} + dataset = datasets.Dataset.from_dict(dict_values, features=features) + return dataset + + def get_two_col_datasset(self, first_dim_list, fixed_shape): + features = datasets.Features( + {"image": Array3D(shape=(None, *fixed_shape), dtype="float32"), "text": Value("string")} + ) + dict_values = { + "image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list], + "text": ["text" for _ in first_dim_list], + } + dataset = datasets.Dataset.from_dict(dict_values, features=features) + return dataset + + def test_to_pylist(self): + fixed_shape = (2, 2) + first_dim_list = [1, 3, 10] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + arr_xd = SimpleArrowExtractor().extract_column(dataset._data) + self.assertIsInstance(arr_xd.type, Array3DExtensionType) + pylist = arr_xd.to_pylist() + + for first_dim, single_arr in zip(first_dim_list, pylist): + self.assertIsInstance(single_arr, list) + self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape)) + + def test_to_numpy(self): + fixed_shape = (2, 2) + + # ragged + first_dim_list = [1, 3, 10] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + arr_xd = SimpleArrowExtractor().extract_column(dataset._data) + self.assertIsInstance(arr_xd.type, Array3DExtensionType) + # replace with arr_xd = arr_xd.combine_chunks() when 12.0.0 will be the minimal required PyArrow version + arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks])) + numpy_arr = arr_xd.to_numpy() + + self.assertIsInstance(numpy_arr, np.ndarray) + self.assertEqual(numpy_arr.dtype, object) + for first_dim, single_arr in zip(first_dim_list, numpy_arr): + self.assertIsInstance(single_arr, np.ndarray) + self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape)) + + # non-ragged + first_dim_list = [4, 4, 4] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + arr_xd = SimpleArrowExtractor().extract_column(dataset._data) + self.assertIsInstance(arr_xd.type, Array3DExtensionType) + # replace with arr_xd = arr_xd.combine_chunks() when 12.0.0 will be the minimal required PyArrow version + arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks])) + numpy_arr = arr_xd.to_numpy() + + self.assertIsInstance(numpy_arr, np.ndarray) + self.assertNotEqual(numpy_arr.dtype, object) + for first_dim, single_arr in zip(first_dim_list, numpy_arr): + self.assertIsInstance(single_arr, np.ndarray) + self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape)) + + def test_iter_dataset(self): + fixed_shape = (2, 2) + first_dim_list = [1, 3, 10] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + + for first_dim, ds_row in zip(first_dim_list, dataset): + single_arr = ds_row["image"] + self.assertIsInstance(single_arr, list) + self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape)) + + def test_to_pandas(self): + fixed_shape = (2, 2) + + # ragged + first_dim_list = [1, 3, 10] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + df = dataset.to_pandas() + self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype) + numpy_arr = df.image.to_numpy() + + self.assertIsInstance(numpy_arr, 
np.ndarray) + self.assertEqual(numpy_arr.dtype, object) + for first_dim, single_arr in zip(first_dim_list, numpy_arr): + self.assertIsInstance(single_arr, np.ndarray) + self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape)) + + # non-ragged + first_dim_list = [4, 4, 4] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + df = dataset.to_pandas() + self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype) + numpy_arr = df.image.to_numpy() + + self.assertIsInstance(numpy_arr, np.ndarray) + self.assertNotEqual(numpy_arr.dtype, object) + for first_dim, single_arr in zip(first_dim_list, numpy_arr): + self.assertIsInstance(single_arr, np.ndarray) + self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape)) + + def test_map_dataset(self): + fixed_shape = (2, 2) + first_dim_list = [1, 3, 10] + dataset = self.get_one_col_dataset(first_dim_list, fixed_shape) + + dataset = dataset.map(lambda a: {"image": np.concatenate([a] * 2)}, input_columns="image") + + # check also if above function resulted with 2x bigger first dim + for first_dim, ds_row in zip(first_dim_list, dataset): + single_arr = ds_row["image"] + self.assertIsInstance(single_arr, list) + self.assertTupleEqual(np.array(single_arr).shape, (first_dim * 2, *fixed_shape)) + + +@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)]) +def test_table_to_pandas(dtype, dummy_value): + features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))}) + dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features) + df = dataset._data.to_pandas() + assert type(df.foo.dtype) == PandasArrayExtensionDtype + arr = df.foo.to_numpy() + np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype))) + + +@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)]) +def test_array_xd_numpy_arrow_extractor(dtype, dummy_value): + features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))}) + dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features) + arr = NumpyArrowExtractor().extract_column(dataset._data) + assert isinstance(arr, np.ndarray) + np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype))) + + +def test_array_xd_with_none(): + # Fixed shape + features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(2, 2))}) + dummy_array = np.array([[1, 2], [3, 4]], dtype="int32") + dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features) + arr = NumpyArrowExtractor().extract_column(dataset._data) + assert isinstance(arr, np.ndarray) and arr.dtype == np.float64 and arr.shape == (4, 2, 2) + assert np.allclose(arr[0], dummy_array) and np.allclose(arr[2], dummy_array) + assert np.all(np.isnan(arr[1])) and np.all(np.isnan(arr[3])) # broadcasted np.nan - use np.all + + # Dynamic shape + features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(None, 2))}) + dummy_array = np.array([[1, 2], [3, 4]], dtype="int32") + dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features) + arr = NumpyArrowExtractor().extract_column(dataset._data) + assert isinstance(arr, np.ndarray) and arr.dtype == object and arr.shape == (4,) + np.testing.assert_equal(arr[0], dummy_array) + np.testing.assert_equal(arr[2], dummy_array) + assert np.isnan(arr[1]) and np.isnan(arr[3]) # a single np.nan 
value - np.all not needed + + +@pytest.mark.parametrize("seq_type", ["no_sequence", "sequence", "sequence_of_sequence"]) +@pytest.mark.parametrize( + "dtype", + [ + "bool", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "float16", + "float32", + "float64", + ], +) +@pytest.mark.parametrize("shape, feature_class", [((2, 3), datasets.Array2D), ((2, 3, 4), datasets.Array3D)]) +def test_array_xd_with_np(seq_type, dtype, shape, feature_class): + feature = feature_class(dtype=dtype, shape=shape) + data = np.zeros(shape, dtype=dtype) + expected = data.tolist() + if seq_type == "sequence": + feature = datasets.Sequence(feature) + data = [data] + expected = [expected] + elif seq_type == "sequence_of_sequence": + feature = datasets.Sequence(datasets.Sequence(feature)) + data = [[data]] + expected = [[expected]] + ds = datasets.Dataset.from_dict({"col": [data]}, features=datasets.Features({"col": feature})) + assert ds[0]["col"] == expected + + +@pytest.mark.parametrize("with_none", [False, True]) +def test_dataset_map(with_none): + ds = datasets.Dataset.from_dict({"path": ["path1", "path2"]}) + + def process_data(batch): + batch = { + "image": [ + np.array( + [ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 20, 30], [40, 50, 60], [70, 80, 90]], + [[100, 200, 300], [400, 500, 600], [700, 800, 900]], + ] + ) + for _ in batch["path"] + ] + } + if with_none: + batch["image"][0] = None + return batch + + features = datasets.Features({"image": Array3D(dtype="int32", shape=(3, 3, 3))}) + processed_ds = ds.map(process_data, batched=True, remove_columns=ds.column_names, features=features) + assert processed_ds.shape == (2, 1) + with processed_ds.with_format("numpy") as pds: + for i, example in enumerate(pds): + assert "image" in example + assert isinstance(example["image"], np.ndarray) + assert example["image"].shape == (3, 3, 3) + if with_none and i == 0: + assert np.all(np.isnan(example["image"])) diff --git a/testbed/huggingface__datasets/tests/features/test_features.py b/testbed/huggingface__datasets/tests/features/test_features.py new file mode 100644 index 0000000000000000000000000000000000000000..4e72a2c63952a5b69310ae42aa2dc67e6dccbbe5 --- /dev/null +++ b/testbed/huggingface__datasets/tests/features/test_features.py @@ -0,0 +1,639 @@ +import datetime +from unittest import TestCase +from unittest.mock import patch + +import numpy as np +import pandas as pd +import pyarrow as pa +import pytest + +from datasets import Array2D +from datasets.arrow_dataset import Dataset +from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value +from datasets.features.features import ( + _arrow_to_datasets_dtype, + _cast_to_python_objects, + cast_to_python_objects, + encode_nested_example, + generate_from_dict, + string_to_arrow, +) +from datasets.features.translation import Translation, TranslationVariableLanguages +from datasets.info import DatasetInfo +from datasets.utils.py_utils import asdict + +from ..utils import require_jax, require_tf, require_torch + + +class FeaturesTest(TestCase): + def test_from_arrow_schema_simple(self): + data = {"a": [{"b": {"c": "text"}}] * 10, "foo": [1] * 10} + original_features = Features({"a": {"b": {"c": Value("string")}}, "foo": Value("int64")}) + dset = Dataset.from_dict(data, features=original_features) + new_features = dset.features + new_dset = Dataset.from_dict(data, features=new_features) + self.assertEqual(original_features.type, new_features.type) + self.assertDictEqual(dset[0], new_dset[0]) + 
self.assertDictEqual(dset[:], new_dset[:]) + + def test_from_arrow_schema_with_sequence(self): + data = {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10} + original_features = Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}) + dset = Dataset.from_dict(data, features=original_features) + new_features = dset.features + new_dset = Dataset.from_dict(data, features=new_features) + self.assertEqual(original_features.type, new_features.type) + self.assertDictEqual(dset[0], new_dset[0]) + self.assertDictEqual(dset[:], new_dset[:]) + + def test_string_to_arrow_bijection_for_primitive_types(self): + supported_pyarrow_datatypes = [ + pa.time32("s"), + pa.time64("us"), + pa.timestamp("s"), + pa.timestamp("ns", tz="America/New_York"), + pa.date32(), + pa.date64(), + pa.duration("s"), + pa.decimal128(10, 2), + pa.decimal256(40, -3), + pa.string(), + pa.int32(), + pa.float64(), + pa.array([datetime.time(1, 1, 1)]).type, # arrow type: DataType(time64[us]) + ] + for dt in supported_pyarrow_datatypes: + self.assertEqual(dt, string_to_arrow(_arrow_to_datasets_dtype(dt))) + + unsupported_pyarrow_datatypes = [pa.list_(pa.float64())] + for dt in unsupported_pyarrow_datatypes: + with self.assertRaises(ValueError): + string_to_arrow(_arrow_to_datasets_dtype(dt)) + + supported_datasets_dtypes = [ + "time32[s]", + "timestamp[ns]", + "timestamp[ns, tz=+07:30]", + "duration[us]", + "decimal128(30, -4)", + "int32", + "float64", + ] + for sdt in supported_datasets_dtypes: + self.assertEqual(sdt, _arrow_to_datasets_dtype(string_to_arrow(sdt))) + + unsupported_datasets_dtypes = [ + "time32[ns]", + "timestamp[blob]", + "timestamp[[ns]]", + "timestamp[ns, tz=[ns]]", + "duration[[us]]", + "decimal20(30, -4)", + "int", + ] + for sdt in unsupported_datasets_dtypes: + with self.assertRaises(ValueError): + string_to_arrow(sdt) + + def test_feature_named_type(self): + """reference: issue #1110""" + features = Features({"_type": Value("string")}) + ds_info = DatasetInfo(features=features) + reloaded_features = Features.from_dict(asdict(ds_info)["features"]) + assert features == reloaded_features + + def test_feature_named_self_as_kwarg(self): + """reference: issue #5641""" + features = Features(self=Value("string")) + ds_info = DatasetInfo(features=features) + reloaded_features = Features.from_dict(asdict(ds_info)["features"]) + assert features == reloaded_features + + def test_class_label_feature_with_no_labels(self): + """reference: issue #4681""" + features = Features({"label": ClassLabel(names=[])}) + ds_info = DatasetInfo(features=features) + reloaded_features = Features.from_dict(asdict(ds_info)["features"]) + assert features == reloaded_features + + def test_reorder_fields_as(self): + features = Features( + { + "id": Value("string"), + "document": { + "title": Value("string"), + "url": Value("string"), + "html": Value("string"), + "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}), + }, + "question": { + "text": Value("string"), + "tokens": Sequence(Value("string")), + }, + "annotations": Sequence( + { + "id": Value("string"), + "long_answer": { + "start_token": Value("int64"), + "end_token": Value("int64"), + "start_byte": Value("int64"), + "end_byte": Value("int64"), + }, + "short_answers": Sequence( + { + "start_token": Value("int64"), + "end_token": Value("int64"), + "start_byte": Value("int64"), + "end_byte": Value("int64"), + "text": Value("string"), + } + ), + "yes_no_answer": ClassLabel(names=["NO", "YES"]), + } + ), + } + ) + + other = Features( # same but 
with [] instead of sequences, and with a shuffled fields order + { + "id": Value("string"), + "document": { + "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}), + "title": Value("string"), + "url": Value("string"), + "html": Value("string"), + }, + "question": { + "text": Value("string"), + "tokens": [Value("string")], + }, + "annotations": { + "yes_no_answer": [ClassLabel(names=["NO", "YES"])], + "id": [Value("string")], + "long_answer": [ + { + "end_byte": Value("int64"), + "start_token": Value("int64"), + "end_token": Value("int64"), + "start_byte": Value("int64"), + } + ], + "short_answers": [ + Sequence( + { + "text": Value("string"), + "start_token": Value("int64"), + "end_token": Value("int64"), + "start_byte": Value("int64"), + "end_byte": Value("int64"), + } + ) + ], + }, + } + ) + + expected = Features( + { + "id": Value("string"), + "document": { + "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}), + "title": Value("string"), + "url": Value("string"), + "html": Value("string"), + }, + "question": { + "text": Value("string"), + "tokens": Sequence(Value("string")), + }, + "annotations": Sequence( + { + "yes_no_answer": ClassLabel(names=["NO", "YES"]), + "id": Value("string"), + "long_answer": { + "end_byte": Value("int64"), + "start_token": Value("int64"), + "end_token": Value("int64"), + "start_byte": Value("int64"), + }, + "short_answers": Sequence( + { + "text": Value("string"), + "start_token": Value("int64"), + "end_token": Value("int64"), + "start_byte": Value("int64"), + "end_byte": Value("int64"), + } + ), + } + ), + } + ) + + reordered_features = features.reorder_fields_as(other) + self.assertDictEqual(reordered_features, expected) + self.assertEqual(reordered_features.type, other.type) + self.assertEqual(reordered_features.type, expected.type) + self.assertNotEqual(reordered_features.type, features.type) + + def test_flatten(self): + features = Features({"foo": {"bar1": Value("int32"), "bar2": {"foobar": Value("string")}}}) + _features = features.copy() + flattened_features = features.flatten() + assert flattened_features == {"foo.bar1": Value("int32"), "foo.bar2.foobar": Value("string")} + assert features == _features, "calling flatten shouldn't alter the current features" + + def test_flatten_with_sequence(self): + features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})}) + _features = features.copy() + flattened_features = features.flatten() + assert flattened_features == {"foo.bar": [{"my_value": Value("int32")}]} + assert features == _features, "calling flatten shouldn't alter the current features" + + def test_features_dicts_are_synced(self): + def assert_features_dicts_are_synced(features: Features): + assert ( + hasattr(features, "_column_requires_decoding") + and features.keys() == features._column_requires_decoding.keys() + ) + + features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})}) + assert_features_dicts_are_synced(features) + features["barfoo"] = Image() + assert_features_dicts_are_synced(features) + del features["barfoo"] + assert_features_dicts_are_synced(features) + features.update({"foobar": Value("string")}) + assert_features_dicts_are_synced(features) + features.pop("foobar") + assert_features_dicts_are_synced(features) + features.popitem() + assert_features_dicts_are_synced(features) + features.setdefault("xyz", Value("bool")) + assert_features_dicts_are_synced(features) + features.clear() + assert_features_dicts_are_synced(features) + + +def 
test_classlabel_init(tmp_path_factory): + names = ["negative", "positive"] + names_file = str(tmp_path_factory.mktemp("features") / "labels.txt") + with open(names_file, "w", encoding="utf-8") as f: + f.write("\n".join(names)) + classlabel = ClassLabel(names=names) + assert classlabel.names == names and classlabel.num_classes == len(names) + classlabel = ClassLabel(names_file=names_file) + assert classlabel.names == names and classlabel.num_classes == len(names) + classlabel = ClassLabel(num_classes=len(names), names=names) + assert classlabel.names == names and classlabel.num_classes == len(names) + classlabel = ClassLabel(num_classes=len(names)) + assert classlabel.names == [str(i) for i in range(len(names))] and classlabel.num_classes == len(names) + with pytest.raises(ValueError): + classlabel = ClassLabel(num_classes=len(names) + 1, names=names) + with pytest.raises(ValueError): + classlabel = ClassLabel(names=names, names_file=names_file) + with pytest.raises(ValueError): + classlabel = ClassLabel() + with pytest.raises(TypeError): + classlabel = ClassLabel(names=np.array(names)) + + +def test_classlabel_str2int(): + names = ["negative", "positive"] + classlabel = ClassLabel(names=names) + for label in names: + assert classlabel.str2int(label) == names.index(label) + with pytest.raises(ValueError): + classlabel.str2int("__bad_label_name__") + with pytest.raises(ValueError): + classlabel.str2int(1) + with pytest.raises(ValueError): + classlabel.str2int(None) + + +def test_classlabel_int2str(): + names = ["negative", "positive"] + classlabel = ClassLabel(names=names) + for i in range(len(names)): + assert classlabel.int2str(i) == names[i] + with pytest.raises(ValueError): + classlabel.int2str(len(names)) + with pytest.raises(ValueError): + classlabel.int2str(-1) + with pytest.raises(ValueError): + classlabel.int2str(None) + + +def test_classlabel_cast_storage(): + names = ["negative", "positive"] + classlabel = ClassLabel(names=names) + # from integers + arr = pa.array([0, 1, -1, -100], type=pa.int64()) + result = classlabel.cast_storage(arr) + assert result.type == pa.int64() + assert result.to_pylist() == [0, 1, -1, -100] + arr = pa.array([0, 1, -1, -100], type=pa.int32()) + result = classlabel.cast_storage(arr) + assert result.type == pa.int64() + assert result.to_pylist() == [0, 1, -1, -100] + arr = pa.array([3]) + with pytest.raises(ValueError): + classlabel.cast_storage(arr) + # from strings + arr = pa.array(["negative", "positive"]) + result = classlabel.cast_storage(arr) + assert result.type == pa.int64() + assert result.to_pylist() == [0, 1] + arr = pa.array(["__label_that_doesnt_exist__"]) + with pytest.raises(ValueError): + classlabel.cast_storage(arr) + # from nulls + arr = pa.array([None]) + result = classlabel.cast_storage(arr) + assert result.type == pa.int64() + assert result.to_pylist() == [None] + # from empty + arr = pa.array([], pa.int64()) + result = classlabel.cast_storage(arr) + assert result.type == pa.int64() + assert result.to_pylist() == [] + arr = pa.array([], pa.string()) + result = classlabel.cast_storage(arr) + assert result.type == pa.int64() + assert result.to_pylist() == [] + + +@pytest.mark.parametrize("class_label_arg", ["names", "names_file"]) +def test_class_label_to_and_from_dict(class_label_arg, tmp_path_factory): + names = ["negative", "positive"] + names_file = str(tmp_path_factory.mktemp("features") / "labels.txt") + with open(names_file, "w", encoding="utf-8") as f: + f.write("\n".join(names)) + if class_label_arg == "names": + class_label = 
ClassLabel(names=names) + elif class_label_arg == "names_file": + class_label = ClassLabel(names_file=names_file) + generated_class_label = generate_from_dict(asdict(class_label)) + assert generated_class_label == class_label + + +@pytest.mark.parametrize("inner_type", [Value("int32"), {"subcolumn": Value("int32")}]) +def test_encode_nested_example_sequence_with_none(inner_type): + schema = Sequence(inner_type) + obj = None + result = encode_nested_example(schema, obj) + assert result is None + + +def test_encode_batch_with_example_with_empty_first_elem(): + features = Features( + { + "x": Sequence(Sequence(ClassLabel(names=["a", "b"]))), + } + ) + encoded_batch = features.encode_batch( + { + "x": [ + [["a"], ["b"]], + [[], ["b"]], + ] + } + ) + assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]} + + +@pytest.mark.parametrize( + "feature", + [ + Value("int32"), + ClassLabel(num_classes=2), + Translation(languages=["en", "fr"]), + TranslationVariableLanguages(languages=["en", "fr"]), + ], +) +def test_dataset_feature_with_none(feature): + data = {"col": [None]} + features = Features({"col": feature}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"col"} + assert item["col"] is None + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"col"} + assert isinstance(batch["col"], list) and all(item is None for item in batch["col"]) + column = dset["col"] + assert len(column) == 1 + assert isinstance(column, list) and all(item is None for item in column) + + # nested tests + + data = {"col": [[None]]} + features = Features({"col": Sequence(feature)}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"col"} + assert all(i is None for i in item["col"]) + + data = {"nested": [{"col": None}]} + features = Features({"nested": {"col": feature}}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"nested"} + assert item["nested"].keys() == {"col"} + assert item["nested"]["col"] is None + + +def iternumpy(key1, value1, value2): + if value1.dtype != value2.dtype: # check only for dtype + raise AssertionError( + f"dtype of '{key1}' key for casted object: {value1.dtype} and expected object: {value2.dtype} not matching" + ) + + +def dict_diff(d1: dict, d2: dict): # check if 2 dictionaries are equal + np.testing.assert_equal(d1, d2) # sanity check if dict values are equal or not + + for (k1, v1), (k2, v2) in zip(d1.items(), d2.items()): # check if their values have same dtype or not + if isinstance(v1, dict): # nested dictionary case + dict_diff(v1, v2) + elif isinstance(v1, np.ndarray): # checks if dtype and value of np.ndarray is equal + iternumpy(k1, v1, v2) + elif isinstance(v1, list): + for element1, element2 in zip(v1, v2): # iterates over all elements of list + if isinstance(element1, dict): + dict_diff(element1, element2) + elif isinstance(element1, np.ndarray): + iternumpy(k1, element1, element2) + + +class CastToPythonObjectsTest(TestCase): + def test_cast_to_python_objects_list(self): + obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} + expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} + casted_obj = cast_to_python_objects(obj) + self.assertDictEqual(casted_obj, expected_obj) + + def test_cast_to_python_objects_tuple(self): + obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]} + expected_obj = {"col_1": [{"vec": (1, 2, 
3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]} + casted_obj = cast_to_python_objects(obj) + self.assertDictEqual(casted_obj, expected_obj) + + def test_cast_to_python_or_numpy(self): + obj = {"col_1": [{"vec": np.arange(1, 4), "txt": "foo"}] * 3, "col_2": np.arange(1, 7).reshape(3, 2)} + expected_obj = { + "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, + "col_2": np.array([[1, 2], [3, 4], [5, 6]]), + } + casted_obj = cast_to_python_objects(obj) + dict_diff(casted_obj, expected_obj) + + def test_cast_to_python_objects_series(self): + obj = { + "col_1": pd.Series([{"vec": [1, 2, 3], "txt": "foo"}] * 3), + "col_2": pd.Series([[1, 2], [3, 4], [5, 6]]), + } + expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} + casted_obj = cast_to_python_objects(obj) + self.assertDictEqual(casted_obj, expected_obj) + + def test_cast_to_python_objects_dataframe(self): + obj = pd.DataFrame({"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}) + expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} + casted_obj = cast_to_python_objects(obj) + self.assertDictEqual(casted_obj, expected_obj) + + def test_cast_to_python_objects_pandas_timestamp(self): + obj = pd.Timestamp(2020, 1, 1) + expected_obj = obj.to_pydatetime() + casted_obj = cast_to_python_objects(obj) + self.assertEqual(casted_obj, expected_obj) + casted_obj = cast_to_python_objects(pd.Series([obj])) + self.assertListEqual(casted_obj, [expected_obj]) + casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]})) + self.assertDictEqual(casted_obj, {"a": [expected_obj]}) + + def test_cast_to_python_objects_pandas_timedelta(self): + obj = pd.Timedelta(seconds=1) + expected_obj = obj.to_pytimedelta() + casted_obj = cast_to_python_objects(obj) + self.assertEqual(casted_obj, expected_obj) + casted_obj = cast_to_python_objects(pd.Series([obj])) + self.assertListEqual(casted_obj, [expected_obj]) + casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]})) + self.assertDictEqual(casted_obj, {"a": [expected_obj]}) + + @require_torch + def test_cast_to_python_objects_torch(self): + import torch + + obj = { + "col_1": [{"vec": torch.tensor(np.arange(1, 4)), "txt": "foo"}] * 3, + "col_2": torch.tensor(np.arange(1, 7).reshape(3, 2)), + } + expected_obj = { + "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, + "col_2": np.array([[1, 2], [3, 4], [5, 6]]), + } + casted_obj = cast_to_python_objects(obj) + dict_diff(casted_obj, expected_obj) + + @require_tf + def test_cast_to_python_objects_tf(self): + import tensorflow as tf + + obj = { + "col_1": [{"vec": tf.constant(np.arange(1, 4)), "txt": "foo"}] * 3, + "col_2": tf.constant(np.arange(1, 7).reshape(3, 2)), + } + expected_obj = { + "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, + "col_2": np.array([[1, 2], [3, 4], [5, 6]]), + } + casted_obj = cast_to_python_objects(obj) + dict_diff(casted_obj, expected_obj) + + @require_jax + def test_cast_to_python_objects_jax(self): + import jax.numpy as jnp + + obj = { + "col_1": [{"vec": jnp.array(np.arange(1, 4)), "txt": "foo"}] * 3, + "col_2": jnp.array(np.arange(1, 7).reshape(3, 2)), + } + assert obj["col_2"].dtype == jnp.int32 + expected_obj = { + "col_1": [{"vec": np.array([1, 2, 3], dtype=np.int32), "txt": "foo"}] * 3, + "col_2": np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32), + } + casted_obj = cast_to_python_objects(obj) + dict_diff(casted_obj, expected_obj) + + 
@patch("datasets.features.features._cast_to_python_objects", side_effect=_cast_to_python_objects) + def test_dont_iterate_over_each_element_in_a_list(self, mocked_cast): + obj = {"col_1": [[1, 2], [3, 4], [5, 6]]} + cast_to_python_objects(obj) + self.assertEqual(mocked_cast.call_count, 4) # 4 = depth of obj + + +SIMPLE_FEATURES = [ + Features(), + Features({"a": Value("int32")}), + Features({"a": Value("int32", id="my feature")}), + Features({"a": Value("int32"), "b": Value("float64"), "c": Value("string")}), +] + +CUSTOM_FEATURES = [ + Features({"label": ClassLabel(names=["negative", "positive"])}), + Features({"array": Array2D(dtype="float32", shape=(4, 4))}), + Features({"image": Image()}), + Features({"audio": Audio()}), + Features({"image": Image(decode=False)}), + Features({"audio": Audio(decode=False)}), + Features({"translation": Translation(["en", "fr"])}), + Features({"translation": TranslationVariableLanguages(["en", "fr"])}), +] + +NESTED_FEATURES = [ + Features({"foo": {}}), + Features({"foo": {"bar": Value("int32")}}), + Features({"foo": {"bar1": Value("int32"), "bar2": Value("float64")}}), + Features({"foo": Sequence(Value("int32"))}), + Features({"foo": Sequence({})}), + Features({"foo": Sequence({"bar": Value("int32")})}), + Features({"foo": [Value("int32")]}), + Features({"foo": [{"bar": Value("int32")}]}), +] + +NESTED_CUSTOM_FEATURES = [ + Features({"foo": {"bar": ClassLabel(names=["negative", "positive"])}}), + Features({"foo": Sequence(ClassLabel(names=["negative", "positive"]))}), + Features({"foo": Sequence({"bar": ClassLabel(names=["negative", "positive"])})}), + Features({"foo": [ClassLabel(names=["negative", "positive"])]}), + Features({"foo": [{"bar": ClassLabel(names=["negative", "positive"])}]}), +] + + +@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) +def test_features_to_dict(features: Features): + features_dict = features.to_dict() + assert isinstance(features_dict, dict) + reloaded = Features.from_dict(features_dict) + assert features == reloaded + + +@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) +def test_features_to_yaml_list(features: Features): + features_yaml_list = features._to_yaml_list() + assert isinstance(features_yaml_list, list) + reloaded = Features._from_yaml_list(features_yaml_list) + assert features == reloaded + + +@pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) +def test_features_to_arrow_schema(features: Features): + arrow_schema = features.arrow_schema + assert isinstance(arrow_schema, pa.Schema) + reloaded = Features.from_arrow_schema(arrow_schema) + assert features == reloaded diff --git a/testbed/huggingface__datasets/tests/features/test_image.py b/testbed/huggingface__datasets/tests/features/test_image.py new file mode 100644 index 0000000000000000000000000000000000000000..94d2609565675e9cba682f60c52f2acfdb20492d --- /dev/null +++ b/testbed/huggingface__datasets/tests/features/test_image.py @@ -0,0 +1,707 @@ +import os +import tarfile +import warnings + +import numpy as np +import pandas as pd +import pyarrow as pa +import pytest + +from datasets import Dataset, Features, Image, Sequence, Value, concatenate_datasets, load_dataset +from datasets.features.image import encode_np_array, image_to_bytes + +from ..utils import require_pil + + +@pytest.fixture +def tar_jpg_path(shared_datadir, tmp_path_factory): + image_path = 
str(shared_datadir / "test_image_rgb.jpg") + path = tmp_path_factory.mktemp("data") / "image_data.jpg.tar" + with tarfile.TarFile(path, "w") as f: + f.add(image_path, arcname=os.path.basename(image_path)) + return path + + +def iter_archive(archive_path): + with tarfile.open(archive_path) as tar: + for tarinfo in tar: + file_path = tarinfo.name + file_obj = tar.extractfile(tarinfo) + yield file_path, file_obj + + +def test_image_instantiation(): + image = Image() + assert image.id is None + assert image.dtype == "PIL.Image.Image" + assert image.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()}) + assert image._type == "Image" + + +def test_image_feature_type_to_arrow(): + features = Features({"image": Image()}) + assert features.arrow_schema == pa.schema({"image": Image().pa_type}) + features = Features({"struct_containing_an_image": {"image": Image()}}) + assert features.arrow_schema == pa.schema({"struct_containing_an_image": pa.struct({"image": Image().pa_type})}) + features = Features({"sequence_of_images": Sequence(Image())}) + assert features.arrow_schema == pa.schema({"sequence_of_images": pa.list_(Image().pa_type)}) + + +@require_pil +@pytest.mark.parametrize( + "build_example", + [ + lambda image_path: image_path, + lambda image_path: open(image_path, "rb").read(), + lambda image_path: {"path": image_path}, + lambda image_path: {"path": image_path, "bytes": None}, + lambda image_path: {"path": image_path, "bytes": open(image_path, "rb").read()}, + lambda image_path: {"path": None, "bytes": open(image_path, "rb").read()}, + lambda image_path: {"bytes": open(image_path, "rb").read()}, + ], +) +def test_image_feature_encode_example(shared_datadir, build_example): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + image = Image() + encoded_example = image.encode_example(build_example(image_path)) + assert isinstance(encoded_example, dict) + assert encoded_example.keys() == {"bytes", "path"} + assert encoded_example["bytes"] is not None or encoded_example["path"] is not None + decoded_example = image.decode_example(encoded_example) + assert isinstance(decoded_example, PIL.Image.Image) + + +@require_pil +def test_image_decode_example(shared_datadir): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + image = Image() + decoded_example = image.decode_example({"path": image_path, "bytes": None}) + + assert isinstance(decoded_example, PIL.Image.Image) + assert os.path.samefile(decoded_example.filename, image_path) + assert decoded_example.size == (640, 480) + assert decoded_example.mode == "RGB" + + with pytest.raises(RuntimeError): + Image(decode=False).decode_example(image_path) + + +@require_pil +def test_dataset_with_image_feature(shared_datadir): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path]} + features = Features({"image": Image()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"image"} + assert isinstance(item["image"], PIL.Image.Image) + assert os.path.samefile(item["image"].filename, image_path) + assert item["image"].format == "JPEG" + assert item["image"].size == (640, 480) + assert item["image"].mode == "RGB" + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"image"} + assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"]) + assert os.path.samefile(batch["image"][0].filename, image_path) + assert batch["image"][0].format 
== "JPEG" + assert batch["image"][0].size == (640, 480) + assert batch["image"][0].mode == "RGB" + column = dset["image"] + assert len(column) == 1 + assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column) + assert os.path.samefile(column[0].filename, image_path) + assert column[0].format == "JPEG" + assert column[0].size == (640, 480) + assert column[0].mode == "RGB" + + +@require_pil +@pytest.mark.parametrize("infer_feature", [False, True]) +def test_dataset_with_image_feature_from_pil_image(infer_feature, shared_datadir): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [PIL.Image.open(image_path)]} + features = Features({"image": Image()}) if not infer_feature else None + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"image"} + assert isinstance(item["image"], PIL.Image.Image) + assert os.path.samefile(item["image"].filename, image_path) + assert item["image"].format == "JPEG" + assert item["image"].size == (640, 480) + assert item["image"].mode == "RGB" + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"image"} + assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"]) + assert os.path.samefile(batch["image"][0].filename, image_path) + assert batch["image"][0].format == "JPEG" + assert batch["image"][0].size == (640, 480) + assert batch["image"][0].mode == "RGB" + column = dset["image"] + assert len(column) == 1 + assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column) + assert os.path.samefile(column[0].filename, image_path) + assert column[0].format == "JPEG" + assert column[0].size == (640, 480) + assert column[0].mode == "RGB" + + +@require_pil +def test_dataset_with_image_feature_from_np_array(): + import PIL.Image + + image_array = np.arange(640 * 480, dtype=np.int32).reshape(480, 640) + data = {"image": [image_array]} + features = Features({"image": Image()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"image"} + assert isinstance(item["image"], PIL.Image.Image) + + np.testing.assert_array_equal(np.array(item["image"]), image_array) + assert item["image"].filename == "" + assert item["image"].format in ["PNG", "TIFF"] + assert item["image"].size == (640, 480) + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"image"} + assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"]) + np.testing.assert_array_equal(np.array(batch["image"][0]), image_array) + assert batch["image"][0].filename == "" + assert batch["image"][0].format in ["PNG", "TIFF"] + assert batch["image"][0].size == (640, 480) + column = dset["image"] + assert len(column) == 1 + assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column) + np.testing.assert_array_equal(np.array(column[0]), image_array) + assert column[0].filename == "" + assert column[0].format in ["PNG", "TIFF"] + assert column[0].size == (640, 480) + + +@require_pil +def test_dataset_with_image_feature_tar_jpg(tar_jpg_path): + import PIL.Image + + data = {"image": []} + for file_path, file_obj in iter_archive(tar_jpg_path): + data["image"].append({"path": file_path, "bytes": file_obj.read()}) + break + + features = Features({"image": Image()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"image"} + assert 
isinstance(item["image"], PIL.Image.Image) + assert item["image"].filename == "" + assert item["image"].format == "JPEG" + assert item["image"].size == (640, 480) + assert item["image"].mode == "RGB" + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"image"} + assert isinstance(batch["image"], list) and all(isinstance(item, PIL.Image.Image) for item in batch["image"]) + assert batch["image"][0].filename == "" + assert batch["image"][0].format == "JPEG" + assert batch["image"][0].size == (640, 480) + assert batch["image"][0].mode == "RGB" + column = dset["image"] + assert len(column) == 1 + assert isinstance(column, list) and all(isinstance(item, PIL.Image.Image) for item in column) + assert column[0].filename == "" + assert column[0].format == "JPEG" + assert column[0].size == (640, 480) + assert column[0].mode == "RGB" + + +@require_pil +def test_dataset_with_image_feature_with_none(): + data = {"image": [None]} + features = Features({"image": Image()}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"image"} + assert item["image"] is None + batch = dset[:1] + assert len(batch) == 1 + assert batch.keys() == {"image"} + assert isinstance(batch["image"], list) and all(item is None for item in batch["image"]) + column = dset["image"] + assert len(column) == 1 + assert isinstance(column, list) and all(item is None for item in column) + + # nested tests + + data = {"images": [[None]]} + features = Features({"images": Sequence(Image())}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"images"} + assert all(i is None for i in item["images"]) + + data = {"nested": [{"image": None}]} + features = Features({"nested": {"image": Image()}}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"nested"} + assert item["nested"].keys() == {"image"} + assert item["nested"]["image"] is None + + +@require_pil +@pytest.mark.parametrize( + "build_data", + [ + lambda image_path: {"image": [image_path]}, + lambda image_path: {"image": [open(image_path, "rb").read()]}, + lambda image_path: {"image": [{"path": image_path}]}, + lambda image_path: {"image": [{"path": image_path, "bytes": None}]}, + lambda image_path: {"image": [{"path": image_path, "bytes": open(image_path, "rb").read()}]}, + lambda image_path: {"image": [{"path": None, "bytes": open(image_path, "rb").read()}]}, + lambda image_path: {"image": [{"bytes": open(image_path, "rb").read()}]}, + ], +) +def test_dataset_cast_to_image_features(shared_datadir, build_data): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = build_data(image_path) + dset = Dataset.from_dict(data) + item = dset.cast(Features({"image": Image()}))[0] + assert item.keys() == {"image"} + assert isinstance(item["image"], PIL.Image.Image) + item = dset.cast_column("image", Image())[0] + assert item.keys() == {"image"} + assert isinstance(item["image"], PIL.Image.Image) + + +@require_pil +def test_dataset_concatenate_image_features(shared_datadir): + # we use a different data structure between 1 and 2 to make sure they are compatible with each other + image_path = str(shared_datadir / "test_image_rgb.jpg") + data1 = {"image": [image_path]} + dset1 = Dataset.from_dict(data1, features=Features({"image": Image()})) + data2 = {"image": [{"bytes": open(image_path, "rb").read()}]} + dset2 = Dataset.from_dict(data2, features=Features({"image": Image()})) + concatenated_dataset = 
concatenate_datasets([dset1, dset2]) + assert len(concatenated_dataset) == len(dset1) + len(dset2) + assert concatenated_dataset[0]["image"] == dset1[0]["image"] + assert concatenated_dataset[1]["image"] == dset2[0]["image"] + + +@require_pil +def test_dataset_concatenate_nested_image_features(shared_datadir): + # we use a different data structure between 1 and 2 to make sure they are compatible with each other + image_path = str(shared_datadir / "test_image_rgb.jpg") + features = Features({"list_of_structs_of_images": [{"image": Image()}]}) + data1 = {"list_of_structs_of_images": [[{"image": image_path}]]} + dset1 = Dataset.from_dict(data1, features=features) + data2 = {"list_of_structs_of_images": [[{"image": {"bytes": open(image_path, "rb").read()}}]]} + dset2 = Dataset.from_dict(data2, features=features) + concatenated_dataset = concatenate_datasets([dset1, dset2]) + assert len(concatenated_dataset) == len(dset1) + len(dset2) + assert ( + concatenated_dataset[0]["list_of_structs_of_images"][0]["image"] + == dset1[0]["list_of_structs_of_images"][0]["image"] + ) + assert ( + concatenated_dataset[1]["list_of_structs_of_images"][0]["image"] + == dset2[0]["list_of_structs_of_images"][0]["image"] + ) + + +@require_pil +def test_dataset_with_image_feature_map(shared_datadir): + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path], "caption": ["cats sleeping"]} + features = Features({"image": Image(), "caption": Value("string")}) + dset = Dataset.from_dict(data, features=features) + + for item in dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption"} + assert item == {"image": {"path": image_path, "bytes": None}, "caption": "cats sleeping"} + + # no decoding + + def process_caption(example): + example["caption"] = "Two " + example["caption"] + return example + + processed_dset = dset.map(process_caption) + for item in processed_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption"} + assert item == {"image": {"path": image_path, "bytes": None}, "caption": "Two cats sleeping"} + + # decoding example + + def process_image_by_example(example): + example["mode"] = example["image"].mode + return example + + decoded_dset = dset.map(process_image_by_example) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption", "mode"} + assert os.path.samefile(item["image"]["path"], image_path) + assert item["caption"] == "cats sleeping" + assert item["mode"] == "RGB" + + # decoding batch + + def process_image_by_batch(batch): + batch["mode"] = [image.mode for image in batch["image"]] + return batch + + decoded_dset = dset.map(process_image_by_batch, batched=True) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption", "mode"} + assert os.path.samefile(item["image"]["path"], image_path) + assert item["caption"] == "cats sleeping" + assert item["mode"] == "RGB" + + +@require_pil +def test_formatted_dataset_with_image_feature_map(shared_datadir): + image_path = str(shared_datadir / "test_image_rgb.jpg") + pil_image = Image().decode_example({"path": image_path, "bytes": None}) + data = {"image": [image_path], "caption": ["cats sleeping"]} + features = Features({"image": Image(), "caption": Value("string")}) + + dset = Dataset.from_dict(data, features=features) + for item in dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption"} + assert item == {"image": 
{"path": image_path, "bytes": None}, "caption": "cats sleeping"} + + def process_image_by_example(example): + example["num_channels"] = example["image"].shape[-1] + return example + + decoded_dset = dset.with_format("numpy").map(process_image_by_example) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption", "num_channels"} + assert item["image"] == encode_np_array(np.array(pil_image)) + assert item["caption"] == "cats sleeping" + assert item["num_channels"] == 3 + + def process_image_by_batch(batch): + batch["num_channels"] = [image.shape[-1] for image in batch["image"]] + return batch + + decoded_dset = dset.with_format("numpy").map(process_image_by_batch, batched=True) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image", "caption", "num_channels"} + assert item["image"] == encode_np_array(np.array(pil_image)) + assert item["caption"] == "cats sleeping" + assert item["num_channels"] == 3 + + +@require_pil +def test_dataset_with_image_feature_map_change_image(shared_datadir): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + pil_image = Image().decode_example({"path": image_path, "bytes": None}) + data = {"image": [image_path]} + features = Features({"image": Image()}) + dset = Dataset.from_dict(data, features=features) + + for item in dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image"} + assert item == { + "image": { + "bytes": None, + "path": image_path, + } + } + + # return pil image + + def process_image_resize_by_example(example): + example["image"] = example["image"].resize((100, 100)) + return example + + decoded_dset = dset.map(process_image_resize_by_example) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image"} + assert item == {"image": {"bytes": image_to_bytes(pil_image.resize((100, 100))), "path": None}} + + def process_image_resize_by_batch(batch): + batch["image"] = [image.resize((100, 100)) for image in batch["image"]] + return batch + + decoded_dset = dset.map(process_image_resize_by_batch, batched=True) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image"} + assert item == {"image": {"bytes": image_to_bytes(pil_image.resize((100, 100))), "path": None}} + + # return np.ndarray (e.g. 
when using albumentations) + + def process_image_resize_by_example_return_np_array(example): + example["image"] = np.array(example["image"].resize((100, 100))) + return example + + decoded_dset = dset.map(process_image_resize_by_example_return_np_array) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image"} + assert item == { + "image": { + "bytes": image_to_bytes(PIL.Image.fromarray(np.array(pil_image.resize((100, 100))))), + "path": None, + } + } + + def process_image_resize_by_batch_return_np_array(batch): + batch["image"] = [np.array(image.resize((100, 100))) for image in batch["image"]] + return batch + + decoded_dset = dset.map(process_image_resize_by_batch_return_np_array, batched=True) + for item in decoded_dset.cast_column("image", Image(decode=False)): + assert item.keys() == {"image"} + assert item == { + "image": { + "bytes": image_to_bytes(PIL.Image.fromarray(np.array(pil_image.resize((100, 100))))), + "path": None, + } + } + + +@require_pil +def test_formatted_dataset_with_image_feature(shared_datadir): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path, image_path]} + features = Features({"image": Image()}) + dset = Dataset.from_dict(data, features=features) + with dset.formatted_as("numpy"): + item = dset[0] + assert item.keys() == {"image"} + assert isinstance(item["image"], np.ndarray) + assert item["image"].shape == (480, 640, 3) + batch = dset[:1] + assert batch.keys() == {"image"} + assert len(batch) == 1 + assert isinstance(batch["image"], np.ndarray) + assert batch["image"].shape == (1, 480, 640, 3) + column = dset["image"] + assert len(column) == 2 + assert isinstance(column, np.ndarray) + assert column.shape == (2, 480, 640, 3) + + with dset.formatted_as("pandas"): + item = dset[0] + assert item.shape == (1, 1) + assert item.columns == ["image"] + assert isinstance(item["image"][0], PIL.Image.Image) + assert os.path.samefile(item["image"][0].filename, image_path) + assert item["image"][0].format == "JPEG" + assert item["image"][0].size == (640, 480) + assert item["image"][0].mode == "RGB" + batch = dset[:1] + assert batch.shape == (1, 1) + assert batch.columns == ["image"] + assert isinstance(batch["image"], pd.Series) and all( + isinstance(item, PIL.Image.Image) for item in batch["image"] + ) + assert os.path.samefile(batch["image"][0].filename, image_path) + assert batch["image"][0].format == "JPEG" + assert batch["image"][0].size == (640, 480) + assert batch["image"][0].mode == "RGB" + column = dset["image"] + assert len(column) == 2 + assert isinstance(column, pd.Series) and all(isinstance(item, PIL.Image.Image) for item in column) + assert os.path.samefile(column[0].filename, image_path) + assert column[0].format == "JPEG" + assert column[0].size == (640, 480) + assert column[0].mode == "RGB" + + +# Currently, the JSONL reader doesn't support complex feature types so we create a temporary dataset script +# to test streaming (without uploading the test dataset to the hub). 
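+# A sketch of the flow these pieces set up (nothing below this comment is part of
+# the script itself; `dataset_loading_script_dir` and `data_dir` are the fixtures
+# defined further down, and the expected item mirrors the line written to train.txt):
+#
+#     dset = load_dataset(dataset_loading_script_dir, split="train",
+#                         data_dir=data_dir, streaming=True)
+#     item = next(iter(dset))  # {"image": <PIL.Image.Image>, "caption": "Two cats sleeping"}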
+ +DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset__" + +DATASET_LOADING_SCRIPT_CODE = """ +import os + +import datasets +from datasets import DatasetInfo, Features, Image, Split, SplitGenerator, Value + + +class __DummyDataset__(datasets.GeneratorBasedBuilder): + + def _info(self) -> DatasetInfo: + return DatasetInfo(features=Features({"image": Image(), "caption": Value("string")})) + + def _split_generators(self, dl_manager): + return [ + SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}), + ] + + def _generate_examples(self, filepath, **kwargs): + with open(filepath, encoding="utf-8") as f: + for i, line in enumerate(f): + image_path, caption = line.split(",") + yield i, {"image": image_path.strip(), "caption": caption.strip()} +""" + + +@pytest.fixture +def data_dir(shared_datadir, tmp_path): + data_dir = tmp_path / "dummy_dataset_data" + data_dir.mkdir() + image_path = str(shared_datadir / "test_image_rgb.jpg") + with open(data_dir / "train.txt", "w") as f: + f.write(f"{image_path},Two cats sleeping\n") + return str(data_dir) + + +@pytest.fixture +def dataset_loading_script_dir(tmp_path): + script_name = DATASET_LOADING_SCRIPT_NAME + script_dir = tmp_path / script_name + script_dir.mkdir() + script_path = script_dir / f"{script_name}.py" + with open(script_path, "w") as f: + f.write(DATASET_LOADING_SCRIPT_CODE) + return str(script_dir) + + +@require_pil +@pytest.mark.parametrize("streaming", [False, True]) +def test_load_dataset_with_image_feature(shared_datadir, data_dir, dataset_loading_script_dir, streaming): + import PIL.Image + + image_path = str(shared_datadir / "test_image_rgb.jpg") + dset = load_dataset(dataset_loading_script_dir, split="train", data_dir=data_dir, streaming=streaming) + item = dset[0] if not streaming else next(iter(dset)) + assert item.keys() == {"image", "caption"} + assert isinstance(item["image"], PIL.Image.Image) + assert os.path.samefile(item["image"].filename, image_path) + assert item["image"].format == "JPEG" + assert item["image"].size == (640, 480) + assert item["image"].mode == "RGB" + + +@require_pil +def test_dataset_with_image_feature_undecoded(shared_datadir): + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path]} + features = Features({"image": Image(decode=False)}) + dset = Dataset.from_dict(data, features=features) + item = dset[0] + assert item.keys() == {"image"} + assert item["image"] == {"path": image_path, "bytes": None} + batch = dset[:1] + assert batch.keys() == {"image"} + assert len(batch["image"]) == 1 + assert batch["image"][0] == {"path": image_path, "bytes": None} + column = dset["image"] + assert len(column) == 1 + assert column[0] == {"path": image_path, "bytes": None} + + +@require_pil +def test_formatted_dataset_with_image_feature_undecoded(shared_datadir): + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path]} + features = Features({"image": Image(decode=False)}) + dset = Dataset.from_dict(data, features=features) + with dset.formatted_as("numpy"): + item = dset[0] + assert item.keys() == {"image"} + assert item["image"] == {"path": image_path, "bytes": None} + batch = dset[:1] + assert batch.keys() == {"image"} + assert len(batch["image"]) == 1 + assert batch["image"][0] == {"path": image_path, "bytes": None} + column = dset["image"] + assert len(column) == 1 + assert column[0] == {"path": image_path, "bytes": None} + + with dset.formatted_as("pandas"): + item = dset[0] + assert item.shape == (1, 1) + 
assert item.columns == ["image"] + assert item["image"][0] == {"path": image_path, "bytes": None} + batch = dset[:1] + assert batch.shape == (1, 1) + assert batch.columns == ["image"] + assert batch["image"][0] == {"path": image_path, "bytes": None} + column = dset["image"] + assert len(column) == 1 + assert column[0] == {"path": image_path, "bytes": None} + + +@require_pil +def test_dataset_with_image_feature_map_undecoded(shared_datadir): + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path]} + features = Features({"image": Image(decode=False)}) + dset = Dataset.from_dict(data, features=features) + + def assert_image_example_undecoded(example): + assert example["image"] == {"path": image_path, "bytes": None} + + dset.map(assert_image_example_undecoded) + + def assert_image_batch_undecoded(batch): + for image in batch["image"]: + assert image == {"path": image_path, "bytes": None} + + dset.map(assert_image_batch_undecoded, batched=True) + + +@require_pil +def test_image_embed_storage(shared_datadir): + image_path = str(shared_datadir / "test_image_rgb.jpg") + example = {"bytes": None, "path": image_path} + storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()})) + embedded_storage = Image().embed_storage(storage) + embedded_example = embedded_storage.to_pylist()[0] + assert embedded_example == {"bytes": open(image_path, "rb").read(), "path": "test_image_rgb.jpg"} + + +@require_pil +@pytest.mark.parametrize( + "array, dtype_cast, expected_image_format", + [ + (np.arange(16).reshape(4, 4).astype(np.uint8), "exact_match", "PNG"), + (np.arange(16).reshape(4, 4).astype(np.uint16), "exact_match", "TIFF"), + (np.arange(16).reshape(4, 4).astype(np.int64), "downcast->|i4", "TIFF"), + (np.arange(16).reshape(4, 4).astype(np.complex128), "error", None), + (np.arange(16).reshape(2, 2, 4).astype(np.uint8), "exact_match", "PNG"), + (np.arange(16).reshape(2, 2, 4), "downcast->|u1", "PNG"), + (np.arange(16).reshape(2, 2, 4).astype(np.float64), "error", None), + ], +) +def test_encode_np_array(array, dtype_cast, expected_image_format): + if dtype_cast.startswith("downcast"): + _, dest_dtype = dtype_cast.split("->") + dest_dtype = np.dtype(dest_dtype) + with pytest.warns(UserWarning, match=f"Downcasting array dtype.+{dest_dtype}.+"): + encoded_image = Image().encode_example(array) + elif dtype_cast == "error": + with pytest.raises(TypeError): + Image().encode_example(array) + return + else: # exact_match (no warnings are raised) + with warnings.catch_warnings(): + warnings.simplefilter("error") + encoded_image = Image().encode_example(array) + + assert isinstance(encoded_image, dict) + assert encoded_image.keys() == {"path", "bytes"} + assert encoded_image["path"] is None + assert encoded_image["bytes"] is not None and isinstance(encoded_image["bytes"], bytes) + decoded_image = Image().decode_example(encoded_image) + assert decoded_image.format == expected_image_format + np.testing.assert_array_equal(np.array(decoded_image), array) diff --git a/testbed/huggingface__datasets/tests/fixtures/__init__.py b/testbed/huggingface__datasets/tests/fixtures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/tests/fixtures/files.py b/testbed/huggingface__datasets/tests/fixtures/files.py new file mode 100644 index 0000000000000000000000000000000000000000..da8d3efee48bdc07c2ae83bb7ce5c049eadfe838 --- /dev/null +++ 
b/testbed/huggingface__datasets/tests/fixtures/files.py @@ -0,0 +1,559 @@ +import contextlib +import csv +import json +import os +import sqlite3 +import tarfile +import textwrap +import zipfile + +import pyarrow as pa +import pyarrow.parquet as pq +import pytest + +import datasets +import datasets.config + + +# dataset + arrow_file + + +@pytest.fixture(scope="session") +def dataset(): + n = 10 + features = datasets.Features( + { + "tokens": datasets.Sequence(datasets.Value("string")), + "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])), + "answers": datasets.Sequence( + { + "text": datasets.Value("string"), + "answer_start": datasets.Value("int32"), + } + ), + "id": datasets.Value("int64"), + } + ) + dataset = datasets.Dataset.from_dict( + { + "tokens": [["foo"] * 5] * n, + "labels": [[1] * 5] * n, + "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, + "id": list(range(n)), + }, + features=features, + ) + return dataset + + +@pytest.fixture(scope="session") +def arrow_file(tmp_path_factory, dataset): + filename = str(tmp_path_factory.mktemp("data") / "file.arrow") + dataset.map(cache_file_name=filename) + return filename + + +# FILE_CONTENT + files + + +FILE_CONTENT = """\ + Text data. + Second line of data.""" + + +@pytest.fixture(scope="session") +def text_file(tmp_path_factory): + filename = tmp_path_factory.mktemp("data") / "file.txt" + data = FILE_CONTENT + with open(filename, "w") as f: + f.write(data) + return filename + + +@pytest.fixture(scope="session") +def bz2_file(tmp_path_factory): + import bz2 + + path = tmp_path_factory.mktemp("data") / "file.txt.bz2" + data = bytes(FILE_CONTENT, "utf-8") + with bz2.open(path, "wb") as f: + f.write(data) + return path + + +@pytest.fixture(scope="session") +def gz_file(tmp_path_factory): + import gzip + + path = str(tmp_path_factory.mktemp("data") / "file.txt.gz") + data = bytes(FILE_CONTENT, "utf-8") + with gzip.open(path, "wb") as f: + f.write(data) + return path + + +@pytest.fixture(scope="session") +def lz4_file(tmp_path_factory): + if datasets.config.LZ4_AVAILABLE: + import lz4.frame + + path = tmp_path_factory.mktemp("data") / "file.txt.lz4" + data = bytes(FILE_CONTENT, "utf-8") + with lz4.frame.open(path, "wb") as f: + f.write(data) + return path + + +@pytest.fixture(scope="session") +def seven_zip_file(tmp_path_factory, text_file): + if datasets.config.PY7ZR_AVAILABLE: + import py7zr + + path = tmp_path_factory.mktemp("data") / "file.txt.7z" + with py7zr.SevenZipFile(path, "w") as archive: + archive.write(text_file, arcname=os.path.basename(text_file)) + return path + + +@pytest.fixture(scope="session") +def tar_file(tmp_path_factory, text_file): + import tarfile + + path = tmp_path_factory.mktemp("data") / "file.txt.tar" + with tarfile.TarFile(path, "w") as f: + f.add(text_file, arcname=os.path.basename(text_file)) + return path + + +@pytest.fixture(scope="session") +def xz_file(tmp_path_factory): + import lzma + + path = tmp_path_factory.mktemp("data") / "file.txt.xz" + data = bytes(FILE_CONTENT, "utf-8") + with lzma.open(path, "wb") as f: + f.write(data) + return path + + +@pytest.fixture(scope="session") +def zip_file(tmp_path_factory, text_file): + import zipfile + + path = tmp_path_factory.mktemp("data") / "file.txt.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(text_file, arcname=os.path.basename(text_file)) + return path + + +@pytest.fixture(scope="session") +def zstd_file(tmp_path_factory): + if datasets.config.ZSTANDARD_AVAILABLE: + import zstandard as zstd + + path = 
tmp_path_factory.mktemp("data") / "file.txt.zst" + data = bytes(FILE_CONTENT, "utf-8") + with zstd.open(path, "wb") as f: + f.write(data) + return path + + +# xml_file + + +@pytest.fixture(scope="session") +def xml_file(tmp_path_factory): + filename = tmp_path_factory.mktemp("data") / "file.xml" + data = textwrap.dedent( + """\ + <?xml version="1.0" encoding="UTF-8" ?> + <tmx version="1.4"> + <header segtype="sentence" srclang="ca" /> + <body> + <tu> + <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv> + <tuv xml:lang="en"><seg>Content 1</seg></tuv> + </tu> + <tu> + <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv> + <tuv xml:lang="en"><seg>Content 2</seg></tuv> + </tu> + <tu> + <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv> + <tuv xml:lang="en"><seg>Content 3</seg></tuv> + </tu> + <tu> + <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv> + <tuv xml:lang="en"><seg>Content 4</seg></tuv> + </tu> + <tu> + <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv> + <tuv xml:lang="en"><seg>Content 5</seg></tuv> + </tu> + </body> + </tmx>""" + ) + with open(filename, "w") as f: + f.write(data) + return filename + + +DATA = [ + {"col_1": "0", "col_2": 0, "col_3": 0.0}, + {"col_1": "1", "col_2": 1, "col_3": 1.0}, + {"col_1": "2", "col_2": 2, "col_3": 2.0}, + {"col_1": "3", "col_2": 3, "col_3": 3.0}, +] +DATA2 = [ + {"col_1": "4", "col_2": 4, "col_3": 4.0}, + {"col_1": "5", "col_2": 5, "col_3": 5.0}, +] +DATA_DICT_OF_LISTS = { + "col_1": ["0", "1", "2", "3"], + "col_2": [0, 1, 2, 3], + "col_3": [0.0, 1.0, 2.0, 3.0], +} + +DATA_312 = [ + {"col_3": 0.0, "col_1": "0", "col_2": 0}, + {"col_3": 1.0, "col_1": "1", "col_2": 1}, +] + +DATA_STR = [ + {"col_1": "s0", "col_2": 0, "col_3": 0.0}, + {"col_1": "s1", "col_2": 1, "col_3": 1.0}, + {"col_1": "s2", "col_2": 2, "col_3": 2.0}, + {"col_1": "s3", "col_2": 3, "col_3": 3.0}, +] + + +@pytest.fixture(scope="session") +def dataset_dict(): + return DATA_DICT_OF_LISTS + + +@pytest.fixture(scope="session") +def arrow_path(tmp_path_factory): + dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS) + path = str(tmp_path_factory.mktemp("data") / "dataset.arrow") + dataset.map(cache_file_name=path) + return path + + +@pytest.fixture(scope="session") +def sqlite_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite") + with contextlib.closing(sqlite3.connect(path)) as con: + cur = con.cursor() + cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)") + for item in DATA: + cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values())) + con.commit() + return path + + +@pytest.fixture(scope="session") +def csv_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset.csv") + with open(path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) + writer.writeheader() + for item in DATA: + writer.writerow(item) + return path + + +@pytest.fixture(scope="session") +def csv2_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset2.csv") + with open(path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) + writer.writeheader() + for item in DATA: + writer.writerow(item) + return path + + +@pytest.fixture(scope="session") +def bz2_csv_path(csv_path, tmp_path_factory): + import bz2 + + path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2" + with open(csv_path, "rb") as f: + data = f.read() + # data = bytes(FILE_CONTENT, "utf-8") + with bz2.open(path, "wb") as f: + f.write(data) + return path + + +@pytest.fixture(scope="session") +def zip_csv_path(csv_path, csv2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("zip_csv_path") / "csv-dataset.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(csv_path, arcname=os.path.basename(csv_path)) + f.write(csv2_path, arcname=os.path.basename(csv2_path)) + return path + + +@pytest.fixture(scope="session") +def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset.csv.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV"))) + f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV"))) + return path + + +@pytest.fixture(scope="session") +def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / 
"dataset_with_dir.csv.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path))) + f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path))) + return path + + +@pytest.fixture(scope="session") +def parquet_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset.parquet") + schema = pa.schema( + { + "col_1": pa.string(), + "col_2": pa.int64(), + "col_3": pa.float64(), + } + ) + with open(path, "wb") as f: + writer = pq.ParquetWriter(f, schema=schema) + pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema) + writer.write_table(pa_table) + writer.close() + return path + + +@pytest.fixture(scope="session") +def json_list_of_dicts_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset.json") + data = {"data": DATA} + with open(path, "w") as f: + json.dump(data, f) + return path + + +@pytest.fixture(scope="session") +def json_dict_of_lists_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset.json") + data = {"data": DATA_DICT_OF_LISTS} + with open(path, "w") as f: + json.dump(data, f) + return path + + +@pytest.fixture(scope="session") +def jsonl_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl") + with open(path, "w") as f: + for item in DATA: + f.write(json.dumps(item) + "\n") + return path + + +@pytest.fixture(scope="session") +def jsonl2_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl") + with open(path, "w") as f: + for item in DATA: + f.write(json.dumps(item) + "\n") + return path + + +@pytest.fixture(scope="session") +def jsonl_312_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl") + with open(path, "w") as f: + for item in DATA_312: + f.write(json.dumps(item) + "\n") + return path + + +@pytest.fixture(scope="session") +def jsonl_str_path(tmp_path_factory): + path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl") + with open(path, "w") as f: + for item in DATA_STR: + f.write(json.dumps(item) + "\n") + return path + + +@pytest.fixture(scope="session") +def text_gz_path(tmp_path_factory, text_path): + import gzip + + path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz") + with open(text_path, "rb") as orig_file: + with gzip.open(path, "wb") as zipped_file: + zipped_file.writelines(orig_file) + return path + + +@pytest.fixture(scope="session") +def jsonl_gz_path(tmp_path_factory, jsonl_path): + import gzip + + path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz") + with open(jsonl_path, "rb") as orig_file: + with gzip.open(path, "wb") as zipped_file: + zipped_file.writelines(orig_file) + return path + + +@pytest.fixture(scope="session") +def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(jsonl_path, arcname=os.path.basename(jsonl_path)) + f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path)) + return path + + +@pytest.fixture(scope="session") +def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path))) + return path + + +@pytest.fixture(scope="session") +def 
zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path))) + f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path))) + return path + + +@pytest.fixture(scope="session") +def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar" + with tarfile.TarFile(path, "w") as f: + f.add(jsonl_path, arcname=os.path.basename(jsonl_path)) + f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path)) + return path + + +@pytest.fixture(scope="session") +def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar" + with tarfile.TarFile(path, "w") as f: + f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path))) + return path + + +@pytest.fixture(scope="session") +def text_path(tmp_path_factory): + data = ["0", "1", "2", "3"] + path = str(tmp_path_factory.mktemp("data") / "dataset.txt") + with open(path, "w") as f: + for item in data: + f.write(item + "\n") + return path + + +@pytest.fixture(scope="session") +def text2_path(tmp_path_factory): + data = ["0", "1", "2", "3"] + path = str(tmp_path_factory.mktemp("data") / "dataset2.txt") + with open(path, "w") as f: + for item in data: + f.write(item + "\n") + return path + + +@pytest.fixture(scope="session") +def text_dir_with_unsupported_extension(tmp_path_factory): + data = ["0", "1", "2", "3"] + path = tmp_path_factory.mktemp("data") / "dataset.abc" + with open(path, "w") as f: + for item in data: + f.write(item + "\n") + return path + + +@pytest.fixture(scope="session") +def zip_text_path(text_path, text2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset.text.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(text_path, arcname=os.path.basename(text_path)) + f.write(text2_path, arcname=os.path.basename(text2_path)) + return path + + +@pytest.fixture(scope="session") +def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path))) + f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path))) + return path + + +@pytest.fixture(scope="session") +def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset.ext.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(text_path, arcname=os.path.basename("unsupported.ext")) + f.write(text2_path, arcname=os.path.basename("unsupported_2.ext")) + return path + + +@pytest.fixture(scope="session") +def text_path_with_unicode_new_lines(tmp_path_factory): + text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"]) + path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt") + with open(path, "w", encoding="utf-8") as f: + f.write(text) + return path + + +@pytest.fixture(scope="session") +def image_file(): + return os.path.join("tests", "features", "data", "test_image_rgb.jpg") + + +@pytest.fixture(scope="session") +def audio_file(): + return os.path.join("tests", "features", "data", "test_audio_44100.wav") + + +@pytest.fixture(scope="session") +def 
zip_image_path(image_file, tmp_path_factory): + path = tmp_path_factory.mktemp("data") / "dataset.img.zip" + with zipfile.ZipFile(path, "w") as f: + f.write(image_file, arcname=os.path.basename(image_file)) + f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg")) + return path + + +@pytest.fixture(scope="session") +def data_dir_with_hidden_files(tmp_path_factory): + data_dir = tmp_path_factory.mktemp("data_dir") + + (data_dir / "subdir").mkdir() + with open(data_dir / "subdir" / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(data_dir / "subdir" / "test.txt", "w") as f: + f.write("bar\n" * 10) + # hidden file + with open(data_dir / "subdir" / ".test.txt", "w") as f: + f.write("bar\n" * 10) + + # hidden directory + (data_dir / ".subdir").mkdir() + with open(data_dir / ".subdir" / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(data_dir / ".subdir" / "test.txt", "w") as f: + f.write("bar\n" * 10) + + return data_dir diff --git a/testbed/huggingface__datasets/tests/fixtures/fsspec.py b/testbed/huggingface__datasets/tests/fixtures/fsspec.py new file mode 100644 index 0000000000000000000000000000000000000000..288a2865c217a2f3fb4a9f421492a005fefa0c4e --- /dev/null +++ b/testbed/huggingface__datasets/tests/fixtures/fsspec.py @@ -0,0 +1,113 @@ +import posixpath +from pathlib import Path +from unittest.mock import patch + +import pytest +from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path +from fsspec.registry import _registry as _fsspec_registry + + +class MockFileSystem(AbstractFileSystem): + protocol = "mock" + + def __init__(self, *args, local_root_dir, **kwargs): + super().__init__() + self._fs = LocalFileSystem(*args, **kwargs) + self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/" + + def mkdir(self, path, *args, **kwargs): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.mkdir(path, *args, **kwargs) + + def makedirs(self, path, *args, **kwargs): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.makedirs(path, *args, **kwargs) + + def rmdir(self, path): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.rmdir(path) + + def ls(self, path, detail=True, *args, **kwargs): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + out = self._fs.ls(path, detail=detail, *args, **kwargs) + if detail: + return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out] + else: + return [name[len(self.local_root_dir) :] for name in out] + + def info(self, path, *args, **kwargs): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + out = dict(self._fs.info(path, *args, **kwargs)) + out["name"] = out["name"][len(self.local_root_dir) :] + return out + + def cp_file(self, path1, path2, *args, **kwargs): + path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1)) + path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2)) + return self._fs.cp_file(path1, path2, *args, **kwargs) + + def rm_file(self, path, *args, **kwargs): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.rm_file(path, *args, **kwargs) + + def rm(self, path, *args, **kwargs): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.rm(path, *args, **kwargs) + + def _open(self, path, *args, **kwargs): + path = posixpath.join(self.local_root_dir, 
self._strip_protocol(path)) + return self._fs._open(path, *args, **kwargs) + + def created(self, path): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.created(path) + + def modified(self, path): + path = posixpath.join(self.local_root_dir, self._strip_protocol(path)) + return self._fs.modified(path) + + @classmethod + def _strip_protocol(cls, path): + path = stringify_path(path) + if path.startswith("mock://"): + path = path[7:] + return path + + +class TmpDirFileSystem(MockFileSystem): + protocol = "tmp" + tmp_dir = None + + def __init__(self, *args, **kwargs): + assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set" + super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True) + + @classmethod + def _strip_protocol(cls, path): + path = stringify_path(path) + if path.startswith("tmp://"): + path = path[6:] + return path + + +@pytest.fixture +def mock_fsspec(): + _fsspec_registry["mock"] = MockFileSystem + _fsspec_registry["tmp"] = TmpDirFileSystem + yield + del _fsspec_registry["mock"] + del _fsspec_registry["tmp"] + + +@pytest.fixture +def mockfs(tmp_path_factory, mock_fsspec): + local_fs_dir = tmp_path_factory.mktemp("mockfs") + return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True) + + +@pytest.fixture +def tmpfs(tmp_path_factory, mock_fsspec): + tmp_fs_dir = tmp_path_factory.mktemp("tmpfs") + with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir): + yield TmpDirFileSystem() + TmpDirFileSystem.clear_instance_cache() diff --git a/testbed/huggingface__datasets/tests/fixtures/hub.py b/testbed/huggingface__datasets/tests/fixtures/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..9bd8a162da5b00e62a7245569bb23ff0510ccb99 --- /dev/null +++ b/testbed/huggingface__datasets/tests/fixtures/hub.py @@ -0,0 +1,154 @@ +import time +import uuid +from contextlib import contextmanager +from pathlib import Path +from typing import Optional + +import pytest +import requests +from huggingface_hub.hf_api import HfApi, HfFolder, RepositoryNotFoundError + + +CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__" +CI_HUB_USER_FULL_NAME = "Dummy User" +CI_HUB_USER_TOKEN = "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" + +CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co" +CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" +CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" +CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser() + + +@pytest.fixture +def ci_hfh_hf_hub_url(monkeypatch): + monkeypatch.setattr( + "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE + ) + + +@pytest.fixture +def ci_hub_config(monkeypatch): + monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT) + monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL) + + +@pytest.fixture +def ci_hub_token_path(monkeypatch): + monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH) + + +@pytest.fixture +def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path): + HfFolder.save_token(CI_HUB_USER_TOKEN) + yield + HfFolder.delete_token() + + +@pytest.fixture(scope="session") +def hf_api(): + return HfApi(endpoint=CI_HUB_ENDPOINT) + + +@pytest.fixture(scope="session") +def hf_token(): + yield CI_HUB_USER_TOKEN + + +@pytest.fixture +def cleanup_repo(hf_api): + def _cleanup_repo(repo_id): + hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, 
repo_type="dataset") + + return _cleanup_repo + + +@pytest.fixture +def temporary_repo(cleanup_repo): + @contextmanager + def _temporary_repo(repo_id: Optional[str] = None): + repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}" + try: + yield repo_id + finally: + try: + cleanup_repo(repo_id) + except RepositoryNotFoundError: + pass + + return _temporary_repo + + +@pytest.fixture(scope="session") +def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file): + repo_name = f"repo_txt_data-{int(time.time() * 10e6)}" + repo_id = f"{CI_HUB_USER}/{repo_name}" + hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True) + hf_api.upload_file( + token=hf_token, + path_or_fileobj=str(text_file), + path_in_repo="data/text_data.txt", + repo_id=repo_id, + repo_type="dataset", + ) + yield repo_id + try: + hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset") + except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error + pass + + +@pytest.fixture() +def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url): + return hf_private_dataset_repo_txt_data_ + + +@pytest.fixture(scope="session") +def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path): + repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}" + repo_id = f"{CI_HUB_USER}/{repo_name}" + hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True) + hf_api.upload_file( + token=hf_token, + path_or_fileobj=str(zip_csv_with_dir_path), + path_in_repo="data.zip", + repo_id=repo_id, + repo_type="dataset", + ) + yield repo_id + try: + hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset") + except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error + pass + + +@pytest.fixture() +def hf_private_dataset_repo_zipped_txt_data( + hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url +): + return hf_private_dataset_repo_zipped_txt_data_ + + +@pytest.fixture(scope="session") +def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path): + repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}" + repo_id = f"{CI_HUB_USER}/{repo_name}" + hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True) + hf_api.upload_file( + token=hf_token, + path_or_fileobj=str(zip_image_path), + path_in_repo="data.zip", + repo_id=repo_id, + repo_type="dataset", + ) + yield repo_id + try: + hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset") + except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error + pass + + +@pytest.fixture() +def hf_private_dataset_repo_zipped_img_data( + hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url +): + return hf_private_dataset_repo_zipped_img_data_ diff --git a/testbed/huggingface__datasets/tests/io/__init__.py b/testbed/huggingface__datasets/tests/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/tests/io/test_csv.py b/testbed/huggingface__datasets/tests/io/test_csv.py new file mode 100644 index 0000000000000000000000000000000000000000..5f75fec0fa3c13287fcbccb5a18ad1461784ed67 --- /dev/null +++ b/testbed/huggingface__datasets/tests/io/test_csv.py @@ -0,0 +1,164 @@ +import csv +import os + +import pytest + +from 
datasets import Dataset, DatasetDict, Features, NamedSplit, Value +from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter + +from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases + + +def _check_csv_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_csv_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_csv_features(features, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" + default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = CsvDatasetReader(csv_path, features=features, cache_dir=cache_dir).read() + _check_csv_dataset(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_csv_split(split, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + dataset = CsvDatasetReader(csv_path, cache_dir=cache_dir, split=split).read() + _check_csv_dataset(dataset, expected_features) + assert dataset.split == split if split else "train" + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path): + if issubclass(path_type, str): + path = csv_path + elif issubclass(path_type, list): + path = [csv_path] + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + dataset = CsvDatasetReader(path, cache_dir=cache_dir).read() + _check_csv_dataset(dataset, expected_features) + + +def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_csv_datasetdict_reader_keep_in_memory(keep_in_memory, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", 
"col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_csv_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_csv_datasetdict_reader_features(features, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" + default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = CsvDatasetReader({"train": csv_path}, features=features, cache_dir=cache_dir).read() + _check_csv_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_csv_datasetdict_reader_split(split, csv_path, tmp_path): + if split: + path = {split: csv_path} + else: + path = {"train": csv_path, "test": csv_path} + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + dataset = CsvDatasetReader(path, cache_dir=cache_dir).read() + _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) + + +def iter_csv_file(csv_path): + with open(csv_path, encoding="utf-8") as csvfile: + yield from csv.reader(csvfile) + + +def test_dataset_to_csv(csv_path, tmp_path): + cache_dir = tmp_path / "cache" + output_csv = os.path.join(cache_dir, "tmp.csv") + dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read() + CsvDatasetWriter(dataset["train"], output_csv, num_proc=1).write() + + original_csv = iter_csv_file(csv_path) + expected_csv = iter_csv_file(output_csv) + + for row1, row2 in zip(original_csv, expected_csv): + assert row1 == row2 + + +def test_dataset_to_csv_multiproc(csv_path, tmp_path): + cache_dir = tmp_path / "cache" + output_csv = os.path.join(cache_dir, "tmp.csv") + dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read() + CsvDatasetWriter(dataset["train"], output_csv, num_proc=2).write() + + original_csv = iter_csv_file(csv_path) + expected_csv = iter_csv_file(output_csv) + + for row1, row2 in zip(original_csv, expected_csv): + assert row1 == row2 + + +def test_dataset_to_csv_invalidproc(csv_path, tmp_path): + cache_dir = tmp_path / "cache" + output_csv = os.path.join(cache_dir, "tmp.csv") + dataset = CsvDatasetReader({"train": csv_path}, cache_dir=cache_dir).read() + with pytest.raises(ValueError): + CsvDatasetWriter(dataset["train"], output_csv, num_proc=0) diff --git a/testbed/huggingface__datasets/tests/io/test_json.py b/testbed/huggingface__datasets/tests/io/test_json.py new file mode 100644 index 0000000000000000000000000000000000000000..fd71e510dcb69550ea8e4678d1105d1221494707 --- /dev/null +++ b/testbed/huggingface__datasets/tests/io/test_json.py @@ -0,0 +1,270 @@ +import io +import json + +import fsspec +import pytest + +from datasets 
import Dataset, DatasetDict, Features, NamedSplit, Value +from datasets.io.json import JsonDatasetReader, JsonDatasetWriter + +from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases + + +def _check_json_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_json_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_json_features(features, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read() + _check_json_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_3": "float64", "col_1": "string", "col_2": "int64"}, + ], +) +def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 2 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_3", "col_1", "col_2"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path): + # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} + features = {"col_2": "int64", "col_3": "float64", "col_1": "string"} + expected_features = features.copy() + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + cache_dir = tmp_path / "cache" + dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read() + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 2 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_2", "col_3", "col_1"] + for feature, 
expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_json_split(split, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read() + _check_json_dataset(dataset, expected_features) + assert dataset.split == split if split else "train" + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path): + if issubclass(path_type, str): + path = jsonl_path + elif issubclass(path_type, list): + path = [jsonl_path] + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() + _check_json_dataset(dataset, expected_features) + + +def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_json_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read() + _check_json_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): + if split: + path = {split: jsonl_path} + else: + split = "train" + path = {"train": jsonl_path, "test": jsonl_path} + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = JsonDatasetReader(path, cache_dir=cache_dir).read() + _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) + + +def load_json(buffer): + return json.load(buffer) + + +def load_json_lines(buffer): + return 
[json.loads(line) for line in buffer] + + +class TestJsonDatasetWriter: + @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) + def test_dataset_to_json_lines(self, lines, load_json_function, dataset): + with io.BytesIO() as buffer: + JsonDatasetWriter(dataset, buffer, lines=lines).write() + buffer.seek(0) + exported_content = load_json_function(buffer) + assert isinstance(exported_content, list) + assert isinstance(exported_content[0], dict) + assert len(exported_content) == 10 + + @pytest.mark.parametrize( + "orient, container, keys, len_at", + [ + ("records", list, {"tokens", "labels", "answers", "id"}, None), + ("split", dict, {"columns", "data"}, "data"), + ("index", dict, set("0123456789"), None), + ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), + ("values", list, None, None), + ("table", dict, {"schema", "data"}, "data"), + ], + ) + def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset): + with io.BytesIO() as buffer: + JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write() + buffer.seek(0) + exported_content = load_json(buffer) + assert isinstance(exported_content, container) + if keys: + if container is dict: + assert exported_content.keys() == keys + else: + assert exported_content[0].keys() == keys + else: + assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") + if len_at: + assert len(exported_content[len_at]) == 10 + else: + assert len(exported_content) == 10 + + @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)]) + def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset): + with io.BytesIO() as buffer: + JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write() + buffer.seek(0) + exported_content = load_json_function(buffer) + assert isinstance(exported_content, list) + assert isinstance(exported_content[0], dict) + assert len(exported_content) == 10 + + @pytest.mark.parametrize( + "orient, container, keys, len_at", + [ + ("records", list, {"tokens", "labels", "answers", "id"}, None), + ("split", dict, {"columns", "data"}, "data"), + ("index", dict, set("0123456789"), None), + ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), + ("values", list, None, None), + ("table", dict, {"schema", "data"}, "data"), + ], + ) + def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset): + with io.BytesIO() as buffer: + JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write() + buffer.seek(0) + exported_content = load_json(buffer) + assert isinstance(exported_content, container) + if keys: + if container is dict: + assert exported_content.keys() == keys + else: + assert exported_content[0].keys() == keys + else: + assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys") + if len_at: + assert len(exported_content[len_at]) == 10 + else: + assert len(exported_content) == 10 + + def test_dataset_to_json_orient_invalidproc(self, dataset): + with pytest.raises(ValueError): + with io.BytesIO() as buffer: + JsonDatasetWriter(dataset, buffer, num_proc=0) + + @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")]) + def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset): + path = tmp_path_factory.mktemp("data") / f"test.json.{extension}" + original_path = 
str(shared_datadir / f"test_file.json.{extension}") + JsonDatasetWriter(dataset, path, compression=compression).write() + + with fsspec.open(path, "rb", compression="infer") as f: + exported_content = f.read() + with fsspec.open(original_path, "rb", compression="infer") as f: + original_content = f.read() + assert exported_content == original_content diff --git a/testbed/huggingface__datasets/tests/io/test_parquet.py b/testbed/huggingface__datasets/tests/io/test_parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..bf17bc142ff8e79f7f5bf3389fd2fb3994163098 --- /dev/null +++ b/testbed/huggingface__datasets/tests/io/test_parquet.py @@ -0,0 +1,196 @@ +import pyarrow.parquet as pq +import pytest + +from datasets import Audio, Dataset, DatasetDict, Features, IterableDatasetDict, NamedSplit, Sequence, Value, config +from datasets.features.image import Image +from datasets.info import DatasetInfo +from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size + +from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases + + +def _check_parquet_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_parquet_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_parquet_features(features, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read() + _check_parquet_dataset(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_parquet_split(split, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read() + _check_parquet_dataset(dataset, expected_features) + assert dataset.split == split if split else "train" + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path): + if issubclass(path_type, str): + path = parquet_path + elif issubclass(path_type, list): + path = [parquet_path] 
+ cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read() + _check_parquet_dataset(dataset, expected_features) + + +def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, (DatasetDict, IterableDatasetDict)) + for split in splits: + dataset = dataset_dict[split] + assert len(list(dataset)) == 4 + assert dataset.features is not None + assert set(dataset.features) == set(expected_features) + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = ParquetDatasetReader( + {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory + ).read() + _check_parquet_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_parquet_datasetdict_reader_features(streaming, features, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = ParquetDatasetReader( + {"train": parquet_path}, features=features, cache_dir=cache_dir, streaming=streaming + ).read() + _check_parquet_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize("columns", [None, ["col_1"]]) +@pytest.mark.parametrize("pass_features", [False, True]) +@pytest.mark.parametrize("pass_info", [False, True]) +def test_parquet_datasetdict_reader_columns(streaming, columns, pass_features, pass_info, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + info = ( + DatasetInfo(features=Features({feature: Value(dtype) for feature, dtype in default_expected_features.items()})) + if pass_info + else None + ) + + expected_features = ( + {col: default_expected_features[col] for col in columns} if columns else default_expected_features + ) + features = ( + Features({feature: Value(dtype) for feature, dtype in expected_features.items()}) if pass_features else None + ) + + dataset = ParquetDatasetReader( + {"train": parquet_path}, + columns=columns, + features=features, + info=info, + cache_dir=cache_dir, + streaming=streaming, + ).read() + _check_parquet_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path): + if split: + path = {split: parquet_path} + else: + split = 
"train" + path = {"train": parquet_path, "test": parquet_path} + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read() + _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) + + +def test_parquet_write(dataset, tmp_path): + writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet") + assert writer.write() > 0 + pf = pq.ParquetFile(tmp_path / "foo.parquet") + output_table = pf.read() + assert dataset.data.table == output_table + + +def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path): + image_path = str(shared_datadir / "test_image_rgb.jpg") + data = {"image": [image_path]} + features = Features({"image": Image()}) + dataset = Dataset.from_dict(data, features=features) + writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet") + assert writer.write() > 0 + + reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet")) + assert dataset.features == reloaded_dataset.features + + reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read() + assert dataset.features == reloaded_iterable_dataset.features + + +@pytest.mark.parametrize( + "feature, expected", + [ + (Features({"foo": Value("int32")}), None), + (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), + (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), + ], +) +def test_get_writer_batch_size(feature, expected): + assert get_writer_batch_size(feature) == expected diff --git a/testbed/huggingface__datasets/tests/io/test_sql.py b/testbed/huggingface__datasets/tests/io/test_sql.py new file mode 100644 index 0000000000000000000000000000000000000000..5adda22033f9c6a08d2ad5dbe25874d0c9f5869d --- /dev/null +++ b/testbed/huggingface__datasets/tests/io/test_sql.py @@ -0,0 +1,98 @@ +import contextlib +import os +import sqlite3 + +import pytest + +from datasets import Dataset, Features, Value +from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter + +from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy + + +def _check_sql_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@require_sqlalchemy +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = SqlDatasetReader( + "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory + ).read() + _check_sql_dataset(dataset, expected_features) + + +@require_sqlalchemy +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", 
"col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read() + _check_sql_dataset(dataset, expected_features) + + +def iter_sql_file(sqlite_path): + with contextlib.closing(sqlite3.connect(sqlite_path)) as con: + cur = con.cursor() + cur.execute("SELECT * FROM dataset") + for row in cur: + yield row + + +@require_sqlalchemy +def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + output_sqlite_path = os.path.join(cache_dir, "tmp.sql") + dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read() + SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write() + + original_sql = iter_sql_file(sqlite_path) + expected_sql = iter_sql_file(output_sqlite_path) + + for row1, row2 in zip(original_sql, expected_sql): + assert row1 == row2 + + +@require_sqlalchemy +def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + output_sqlite_path = os.path.join(cache_dir, "tmp.sql") + dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read() + SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write() + + original_sql = iter_sql_file(sqlite_path) + expected_sql = iter_sql_file(output_sqlite_path) + + for row1, row2 in zip(original_sql, expected_sql): + assert row1 == row2 + + +@require_sqlalchemy +def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + output_sqlite_path = os.path.join(cache_dir, "tmp.sql") + dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read() + with pytest.raises(ValueError): + SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write() diff --git a/testbed/huggingface__datasets/tests/io/test_text.py b/testbed/huggingface__datasets/tests/io/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..496d42dcf6f47cf896c46255d89a38e6af076419 --- /dev/null +++ b/testbed/huggingface__datasets/tests/io/test_text.py @@ -0,0 +1,120 @@ +import pytest + +from datasets import Dataset, DatasetDict, Features, NamedSplit, Value +from datasets.io.text import TextDatasetReader + +from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases + + +def _check_text_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 1 + assert dataset.column_names == ["text"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + with assert_arrow_memory_increases() if 
keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_text_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"text": "string"}, + {"text": "int32"}, + {"text": "float32"}, + ], +) +def test_dataset_from_text_features(features, text_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"text": "string"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read() + _check_text_dataset(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_text_split(split, text_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read() + _check_text_dataset(dataset, expected_features) + assert dataset.split == (split if split else "train") + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_text_path_type(path_type, text_path, tmp_path): + if issubclass(path_type, str): + path = text_path + elif issubclass(path_type, list): + path = [text_path] + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + dataset = TextDatasetReader(path, cache_dir=cache_dir).read() + _check_text_dataset(dataset, expected_features) + + +def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 1 + assert dataset.column_names == ["text"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read() + _check_text_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"text": "string"}, + {"text": "int32"}, + {"text": "float32"}, + ], +) +def test_datasetdict_from_text_features(features, text_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"text": "string"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read() + _check_text_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_datasetdict_from_text_split(split, text_path, tmp_path): + if split: + path = {split: text_path} + else: + split = "train" + path = {"train": 
text_path, "test": text_path} + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + dataset = TextDatasetReader(path, cache_dir=cache_dir).read() + _check_text_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) diff --git a/testbed/huggingface__datasets/tests/packaged_modules/__init__.py b/testbed/huggingface__datasets/tests/packaged_modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_audiofolder.py b/testbed/huggingface__datasets/tests/packaged_modules/test_audiofolder.py new file mode 100644 index 0000000000000000000000000000000000000000..712e6aeac4f4b35a9b9236092d50e2725c0231ee --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_audiofolder.py @@ -0,0 +1,503 @@ +import shutil +import textwrap + +import librosa +import numpy as np +import pytest +import soundfile as sf + +from datasets import Audio, ClassLabel, Features, Value +from datasets.data_files import DataFilesDict, get_data_patterns +from datasets.download.streaming_download_manager import StreamingDownloadManager +from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder + +from ..utils import require_sndfile + + +@pytest.fixture +def cache_dir(tmp_path): + return str(tmp_path / "audiofolder_cache_dir") + + +@pytest.fixture +def data_files_with_labels_no_metadata(tmp_path, audio_file): + data_dir = tmp_path / "data_files_with_labels_no_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "fr" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "uk" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + audio_filename = subdir_class_0 / "audio_fr.wav" + shutil.copyfile(audio_file, audio_filename) + audio_filename2 = subdir_class_1 / "audio_uk.wav" + shutil.copyfile(audio_file, audio_filename2) + + data_files_with_labels_no_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + + return data_files_with_labels_no_metadata + + +@pytest.fixture +def audio_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, audio_file): + data_dir = tmp_path / "audio_files_with_labels_and_label_key_in_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "fr" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "uk" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + audio_filename = subdir_class_0 / "audio_fr.wav" + shutil.copyfile(audio_file, audio_filename) + audio_filename2 = subdir_class_1 / "audio_uk.wav" + shutil.copyfile(audio_file, audio_filename2) + + audio_metadata_filename = tmp_path / data_dir / "metadata.jsonl" + audio_metadata = textwrap.dedent( + """\ + {"file_name": "fr/audio_fr.wav", "text": "Audio in French", "label": "Fr"} + {"file_name": "uk/audio_uk.wav", "text": "Audio in Ukrainian", "label": "Uk"} + """ + ) + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + + return str(audio_filename), str(audio_filename2), str(audio_metadata_filename) + + +@pytest.fixture +def audio_file_with_metadata(tmp_path, audio_file): + audio_filename = tmp_path / "audio_file.wav" + shutil.copyfile(audio_file, audio_filename) + audio_metadata_filename = tmp_path / "metadata.jsonl" + audio_metadata = textwrap.dedent( + """\ + 
{"file_name": "audio_file.wav", "text": "Audio transcription"} + """ + ) + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + return str(audio_filename), str(audio_metadata_filename) + + +@pytest.fixture +def audio_files_with_metadata_that_misses_one_audio(tmp_path, audio_file): + audio_filename = tmp_path / "audio_file.wav" + shutil.copyfile(audio_file, audio_filename) + audio_filename2 = tmp_path / "audio_file2.wav" + shutil.copyfile(audio_file, audio_filename2) + audio_metadata_filename = tmp_path / "metadata.jsonl" + audio_metadata = textwrap.dedent( + """\ + {"file_name": "audio_file.wav", "text": "Audio transcription"} + """ + ) + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + return str(audio_filename), str(audio_filename2), str(audio_metadata_filename) + + +@pytest.fixture +def data_files_with_one_split_and_metadata(tmp_path, audio_file): + data_dir = tmp_path / "audiofolder_data_dir_with_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir = data_dir / "subdir" + subdir.mkdir(parents=True, exist_ok=True) + + audio_filename = data_dir / "audio_file.wav" + shutil.copyfile(audio_file, audio_filename) + audio_filename2 = data_dir / "audio_file2.wav" + shutil.copyfile(audio_file, audio_filename2) + audio_filename3 = subdir / "audio_file3.wav" # in subdir + shutil.copyfile(audio_file, audio_filename3) + + audio_metadata_filename = data_dir / "metadata.jsonl" + audio_metadata = textwrap.dedent( + """\ + {"file_name": "audio_file.wav", "text": "First audio transcription"} + {"file_name": "audio_file2.wav", "text": "Second audio transcription"} + {"file_name": "subdir/audio_file3.wav", "text": "Third audio transcription (in subdir)"} + """ + ) + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + assert len(data_files_with_one_split_and_metadata) == 1 + assert len(data_files_with_one_split_and_metadata["train"]) == 4 + return data_files_with_one_split_and_metadata + + +@pytest.fixture(params=["jsonl", "csv"]) +def data_files_with_two_splits_and_metadata(request, tmp_path, audio_file): + data_dir = tmp_path / "audiofolder_data_dir_with_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + train_dir = data_dir / "train" + train_dir.mkdir(parents=True, exist_ok=True) + test_dir = data_dir / "test" + test_dir.mkdir(parents=True, exist_ok=True) + + audio_filename = train_dir / "audio_file.wav" # train audio + shutil.copyfile(audio_file, audio_filename) + audio_filename2 = train_dir / "audio_file2.wav" # train audio + shutil.copyfile(audio_file, audio_filename2) + audio_filename3 = test_dir / "audio_file3.wav" # test audio + shutil.copyfile(audio_file, audio_filename3) + + train_audio_metadata_filename = train_dir / f"metadata.{request.param}" + audio_metadata = ( + textwrap.dedent( + """\ + {"file_name": "audio_file.wav", "text": "First train audio transcription"} + {"file_name": "audio_file2.wav", "text": "Second train audio transcription"} + """ + ) + if request.param == "jsonl" + else textwrap.dedent( + """\ + file_name,text + audio_file.wav,First train audio transcription + audio_file2.wav,Second train audio transcription + """ + ) + ) + with open(train_audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + test_audio_metadata_filename = test_dir / f"metadata.{request.param}" + audio_metadata = 
( + textwrap.dedent( + """\ + {"file_name": "audio_file3.wav", "text": "Test audio transcription"} + """ + ) + if request.param == "jsonl" + else textwrap.dedent( + """\ + file_name,text + audio_file3.wav,Test audio transcription + """ + ) + ) + with open(test_audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + assert len(data_files_with_two_splits_and_metadata) == 2 + assert len(data_files_with_two_splits_and_metadata["train"]) == 3 + assert len(data_files_with_two_splits_and_metadata["test"]) == 2 + return data_files_with_two_splits_and_metadata + + +@pytest.fixture +def data_files_with_zip_archives(tmp_path, audio_file): + data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives" + data_dir.mkdir(parents=True, exist_ok=True) + archive_dir = data_dir / "archive" + archive_dir.mkdir(parents=True, exist_ok=True) + subdir = archive_dir / "subdir" + subdir.mkdir(parents=True, exist_ok=True) + + audio_filename = archive_dir / "audio_file.wav" + shutil.copyfile(audio_file, audio_filename) + audio_filename2 = subdir / "audio_file2.wav" # in subdir + # make sure they're two different audios + # Indeed we won't be able to compare the audio filenames, since the archive is not extracted in streaming mode + array, sampling_rate = librosa.load(str(audio_filename), sr=16000) # original sampling rate is 44100 + sf.write(str(audio_filename2), array, samplerate=16000) + + audio_metadata_filename = archive_dir / "metadata.jsonl" + audio_metadata = textwrap.dedent( + """\ + {"file_name": "audio_file.wav", "text": "First audio transcription"} + {"file_name": "subdir/audio_file2.wav", "text": "Second audio transcription (in subdir)"} + """ + ) + + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + + shutil.make_archive(str(archive_dir), "zip", archive_dir) + shutil.rmtree(str(archive_dir)) + + data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + + assert len(data_files_with_zip_archives) == 1 + assert len(data_files_with_zip_archives["train"]) == 1 + return data_files_with_zip_archives + + +@require_sndfile +# check that labels are inferred correctly from dir names +def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir): + # there are no metadata.jsonl files in this test case + audiofolder = AudioFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False) + audiofolder.download_and_prepare() + assert audiofolder.info.features == Features({"audio": Audio(), "label": ClassLabel(names=["fr", "uk"])}) + dataset = list(audiofolder.as_dataset()["train"]) + label_feature = audiofolder.info.features["label"] + + assert dataset[0]["label"] == label_feature._str2int["fr"] + assert dataset[1]["label"] == label_feature._str2int["uk"] + + +@require_sndfile +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_duplicated_label_key( + audio_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog +): + fr_audio_file, uk_audio_file, audio_metadata_file = audio_files_with_labels_and_duplicated_label_key_in_metadata + audiofolder = AudioFolder( + drop_metadata=drop_metadata, + drop_labels=drop_labels, + data_files=[fr_audio_file, uk_audio_file, 
audio_metadata_file], + cache_dir=cache_dir, + ) + if drop_labels is False: + # infer labels from directories even if metadata files are found + audiofolder.download_and_prepare() + warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) + assert warning_in_logs if drop_metadata is not True else not warning_in_logs + dataset = audiofolder.as_dataset()["train"] + assert audiofolder.info.features["label"] == ClassLabel(names=["fr", "uk"]) + assert all(example["label"] in audiofolder.info.features["label"]._str2int.values() for example in dataset) + else: + audiofolder.download_and_prepare() + dataset = audiofolder.as_dataset()["train"] + if drop_metadata is not True: + # labels are from metadata + assert audiofolder.info.features["label"] == Value("string") + assert all(example["label"] in ["Fr", "Uk"] for example in dataset) + else: + # drop both labels and metadata + assert audiofolder.info.features == Features({"audio": Audio()}) + assert all(example.keys() == {"audio"} for example in dataset) + + +@require_sndfile +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels): + audiofolder = AudioFolder( + drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata + ) + gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + # removing the labels explicitly requires drop_labels=True + assert gen_kwargs["add_labels"] is not bool(drop_labels) + assert gen_kwargs["add_metadata"] is False # no metadata files are present in this case + generator = audiofolder._generate_examples(**gen_kwargs) + if not drop_labels: + assert all( + example.keys() == {"audio", "label"} and all(val is not None for val in example.values()) + for _, example in generator + ) + else: + assert all( + example.keys() == {"audio"} and all(val is not None for val in example.values()) + for _, example in generator + ) + + +@require_sndfile +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_drop_metadata(audio_file_with_metadata, drop_metadata, drop_labels): + audio_file, audio_metadata_file = audio_file_with_metadata + audiofolder = AudioFolder( + drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [audio_file, audio_metadata_file]} + ) + gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True + assert gen_kwargs["add_metadata"] is not bool(drop_metadata) + # since the dataset has metadata, adding the labels explicitly requires drop_labels=False + assert gen_kwargs["add_labels"] is (drop_labels is False) + generator = audiofolder._generate_examples(**gen_kwargs) + expected_columns = {"audio"} + if gen_kwargs["add_metadata"]: + expected_columns.add("text") + if gen_kwargs["add_labels"]: + expected_columns.add("label") + result = [example for _, example in generator] + assert len(result) == 1 + example = result[0] + assert example.keys() == expected_columns + for column in expected_columns: + assert example[column] is not None + + +@require_sndfile +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +def test_generate_examples_with_metadata_in_wrong_location(audio_file, 
audio_file_with_metadata, drop_metadata): + _, audio_metadata_file = audio_file_with_metadata + audiofolder = AudioFolder(drop_metadata=drop_metadata, data_files={"train": [audio_file, audio_metadata_file]}) + gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = audiofolder._generate_examples(**gen_kwargs) + if not drop_metadata: + with pytest.raises(ValueError): + list(generator) + else: + assert all( + example.keys() == {"audio"} and all(val is not None for val in example.values()) + for _, example in generator + ) + + +@require_sndfile +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +def test_generate_examples_with_metadata_that_misses_one_audio( + audio_files_with_metadata_that_misses_one_audio, drop_metadata +): + audio_file, audio_file2, audio_metadata_file = audio_files_with_metadata_that_misses_one_audio + if not drop_metadata: + features = Features({"audio": Audio(), "text": Value("string")}) + else: + features = Features({"audio": Audio()}) + audiofolder = AudioFolder( + drop_metadata=drop_metadata, + features=features, + data_files={"train": [audio_file, audio_file2, audio_metadata_file]}, + ) + gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = audiofolder._generate_examples(**gen_kwargs) + if not drop_metadata: + with pytest.raises(ValueError): + _ = list(generator) + else: + assert all( + example.keys() == {"audio"} and all(val is not None for val in example.values()) + for _, example in generator + ) + + +@require_sndfile +@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata): + data_files = data_files_with_one_split_and_metadata + audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir) + audiofolder.download_and_prepare() + datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() + for split, data_files in data_files.items(): + expected_num_of_audios = len(data_files) - 1 # don't count the metadata file + assert split in datasets + dataset = list(datasets[split]) + assert len(dataset) == expected_num_of_audios + # make sure each sample has its own audio and metadata + assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios + assert len({example["text"] for example in dataset}) == expected_num_of_audios + assert all(example["text"] is not None for example in dataset) + + +@require_sndfile +@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata): + data_files = data_files_with_two_splits_and_metadata + audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir) + audiofolder.download_and_prepare() + datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() + for split, data_files in data_files.items(): + expected_num_of_audios = len(data_files) - 1 # don't count the metadata file + assert split in datasets + dataset = list(datasets[split]) + assert len(dataset) == expected_num_of_audios + # make sure each sample has its own audio and metadata + assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios + assert len({example["text"] for example in dataset}) == expected_num_of_audios + assert all(example["text"] is not None for example in dataset) + + +@require_sndfile 
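+# The test below runs both code paths: as_dataset() and, with streaming=True, as_streaming_dataset(); +# in streaming mode the zip archive is read on the fly rather than extracted, which is why the fixture +# above writes two distinct audio arrays instead of relying on comparable file names.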
+@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): + audiofolder = AudioFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) + audiofolder.download_and_prepare() + datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() + for split, data_files in data_files_with_zip_archives.items(): + num_of_archives = len(data_files) # the metadata file is inside the archive + expected_num_of_audios = 2 * num_of_archives + assert split in datasets + dataset = list(datasets[split]) + assert len(dataset) == expected_num_of_audios + # make sure each sample has its own audio (all arrays are different) and metadata + assert ( + sum(np.array_equal(dataset[0]["audio"]["array"], example["audio"]["array"]) for example in dataset[1:]) + == 0 + ) + assert len({example["text"] for example in dataset}) == expected_num_of_audios + assert all(example["text"] is not None for example in dataset) + + +@require_sndfile +def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, audio_file): + data_dir = tmp_path / "data_dir_with_bad_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(audio_file, data_dir / "audio_file.wav") + audio_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file + audio_metadata = textwrap.dedent( + """\ + {"file_name": "audio_file.wav", "text": "Audio transcription"} + """ + ) + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + audiofolder.download_and_prepare() + dataset = audiofolder.as_dataset(split="train") + # check that there are no metadata, since the metadata file name doesn't have the right name + assert "text" not in dataset.column_names + + +@require_sndfile +def test_data_files_with_wrong_audio_file_name_column_in_metadata_file(cache_dir, tmp_path, audio_file): + data_dir = tmp_path / "data_dir_with_bad_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(audio_file, data_dir / "audio_file.wav") + audio_metadata_filename = data_dir / "metadata.jsonl" + audio_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" + """\ + {"bad_file_name_column": "audio_file.wav", "text": "Audio transcription"} + """ + ) + with open(audio_metadata_filename, "w", encoding="utf-8") as f: + f.write(audio_metadata) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + with pytest.raises(ValueError) as exc_info: + audiofolder.download_and_prepare() + assert "`file_name` must be present" in str(exc_info.value) + + +@require_sndfile +def test_data_files_with_with_metadata_in_different_formats(cache_dir, tmp_path, audio_file): + data_dir = tmp_path / "data_dir_with_metadata_in_different_format" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(audio_file, data_dir / "audio_file.wav") + audio_metadata_filename_jsonl = data_dir / "metadata.jsonl" + audio_metadata_jsonl = textwrap.dedent( + """\ + {"file_name": "audio_file.wav", "text": "Audio transcription"} + """ + ) + with open(audio_metadata_filename_jsonl, "w", encoding="utf-8") as f: + 
f.write(audio_metadata_jsonl) + audio_metadata_filename_csv = data_dir / "metadata.csv" + audio_metadata_csv = textwrap.dedent( + """\ + file_name,text + audio_file.wav,Audio transcription + """ + ) + with open(audio_metadata_filename_csv, "w", encoding="utf-8") as f: + f.write(audio_metadata_csv) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + with pytest.raises(ValueError) as exc_info: + audiofolder.download_and_prepare() + assert "metadata files with different extensions" in str(exc_info.value) diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_folder_based_builder.py b/testbed/huggingface__datasets/tests/packaged_modules/test_folder_based_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..c6aad5ded09f586c6b19958b8336f7b8885767ea --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_folder_based_builder.py @@ -0,0 +1,530 @@ +import importlib +import shutil +import textwrap + +import pytest + +from datasets import ClassLabel, DownloadManager, Features, Value +from datasets.data_files import DataFilesDict, get_data_patterns +from datasets.download.streaming_download_manager import StreamingDownloadManager +from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( + FolderBasedBuilder, + FolderBasedBuilderConfig, +) +from datasets.tasks import TextClassification + + +remote_files = [ + "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hallo.txt", + "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hello.txt", + "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour.txt", + "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour2.txt", +] + + +class DummyFolderBasedBuilder(FolderBasedBuilder): + BASE_FEATURE = dict + BASE_COLUMN_NAME = "base" + BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig + EXTENSIONS = [".txt"] + CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label") + + +@pytest.fixture +def cache_dir(tmp_path): + return str(tmp_path / "autofolder_cache_dir") + + +@pytest.fixture +def auto_text_file(text_file): + return str(text_file) + + +@pytest.fixture +def data_files_with_labels_no_metadata(tmp_path, auto_text_file): + data_dir = tmp_path / "data_files_with_labels_no_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "class0" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "class1" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + filename = subdir_class_0 / "file0.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = subdir_class_1 / "file1.txt" + shutil.copyfile(auto_text_file, filename2) + + data_files_with_labels_no_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + + return data_files_with_labels_no_metadata + + +@pytest.fixture +def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file): + data_dir = tmp_path / "data_files_with_different_levels" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "class0" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "subdir" / "class1" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + filename = subdir_class_0 / 
"file0.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = subdir_class_1 / "file1.txt" + shutil.copyfile(auto_text_file, filename2) + + data_files_with_different_levels = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + + return data_files_with_different_levels + + +@pytest.fixture +def data_files_with_one_label_no_metadata(tmp_path, auto_text_file): + # only one label found = all files in a single dir/in a root dir + data_dir = tmp_path / "data_files_with_one_label" + data_dir.mkdir(parents=True, exist_ok=True) + + filename = data_dir / "file0.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = data_dir / "file1.txt" + shutil.copyfile(auto_text_file, filename2) + + data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + + return data_files_with_one_label + + +@pytest.fixture +def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file): + data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "class0" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "class1" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + filename = subdir_class_0 / "file_class0.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = subdir_class_1 / "file_class1.txt" + shutil.copyfile(auto_text_file, filename2) + + metadata_filename = tmp_path / data_dir / "metadata.jsonl" + metadata = textwrap.dedent( + """\ + {"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"} + {"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + + return str(filename), str(filename2), str(metadata_filename) + + +@pytest.fixture +def file_with_metadata(tmp_path, text_file): + filename = tmp_path / "file.txt" + shutil.copyfile(text_file, filename) + metadata_filename = tmp_path / "metadata.jsonl" + metadata = textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Dummy file"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + return str(filename), str(metadata_filename) + + +@pytest.fixture() +def files_with_metadata_that_misses_one_sample(tmp_path, auto_text_file): + filename = tmp_path / "file.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = tmp_path / "file2.txt" + shutil.copyfile(auto_text_file, filename2) + metadata_filename = tmp_path / "metadata.jsonl" + metadata = textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Dummy file"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + return str(filename), str(filename2), str(metadata_filename) + + +@pytest.fixture +def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): + data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split" + data_dir.mkdir(parents=True, exist_ok=True) + subdir = data_dir / "subdir" + subdir.mkdir(parents=True, exist_ok=True) + + filename = data_dir / "file.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = data_dir / "file2.txt" + shutil.copyfile(auto_text_file, filename2) + filename3 = subdir / "file3.txt" # in subdir + shutil.copyfile(auto_text_file, filename3) + + metadata_filename = data_dir / "metadata.jsonl" + metadata = 
textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Dummy file"} + {"file_name": "file2.txt", "additional_feature": "Second dummy file"} + {"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + assert len(data_files_with_one_split_and_metadata) == 1 + assert len(data_files_with_one_split_and_metadata["train"]) == 4 + return data_files_with_one_split_and_metadata + + +@pytest.fixture +def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): + data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" + data_dir.mkdir(parents=True, exist_ok=True) + train_dir = data_dir / "train" + train_dir.mkdir(parents=True, exist_ok=True) + test_dir = data_dir / "test" + test_dir.mkdir(parents=True, exist_ok=True) + + filename = train_dir / "file.txt" # train + shutil.copyfile(auto_text_file, filename) + filename2 = train_dir / "file2.txt" # train + shutil.copyfile(auto_text_file, filename2) + filename3 = test_dir / "file3.txt" # test + shutil.copyfile(auto_text_file, filename3) + + train_metadata_filename = train_dir / "metadata.jsonl" + train_metadata = textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Train dummy file"} + {"file_name": "file2.txt", "additional_feature": "Second train dummy file"} + """ + ) + with open(train_metadata_filename, "w", encoding="utf-8") as f: + f.write(train_metadata) + test_metadata_filename = test_dir / "metadata.jsonl" + test_metadata = textwrap.dedent( + """\ + {"file_name": "file3.txt", "additional_feature": "Test dummy file"} + """ + ) + with open(test_metadata_filename, "w", encoding="utf-8") as f: + f.write(test_metadata) + data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + assert len(data_files_with_two_splits_and_metadata) == 2 + assert len(data_files_with_two_splits_and_metadata["train"]) == 3 + assert len(data_files_with_two_splits_and_metadata["test"]) == 2 + return data_files_with_two_splits_and_metadata + + +@pytest.fixture +def data_files_with_zip_archives(tmp_path, auto_text_file): + data_dir = tmp_path / "autofolder_data_dir_with_zip_archives" + data_dir.mkdir(parents=True, exist_ok=True) + archive_dir = data_dir / "archive" + archive_dir.mkdir(parents=True, exist_ok=True) + subdir = archive_dir / "subdir" + subdir.mkdir(parents=True, exist_ok=True) + + filename = archive_dir / "file.txt" + shutil.copyfile(auto_text_file, filename) + filename2 = subdir / "file2.txt" # in subdir + shutil.copyfile(auto_text_file, filename2) + + metadata_filename = archive_dir / "metadata.jsonl" + metadata = textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Dummy file"} + {"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + + shutil.make_archive(archive_dir, "zip", archive_dir) + shutil.rmtree(str(archive_dir)) + + data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + + assert len(data_files_with_zip_archives) == 1 + assert len(data_files_with_zip_archives["train"]) == 1 + return data_files_with_zip_archives + + +def 
test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir): + autofolder = DummyFolderBasedBuilder( + data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + assert autofolder.info.features == Features({"base": {}, "label": ClassLabel(names=["class0", "class1"])}) + generator = autofolder._generate_examples(**gen_kwargs) + assert all(example["label"] in {"class0", "class1"} for _, example in generator) + + +def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir): + # builder would try to access non-existing attributes of a default `BuilderConfig` class + # as a custom one is not provided + with pytest.raises(AttributeError): + _ = FolderBasedBuilder( + data_files=data_files_with_labels_no_metadata, + cache_dir=cache_dir, + ) + + +# test that AutoFolder is extended for streaming when its child class is instantiated: +# see line 115 in src/datasets/streaming.py +def test_streaming_patched(): + _ = DummyFolderBasedBuilder() + module = importlib.import_module(FolderBasedBuilder.__module__) + assert hasattr(module, "_patched_for_streaming") + assert module._patched_for_streaming + + +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_duplicated_label_key( + files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog +): + class0_file, class1_file, metadata_file = files_with_labels_and_duplicated_label_key_in_metadata + autofolder = DummyFolderBasedBuilder( + data_files=[class0_file, class1_file, metadata_file], + cache_dir=cache_dir, + drop_metadata=drop_metadata, + drop_labels=drop_labels, + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = autofolder._generate_examples(**gen_kwargs) + if drop_labels is False: + # infer labels from directories even if metadata files are found + warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) + assert warning_in_logs if drop_metadata is not True else not warning_in_logs + assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"]) + assert all(example["label"] in ["class0", "class1"] for _, example in generator) + + else: + if drop_metadata is not True: + # labels are from metadata + assert autofolder.info.features["label"] == Value("string") + assert all(example["label"] in ["CLASS_0", "CLASS_1"] for _, example in generator) + else: + # drop both labels and metadata + assert autofolder.info.features == Features({"base": {}}) + assert all(example.keys() == {"base"} for _, example in generator) + + +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_drop_labels( + data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir +): + autofolder = DummyFolderBasedBuilder( + data_files=data_files_with_labels_no_metadata, + drop_metadata=drop_metadata, + drop_labels=drop_labels, + cache_dir=cache_dir, + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + # removing labels explicitly requires drop_labels=True + assert gen_kwargs["add_labels"] is not bool(drop_labels) + assert gen_kwargs["add_metadata"] is False + generator = 
autofolder._generate_examples(**gen_kwargs) + if not drop_labels: + assert all( + example.keys() == {"base", "label"} and all(val is not None for val in example.values()) + for _, example in generator + ) + else: + assert all( + example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator + ) + + +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir): + file, metadata_file = file_with_metadata + autofolder = DummyFolderBasedBuilder( + data_files=[file, metadata_file], + drop_metadata=drop_metadata, + drop_labels=drop_labels, + cache_dir=cache_dir, + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True + assert gen_kwargs["add_metadata"] is not bool(drop_metadata) + # since the dataset has metadata, adding the labels explicitly requires drop_labels=False + assert gen_kwargs["add_labels"] is (drop_labels is False) + generator = autofolder._generate_examples(**gen_kwargs) + expected_columns = {"base"} + if gen_kwargs["add_metadata"]: + expected_columns.add("additional_feature") + if gen_kwargs["add_labels"]: + expected_columns.add("label") + result = [example for _, example in generator] + assert len(result) == 1 + example = result[0] + assert example.keys() == expected_columns + for column in expected_columns: + assert example[column] is not None + + +@pytest.mark.parametrize("remote", [True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_data_files_with_different_levels_no_metadata( + data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir +): + data_files = remote_files if remote else data_files_with_different_levels_no_metadata + autofolder = DummyFolderBasedBuilder( + data_files=data_files, + cache_dir=cache_dir, + drop_labels=drop_labels, + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = autofolder._generate_examples(**gen_kwargs) + if drop_labels is not False: + # with None (default) we should drop labels if files are on different levels in dir structure + assert "label" not in autofolder.info.features + assert all(example.keys() == {"base"} for _, example in generator) + else: + assert "label" in autofolder.info.features + assert isinstance(autofolder.info.features["label"], ClassLabel) + assert all(example.keys() == {"base", "label"} for _, example in generator) + + +@pytest.mark.parametrize("remote", [False, True]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir): + data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata + autofolder = DummyFolderBasedBuilder( + data_files=data_files, + cache_dir=cache_dir, + drop_labels=drop_labels, + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = autofolder._generate_examples(**gen_kwargs) + if drop_labels is not False: + # with None (default) we should drop labels if only one label is found (=if there is a single dir) + assert "label" not in autofolder.info.features + assert all(example.keys() == {"base"} for _, example in generator) + else: + assert "label" in autofolder.info.features 
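+ # with drop_labels=False, label inference is forced even though every file sits in one directory, + # so a single-class ClassLabel is still produced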
+ assert isinstance(autofolder.info.features["label"], ClassLabel) + assert all(example.keys() == {"base", "label"} for _, example in generator) + + +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +def test_data_files_with_metadata_that_misses_one_sample( + files_with_metadata_that_misses_one_sample, drop_metadata, cache_dir +): + file, file2, metadata_file = files_with_metadata_that_misses_one_sample + if not drop_metadata: + features = Features({"base": None, "additional_feature": Value("string")}) + else: + features = Features({"base": None}) + autofolder = DummyFolderBasedBuilder( + data_files=[file, file2, metadata_file], + drop_metadata=drop_metadata, + features=features, + cache_dir=cache_dir, + ) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = autofolder._generate_examples(**gen_kwargs) + if not drop_metadata: + with pytest.raises(ValueError): + list(generator) + else: + assert all( + example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator + ) + + +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize("n_splits", [1, 2]) +def test_data_files_with_metadata_and_splits( + streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata +): + data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata + autofolder = DummyFolderBasedBuilder( + data_files=data_files, + cache_dir=cache_dir, + ) + download_manager = StreamingDownloadManager() if streaming else DownloadManager() + generated_splits = autofolder._split_generators(download_manager) + for (split, files), generated_split in zip(data_files.items(), generated_splits): + assert split == generated_split.name + expected_num_of_examples = len(files) - 1 + generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) + assert len(generated_examples) == expected_num_of_examples + assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples + assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples + assert all(example["additional_feature"] is not None for _, example in generated_examples) + + +@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): + autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) + download_manager = StreamingDownloadManager() if streaming else DownloadManager() + generated_splits = autofolder._split_generators(download_manager) + for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits): + assert split == generated_split.name + num_of_archives = len(files) + expected_num_of_examples = 2 * num_of_archives + generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) + assert len(generated_examples) == expected_num_of_examples + assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples + assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples + assert all(example["additional_feature"] is not None for _, example in generated_examples) + + +def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file): + data_dir = tmp_path / 
"data_dir_with_bad_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(auto_text_file, data_dir / "file.txt") + metadata_filename = data_dir / "bad_metadata.jsonl" # bad file + metadata = textwrap.dedent( + """\ + {"file_name": "file.txt", "additional_feature": "Dummy file"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = autofolder._generate_examples(**gen_kwargs) + assert all("additional_feature" not in example for _, example in generator) + + +def test_data_files_with_wrong_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file): + data_dir = tmp_path / "data_dir_with_bad_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(auto_text_file, data_dir / "file.txt") + metadata_filename = data_dir / "metadata.jsonl" + metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" + """\ + {"bad_file_name": "file.txt", "additional_feature": "Dummy file"} + """ + ) + with open(metadata_filename, "w", encoding="utf-8") as f: + f.write(metadata) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + with pytest.raises(ValueError) as exc_info: + _ = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + assert "`file_name` must be present" in str(exc_info.value) diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_imagefolder.py b/testbed/huggingface__datasets/tests/packaged_modules/test_imagefolder.py new file mode 100644 index 0000000000000000000000000000000000000000..3be9195d6aa975fd2648dd2e2d915c27162ca0e2 --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_imagefolder.py @@ -0,0 +1,509 @@ +import shutil +import textwrap + +import numpy as np +import pytest + +from datasets import ClassLabel, Features, Image, Value +from datasets.data_files import DataFilesDict, get_data_patterns +from datasets.download.streaming_download_manager import StreamingDownloadManager +from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder + +from ..utils import require_pil + + +@pytest.fixture +def cache_dir(tmp_path): + return str(tmp_path / "imagefolder_cache_dir") + + +@pytest.fixture +def data_files_with_labels_no_metadata(tmp_path, image_file): + data_dir = tmp_path / "data_files_with_labels_no_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "cat" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "dog" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + image_filename = subdir_class_0 / "image_cat.jpg" + shutil.copyfile(image_file, image_filename) + image_filename2 = subdir_class_1 / "image_dog.jpg" + shutil.copyfile(image_file, image_filename2) + + data_files_with_labels_no_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + + return data_files_with_labels_no_metadata + + +@pytest.fixture +def image_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, image_file): + data_dir = tmp_path / 
"image_files_with_labels_and_label_key_in_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + subdir_class_0 = data_dir / "cat" + subdir_class_0.mkdir(parents=True, exist_ok=True) + subdir_class_1 = data_dir / "dog" + subdir_class_1.mkdir(parents=True, exist_ok=True) + + image_filename = subdir_class_0 / "image_cat.jpg" + shutil.copyfile(image_file, image_filename) + image_filename2 = subdir_class_1 / "image_dog.jpg" + shutil.copyfile(image_file, image_filename2) + + image_metadata_filename = tmp_path / data_dir / "metadata.jsonl" + image_metadata = textwrap.dedent( + """\ + {"file_name": "cat/image_cat.jpg", "caption": "Nice image of a cat", "label": "Cat"} + {"file_name": "dog/image_dog.jpg", "caption": "Nice image of a dog", "label": "Dog"} + """ + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + + return str(image_filename), str(image_filename2), str(image_metadata_filename) + + +@pytest.fixture +def image_file_with_metadata(tmp_path, image_file): + image_filename = tmp_path / "image_rgb.jpg" + shutil.copyfile(image_file, image_filename) + image_metadata_filename = tmp_path / "metadata.jsonl" + image_metadata = textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice image"} + """ + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + return str(image_filename), str(image_metadata_filename) + + +@pytest.fixture +def image_files_with_metadata_that_misses_one_image(tmp_path, image_file): + image_filename = tmp_path / "image_rgb.jpg" + shutil.copyfile(image_file, image_filename) + image_filename2 = tmp_path / "image_rgb2.jpg" + shutil.copyfile(image_file, image_filename2) + image_metadata_filename = tmp_path / "metadata.jsonl" + image_metadata = textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice image"} + """ + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + return str(image_filename), str(image_filename2), str(image_metadata_filename) + + +@pytest.fixture(params=["jsonl", "csv"]) +def data_files_with_one_split_and_metadata(request, tmp_path, image_file): + data_dir = tmp_path / "imagefolder_data_dir_with_metadata_one_split" + data_dir.mkdir(parents=True, exist_ok=True) + subdir = data_dir / "subdir" + subdir.mkdir(parents=True, exist_ok=True) + + image_filename = data_dir / "image_rgb.jpg" + shutil.copyfile(image_file, image_filename) + image_filename2 = data_dir / "image_rgb2.jpg" + shutil.copyfile(image_file, image_filename2) + image_filename3 = subdir / "image_rgb3.jpg" # in subdir + shutil.copyfile(image_file, image_filename3) + + image_metadata_filename = data_dir / f"metadata.{request.param}" + image_metadata = ( + textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice image"} + {"file_name": "image_rgb2.jpg", "caption": "Nice second image"} + {"file_name": "subdir/image_rgb3.jpg", "caption": "Nice third image"} + """ + ) + if request.param == "jsonl" + else textwrap.dedent( + """\ + file_name,caption + image_rgb.jpg,Nice image + image_rgb2.jpg,Nice second image + subdir/image_rgb3.jpg,Nice third image + """ + ) + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + assert len(data_files_with_one_split_and_metadata) == 1 + assert len(data_files_with_one_split_and_metadata["train"]) == 4 + return 
data_files_with_one_split_and_metadata + + +@pytest.fixture(params=["jsonl", "csv"]) +def data_files_with_two_splits_and_metadata(request, tmp_path, image_file): + data_dir = tmp_path / "imagefolder_data_dir_with_metadata_two_splits" + data_dir.mkdir(parents=True, exist_ok=True) + train_dir = data_dir / "train" + train_dir.mkdir(parents=True, exist_ok=True) + test_dir = data_dir / "test" + test_dir.mkdir(parents=True, exist_ok=True) + + image_filename = train_dir / "image_rgb.jpg" # train image + shutil.copyfile(image_file, image_filename) + image_filename2 = train_dir / "image_rgb2.jpg" # train image + shutil.copyfile(image_file, image_filename2) + image_filename3 = test_dir / "image_rgb3.jpg" # test image + shutil.copyfile(image_file, image_filename3) + + train_image_metadata_filename = train_dir / f"metadata.{request.param}" + image_metadata = ( + textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice train image"} + {"file_name": "image_rgb2.jpg", "caption": "Nice second train image"} + """ + ) + if request.param == "jsonl" + else textwrap.dedent( + """\ + file_name,caption + image_rgb.jpg,Nice train image + image_rgb2.jpg,Nice second train image + """ + ) + ) + with open(train_image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + test_image_metadata_filename = test_dir / f"metadata.{request.param}" + image_metadata = ( + textwrap.dedent( + """\ + {"file_name": "image_rgb3.jpg", "caption": "Nice test image"} + """ + ) + if request.param == "jsonl" + else textwrap.dedent( + """\ + file_name,caption + image_rgb3.jpg,Nice test image + """ + ) + ) + with open(test_image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( + get_data_patterns(str(data_dir)), data_dir.as_posix() + ) + assert len(data_files_with_two_splits_and_metadata) == 2 + assert len(data_files_with_two_splits_and_metadata["train"]) == 3 + assert len(data_files_with_two_splits_and_metadata["test"]) == 2 + return data_files_with_two_splits_and_metadata + + +@pytest.fixture +def data_files_with_zip_archives(tmp_path, image_file): + from PIL import Image, ImageOps + + data_dir = tmp_path / "imagefolder_data_dir_with_zip_archives" + data_dir.mkdir(parents=True, exist_ok=True) + archive_dir = data_dir / "archive" + archive_dir.mkdir(parents=True, exist_ok=True) + subdir = archive_dir / "subdir" + subdir.mkdir(parents=True, exist_ok=True) + + image_filename = archive_dir / "image_rgb.jpg" + shutil.copyfile(image_file, image_filename) + image_filename2 = subdir / "image_rgb2.jpg" # in subdir + # make sure they're two different images + # Indeed we won't be able to compare the image.filename, since the archive is not extracted in streaming mode + ImageOps.flip(Image.open(image_file)).save(image_filename2) + + image_metadata_filename = archive_dir / "metadata.jsonl" + image_metadata = textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice image"} + {"file_name": "subdir/image_rgb2.jpg", "caption": "Nice second image"} + """ + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + + shutil.make_archive(archive_dir, "zip", archive_dir) + shutil.rmtree(str(archive_dir)) + + data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + + assert len(data_files_with_zip_archives) == 1 + assert len(data_files_with_zip_archives["train"]) == 1 + return data_files_with_zip_archives + + 
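+# The tests below exercise ImageFolder's two sources of columns: a "label" column
+# inferred from directory names (cat/, dog/) and extra columns such as "caption"
+# read from metadata.jsonl / metadata.csv files. A minimal sketch of the equivalent
+# end-user call (illustrative only; "path/to/folder" is a placeholder, not one of
+# the fixtures above):
+#
+#     from datasets import load_dataset
+#     # with drop_labels=False the inferred "label" ClassLabel column is kept
+#     ds = load_dataset("imagefolder", data_dir="path/to/folder", drop_labels=False)
+
+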
+@require_pil +# check that labels are inferred correctly from dir names +def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir): + # there are no metadata.jsonl files in this test case + imagefolder = ImageFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False) + imagefolder.download_and_prepare() + assert imagefolder.info.features == Features({"image": Image(), "label": ClassLabel(names=["cat", "dog"])}) + dataset = list(imagefolder.as_dataset()["train"]) + label_feature = imagefolder.info.features["label"] + + assert dataset[0]["label"] == label_feature._str2int["cat"] + assert dataset[1]["label"] == label_feature._str2int["dog"] + + +@require_pil +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_duplicated_label_key( + image_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog +): + cat_image_file, dog_image_file, image_metadata_file = image_files_with_labels_and_duplicated_label_key_in_metadata + imagefolder = ImageFolder( + drop_metadata=drop_metadata, + drop_labels=drop_labels, + data_files=[cat_image_file, dog_image_file, image_metadata_file], + cache_dir=cache_dir, + ) + if drop_labels is False: + # infer labels from directories even if metadata files are found + imagefolder.download_and_prepare() + warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) + assert warning_in_logs if drop_metadata is not True else not warning_in_logs + dataset = imagefolder.as_dataset()["train"] + assert imagefolder.info.features["label"] == ClassLabel(names=["cat", "dog"]) + assert all(example["label"] in imagefolder.info.features["label"]._str2int.values() for example in dataset) + else: + imagefolder.download_and_prepare() + dataset = imagefolder.as_dataset()["train"] + if drop_metadata is not True: + # labels are from metadata + assert imagefolder.info.features["label"] == Value("string") + assert all(example["label"] in ["Cat", "Dog"] for example in dataset) + else: + # drop both labels and metadata + assert imagefolder.info.features == Features({"image": Image()}) + assert all(example.keys() == {"image"} for example in dataset) + + +@require_pil +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels): + imagefolder = ImageFolder( + drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata + ) + gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + # removing the labels explicitly requires drop_labels=True + assert gen_kwargs["add_labels"] is not bool(drop_labels) + assert gen_kwargs["add_metadata"] is False + generator = imagefolder._generate_examples(**gen_kwargs) + if not drop_labels: + assert all( + example.keys() == {"image", "label"} and all(val is not None for val in example.values()) + for _, example in generator + ) + else: + assert all( + example.keys() == {"image"} and all(val is not None for val in example.values()) + for _, example in generator + ) + + +@require_pil +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +@pytest.mark.parametrize("drop_labels", [None, True, False]) +def test_generate_examples_drop_metadata(image_file_with_metadata, drop_metadata, 
drop_labels): + image_file, image_metadata_file = image_file_with_metadata + imagefolder = ImageFolder( + drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [image_file, image_metadata_file]} + ) + gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True + assert gen_kwargs["add_metadata"] is not bool(drop_metadata) + # since the dataset has metadata, adding the labels explicitly requires drop_labels=False + assert gen_kwargs["add_labels"] is (drop_labels is False) + generator = imagefolder._generate_examples(**gen_kwargs) + expected_columns = {"image"} + if gen_kwargs["add_metadata"]: + expected_columns.add("caption") + if gen_kwargs["add_labels"]: + expected_columns.add("label") + result = [example for _, example in generator] + assert len(result) == 1 + example = result[0] + assert example.keys() == expected_columns + for column in expected_columns: + assert example[column] is not None + + +@require_pil +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +def test_generate_examples_with_metadata_in_wrong_location(image_file, image_file_with_metadata, drop_metadata): + _, image_metadata_file = image_file_with_metadata + imagefolder = ImageFolder(drop_metadata=drop_metadata, data_files={"train": [image_file, image_metadata_file]}) + gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = imagefolder._generate_examples(**gen_kwargs) + if not drop_metadata: + with pytest.raises(ValueError): + list(generator) + else: + assert all( + example.keys() == {"image"} and all(val is not None for val in example.values()) + for _, example in generator + ) + + +@require_pil +@pytest.mark.parametrize("drop_metadata", [None, True, False]) +def test_generate_examples_with_metadata_that_misses_one_image( + image_files_with_metadata_that_misses_one_image, drop_metadata +): + image_file, image_file2, image_metadata_file = image_files_with_metadata_that_misses_one_image + if not drop_metadata: + features = Features({"image": Image(), "caption": Value("string")}) + else: + features = Features({"image": Image()}) + imagefolder = ImageFolder( + drop_metadata=drop_metadata, + features=features, + data_files={"train": [image_file, image_file2, image_metadata_file]}, + ) + gen_kwargs = imagefolder._split_generators(StreamingDownloadManager())[0].gen_kwargs + generator = imagefolder._generate_examples(**gen_kwargs) + if not drop_metadata: + with pytest.raises(ValueError): + list(generator) + else: + assert all( + example.keys() == {"image"} and all(val is not None for val in example.values()) + for _, example in generator + ) + + +@require_pil +@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata): + data_files = data_files_with_one_split_and_metadata + imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir) + imagefolder.download_and_prepare() + datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset() + for split, data_files in data_files.items(): + expected_num_of_images = len(data_files) - 1 # don't count the metadata file + assert split in datasets + dataset = list(datasets[split]) + assert len(dataset) == expected_num_of_images + # make sure each sample has its own image and metadata + assert len({example["image"].filename for example in dataset}) == 
expected_num_of_images + assert len({example["caption"] for example in dataset}) == expected_num_of_images + assert all(example["caption"] is not None for example in dataset) + + +@require_pil +@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata): + data_files = data_files_with_two_splits_and_metadata + imagefolder = ImageFolder(data_files=data_files, cache_dir=cache_dir) + imagefolder.download_and_prepare() + datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset() + for split, data_files in data_files.items(): + expected_num_of_images = len(data_files) - 1 # don't count the metadata file + assert split in datasets + dataset = list(datasets[split]) + assert len(dataset) == expected_num_of_images + # make sure each sample has its own image and metadata + assert len({example["image"].filename for example in dataset}) == expected_num_of_images + assert len({example["caption"] for example in dataset}) == expected_num_of_images + assert all(example["caption"] is not None for example in dataset) + + +@require_pil +@pytest.mark.parametrize("streaming", [False, True]) +def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): + imagefolder = ImageFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) + imagefolder.download_and_prepare() + datasets = imagefolder.as_streaming_dataset() if streaming else imagefolder.as_dataset() + for split, data_files in data_files_with_zip_archives.items(): + num_of_archives = len(data_files) # the metadata file is inside the archive + expected_num_of_images = 2 * num_of_archives + assert split in datasets + dataset = list(datasets[split]) + assert len(dataset) == expected_num_of_images + # make sure each sample has its own image and metadata + assert len({np.array(example["image"])[0, 0, 0] for example in dataset}) == expected_num_of_images + assert len({example["caption"] for example in dataset}) == expected_num_of_images + assert all(example["caption"] is not None for example in dataset) + + +@require_pil +def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, image_file): + data_dir = tmp_path / "data_dir_with_bad_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(image_file, data_dir / "image_rgb.jpg") + image_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file + image_metadata = textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice image"} + """ + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + imagefolder.download_and_prepare() + dataset = imagefolder.as_dataset(split="train") + # check that there are no metadata, since the metadata file name doesn't have the right name + assert "caption" not in dataset.column_names + + +@require_pil +def test_data_files_with_wrong_image_file_name_column_in_metadata_file(cache_dir, tmp_path, image_file): + data_dir = tmp_path / "data_dir_with_bad_metadata" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(image_file, data_dir / "image_rgb.jpg") + image_metadata_filename = data_dir / "metadata.jsonl" + image_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of 
"file_name" + """\ + {"bad_file_name": "image_rgb.jpg", "caption": "Nice image"} + """ + ) + with open(image_metadata_filename, "w", encoding="utf-8") as f: + f.write(image_metadata) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + with pytest.raises(ValueError) as exc_info: + imagefolder.download_and_prepare() + assert "`file_name` must be present" in str(exc_info.value) + + +@require_pil +def test_data_files_with_with_metadata_in_different_formats(cache_dir, tmp_path, image_file): + data_dir = tmp_path / "data_dir_with_metadata_in_different_format" + data_dir.mkdir(parents=True, exist_ok=True) + shutil.copyfile(image_file, data_dir / "image_rgb.jpg") + image_metadata_filename_jsonl = data_dir / "metadata.jsonl" + image_metadata_jsonl = textwrap.dedent( + """\ + {"file_name": "image_rgb.jpg", "caption": "Nice image"} + """ + ) + with open(image_metadata_filename_jsonl, "w", encoding="utf-8") as f: + f.write(image_metadata_jsonl) + image_metadata_filename_csv = data_dir / "metadata.csv" + image_metadata_csv = textwrap.dedent( + """\ + file_name,caption + image_rgb.jpg,Nice image + """ + ) + with open(image_metadata_filename_csv, "w", encoding="utf-8") as f: + f.write(image_metadata_csv) + + data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) + imagefolder = ImageFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) + with pytest.raises(ValueError) as exc_info: + imagefolder.download_and_prepare() + assert "metadata files with different extensions" in str(exc_info.value) diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_json.py b/testbed/huggingface__datasets/tests/packaged_modules/test_json.py new file mode 100644 index 0000000000000000000000000000000000000000..43bd90ac98b3497fc2753042c25a8590c80a860a --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_json.py @@ -0,0 +1,120 @@ +import textwrap + +import pyarrow as pa +import pytest + +from datasets import Features, Value +from datasets.packaged_modules.json.json import Json + + +@pytest.fixture +def jsonl_file(tmp_path): + filename = tmp_path / "file.jsonl" + data = textwrap.dedent( + """\ + {"col_1": -1} + {"col_1": 1, "col_2": 2} + {"col_1": 10, "col_2": 20} + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def jsonl_file_utf16_encoded(tmp_path): + filename = tmp_path / "file_utf16_encoded.jsonl" + data = textwrap.dedent( + """\ + {"col_1": -1} + {"col_1": 1, "col_2": 2} + {"col_1": 10, "col_2": 20} + """ + ) + with open(filename, "w", encoding="utf-16") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def json_file_with_list_of_dicts(tmp_path): + filename = tmp_path / "file_with_list_of_dicts.json" + data = textwrap.dedent( + """\ + [ + {"col_1": -1}, + {"col_1": 1, "col_2": 2}, + {"col_1": 10, "col_2": 20} + ] + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def json_file_with_list_of_dicts_field(tmp_path): + filename = tmp_path / "file_with_list_of_dicts_field.json" + data = textwrap.dedent( + """\ + { + "field1": 1, + "field2": "aabb", + "field3": [ + {"col_1": -1}, + {"col_1": 1, "col_2": 2}, + {"col_1": 10, "col_2": 20} + ] + } + """ + ) + with open(filename, "w") as f: + f.write(data) + return str(filename) + + 
+@pytest.mark.parametrize( + "file_fixture, config_kwargs", + [ + ("jsonl_file", {}), + ("jsonl_file_utf16_encoded", {"encoding": "utf-16"}), + ("json_file_with_list_of_dicts", {}), + ("json_file_with_list_of_dicts_field", {"field": "field3"}), + ], +) +def test_json_generate_tables(file_fixture, config_kwargs, request): + json = Json(**config_kwargs) + generator = json._generate_tables([[request.getfixturevalue(file_fixture)]]) + pa_table = pa.concat_tables([table for _, table in generator]) + assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20]} + + +@pytest.mark.parametrize( + "file_fixture, config_kwargs", + [ + ( + "jsonl_file", + {"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})}, + ), + ( + "json_file_with_list_of_dicts", + {"features": Features({"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")})}, + ), + ( + "json_file_with_list_of_dicts_field", + { + "field": "field3", + "features": Features( + {"col_1": Value("int64"), "col_2": Value("int64"), "missing_col": Value("string")} + ), + }, + ), + ], +) +def test_json_generate_tables_with_missing_features(file_fixture, config_kwargs, request): + json = Json(**config_kwargs) + generator = json._generate_tables([[request.getfixturevalue(file_fixture)]]) + pa_table = pa.concat_tables([table for _, table in generator]) + assert pa_table.to_pydict() == {"col_1": [-1, 1, 10], "col_2": [None, 2, 20], "missing_col": [None, None, None]} diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_spark.py b/testbed/huggingface__datasets/tests/packaged_modules/test_spark.py new file mode 100644 index 0000000000000000000000000000000000000000..cabc61c682f5d0e9fcbae14388cfbcaf899cf97b --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_spark.py @@ -0,0 +1,118 @@ +from unittest.mock import patch + +import pyspark + +from datasets.packaged_modules.spark.spark import ( + Spark, + SparkExamplesIterable, + _generate_iterable_examples, +) + +from ..utils import ( + require_dill_gt_0_3_2, + require_not_windows, +) + + +def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order): + expected_row_ids_and_row_dicts = [] + for part_id in partition_order: + partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect() + for row_idx, row in enumerate(partition): + expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict())) + return expected_row_ids_and_row_dicts + + +@require_not_windows +@require_dill_gt_0_3_2 +def test_repartition_df_if_needed(): + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.range(100).repartition(1) + spark_builder = Spark(df) + # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means + # that each partition can hold 2 rows. + spark_builder._repartition_df_if_needed(max_shard_size=16) + # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. + assert spark_builder.df.rdd.getNumPartitions() == 50 + + +@require_not_windows +@require_dill_gt_0_3_2 +def test_generate_iterable_examples(): + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.range(10).repartition(2) + partition_order = [1, 0] + generate_fn = _generate_iterable_examples(df, partition_order) # Reverse the partitions. 
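+ # Row ids follow the "{partition_id}_{row_index}" scheme used by
+ # _get_expected_row_ids_and_row_dicts_for_partition_order above, so with
+ # partition_order=[1, 0] every row of partition 1 is yielded before partition 0.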
+ expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order) + + for i, (row_id, row_dict) in enumerate(generate_fn()): + expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i] + assert row_id == expected_row_id + assert row_dict == expected_row_dict + + +@require_not_windows +@require_dill_gt_0_3_2 +def test_spark_examples_iterable(): + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.range(10).repartition(1) + it = SparkExamplesIterable(df) + assert it.n_shards == 1 + for i, (row_id, row_dict) in enumerate(it): + assert row_id == f"0_{i}" + assert row_dict == {"id": i} + + +@require_not_windows +@require_dill_gt_0_3_2 +def test_spark_examples_iterable_shuffle(): + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.range(30).repartition(3) + # Mock the generator so that shuffle reverses the partition indices. + with patch("numpy.random.Generator") as generator_mock: + generator_mock.shuffle.side_effect = lambda x: x.reverse() + expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0]) + + shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock) + assert shuffled_it.n_shards == 3 + for i, (row_id, row_dict) in enumerate(shuffled_it): + expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i] + assert row_id == expected_row_id + assert row_dict == expected_row_dict + + +@require_not_windows +@require_dill_gt_0_3_2 +def test_spark_examples_iterable_shard(): + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.range(20).repartition(4) + + # Partitions 0 and 2 + shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2) + assert shard_it_1.n_shards == 2 + expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2]) + for i, (row_id, row_dict) in enumerate(shard_it_1): + expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i] + assert row_id == expected_row_id + assert row_dict == expected_row_dict + + # Partitions 1 and 3 + shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2) + assert shard_it_2.n_shards == 2 + expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3]) + for i, (row_id, row_dict) in enumerate(shard_it_2): + expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i] + assert row_id == expected_row_id + assert row_dict == expected_row_dict + + +@require_not_windows +@require_dill_gt_0_3_2 +def test_repartition_df_if_needed_max_num_df_rows(): + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.range(100).repartition(1) + spark_builder = Spark(df) + # Choose a small max_shard_size for maximum partitioning. + spark_builder._repartition_df_if_needed(max_shard_size=1) + # The new number of partitions should not be greater than the number of rows. 
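+ # (Each int64 row is ~8 bytes, so max_shard_size=1 would naively ask for ~800
+ # partitions; the repartitioning is capped at the dataframe's 100 rows.)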
+ assert spark_builder.df.rdd.getNumPartitions() == 100 diff --git a/testbed/huggingface__datasets/tests/packaged_modules/test_text.py b/testbed/huggingface__datasets/tests/packaged_modules/test_text.py new file mode 100644 index 0000000000000000000000000000000000000000..0d1b3f3b5a4f60437386f6a1f358d196cb6976b0 --- /dev/null +++ b/testbed/huggingface__datasets/tests/packaged_modules/test_text.py @@ -0,0 +1,77 @@ +import textwrap + +import pyarrow as pa +import pytest + +from datasets import Features, Image +from datasets.packaged_modules.text.text import Text + +from ..utils import require_pil + + +@pytest.fixture +def text_file(tmp_path): + filename = tmp_path / "text.txt" + data = textwrap.dedent( + """\ + Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. + Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. + Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. + + Second paragraph: + Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. + Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. + Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. + Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. + """ + ) + with open(filename, "w", encoding="utf-8") as f: + f.write(data) + return str(filename) + + +@pytest.fixture +def text_file_with_image(tmp_path, image_file): + filename = tmp_path / "text_with_image.txt" + with open(filename, "w", encoding="utf-8") as f: + f.write(image_file) + return str(filename) + + +@pytest.mark.parametrize("keep_linebreaks", [True, False]) +def test_text_linebreaks(text_file, keep_linebreaks): + with open(text_file, encoding="utf-8") as f: + expected_content = f.read().splitlines(keepends=keep_linebreaks) + text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8") + generator = text._generate_tables([[text_file]]) + generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] + assert generated_content == expected_content + + +@require_pil +def test_text_cast_image(text_file_with_image): + with open(text_file_with_image, encoding="utf-8") as f: + image_file = f.read().splitlines()[0] + text = Text(encoding="utf-8", features=Features({"image": Image()})) + generator = text._generate_tables([[text_file_with_image]]) + pa_table = pa.concat_tables([table for _, table in generator]) + assert pa_table.schema.field("image").type == Image()() + generated_content = pa_table.to_pydict()["image"] + assert generated_content == [{"path": image_file, "bytes": None}] + + +@pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"]) +def test_text_sample_by(sample_by, text_file): + with open(text_file, encoding="utf-8") as f: + expected_content = f.read() + if sample_by == "line": + expected_content = expected_content.splitlines() + elif sample_by == "paragraph": + expected_content = expected_content.split("\n\n") + elif sample_by == "document": + expected_content = [expected_content] + text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100) + generator = text._generate_tables([[text_file]]) + 
generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] + assert generated_content == expected_content diff --git a/testbed/huggingface__datasets/tests/test_arrow_dataset.py b/testbed/huggingface__datasets/tests/test_arrow_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4d4ecc9802bd9488cf5ebf0f835cad2771d97089 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_arrow_dataset.py @@ -0,0 +1,4695 @@ +import contextlib +import copy +import itertools +import json +import os +import pickle +import re +import sys +import tempfile +from functools import partial +from pathlib import Path +from unittest import TestCase +from unittest.mock import MagicMock, patch + +import numpy as np +import numpy.testing as npt +import pandas as pd +import pyarrow as pa +import pytest +from absl.testing import parameterized +from fsspec.core import strip_protocol +from packaging import version + +import datasets.arrow_dataset +from datasets import concatenate_datasets, interleave_datasets, load_from_disk +from datasets.arrow_dataset import Dataset, transmit_format, update_metadata_with_features +from datasets.dataset_dict import DatasetDict +from datasets.features import ( + Array2D, + Array3D, + Audio, + ClassLabel, + Features, + Image, + Sequence, + Translation, + TranslationVariableLanguages, + Value, +) +from datasets.info import DatasetInfo +from datasets.iterable_dataset import IterableDataset +from datasets.splits import NamedSplit +from datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable +from datasets.tasks import ( + AutomaticSpeechRecognition, + LanguageModeling, + QuestionAnsweringExtractive, + Summarization, + TextClassification, +) +from datasets.utils.logging import INFO, get_logger +from datasets.utils.py_utils import temp_seed + +from .utils import ( + assert_arrow_memory_doesnt_increase, + assert_arrow_memory_increases, + require_dill_gt_0_3_2, + require_jax, + require_not_windows, + require_pil, + require_pyspark, + require_sqlalchemy, + require_tf, + require_torch, + require_transformers, + set_current_working_directory_to_temp_dir, +) + + +class PickableMagicMock(MagicMock): + def __reduce__(self): + return MagicMock, () + + +class Unpicklable: + def __getstate__(self): + raise pickle.PicklingError() + + +def picklable_map_function(x): + return {"id": int(x["filename"].split("_")[-1])} + + +def picklable_map_function_with_indices(x, i): + return {"id": i} + + +def picklable_map_function_with_rank(x, r): + return {"rank": r} + + +def picklable_map_function_with_indices_and_rank(x, i, r): + return {"id": i, "rank": r} + + +def picklable_filter_function(x): + return int(x["filename"].split("_")[-1]) < 10 + + +def assert_arrow_metadata_are_synced_with_dataset_features(dataset: Dataset): + assert dataset.data.schema.metadata is not None + assert b"huggingface" in dataset.data.schema.metadata + metadata = json.loads(dataset.data.schema.metadata[b"huggingface"].decode()) + assert "info" in metadata + features = DatasetInfo.from_dict(metadata["info"]).features + assert features is not None + assert features == dataset.features + assert features == Features.from_arrow_schema(dataset.data.schema) + assert list(features) == dataset.data.column_names + assert list(features) == list(dataset.features) + + +IN_MEMORY_PARAMETERS = [ + {"testcase_name": name, "in_memory": im} for im, name in [(True, "in_memory"), (False, "on_disk")] +] + + +@parameterized.named_parameters(IN_MEMORY_PARAMETERS) +class 
BaseDatasetTest(TestCase): + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog, set_sqlalchemy_silence_uber_warning): + self._caplog = caplog + + def _create_dummy_dataset( + self, in_memory: bool, tmp_dir: str, multiple_columns=False, array_features=False, nested_features=False + ) -> Dataset: + assert int(multiple_columns) + int(array_features) + int(nested_features) < 2 + if multiple_columns: + data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": [False, True, False, True]} + dset = Dataset.from_dict(data) + elif array_features: + data = { + "col_1": [[[True, False], [False, True]]] * 4, # 2D + "col_2": [[[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]] * 4, # 3D array + "col_3": [[3, 2, 1, 0]] * 4, # Sequence + } + features = Features( + { + "col_1": Array2D(shape=(2, 2), dtype="bool"), + "col_2": Array3D(shape=(2, 2, 2), dtype="string"), + "col_3": Sequence(feature=Value("int64")), + } + ) + dset = Dataset.from_dict(data, features=features) + elif nested_features: + data = {"nested": [{"a": i, "x": i * 10, "c": i * 100} for i in range(1, 11)]} + features = Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}}) + dset = Dataset.from_dict(data, features=features) + else: + dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}) + if not in_memory: + dset = self._to(in_memory, tmp_dir, dset) + return dset + + def _to(self, in_memory, tmp_dir, *datasets): + if in_memory: + datasets = [dataset.map(keep_in_memory=True) for dataset in datasets] + else: + start = 0 + while os.path.isfile(os.path.join(tmp_dir, f"dataset{start}.arrow")): + start += 1 + datasets = [ + dataset.map(cache_file_name=os.path.join(tmp_dir, f"dataset{start + i}.arrow")) + for i, dataset in enumerate(datasets) + ] + return datasets if len(datasets) > 1 else datasets[0] + + def test_dummy_dataset(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + self.assertDictEqual( + dset.features, + Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}), + ) + self.assertEqual(dset[0]["col_1"], 3) + self.assertEqual(dset["col_1"][0], 3) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: + self.assertDictEqual( + dset.features, + Features( + { + "col_1": Array2D(shape=(2, 2), dtype="bool"), + "col_2": Array3D(shape=(2, 2, 2), dtype="string"), + "col_3": Sequence(feature=Value("int64")), + } + ), + ) + self.assertEqual(dset[0]["col_2"], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]) + self.assertEqual(dset["col_2"][0], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]) + + def test_dataset_getitem(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + + self.assertEqual(dset[-1]["filename"], "my_name-train_29") + self.assertEqual(dset["filename"][-1], "my_name-train_29") + + self.assertListEqual(dset[:2]["filename"], ["my_name-train_0", 
"my_name-train_1"]) + self.assertListEqual(dset["filename"][:2], ["my_name-train_0", "my_name-train_1"]) + + self.assertEqual(dset[:-1]["filename"][-1], "my_name-train_28") + self.assertEqual(dset["filename"][:-1][-1], "my_name-train_28") + + self.assertListEqual(dset[[0, -1]]["filename"], ["my_name-train_0", "my_name-train_29"]) + self.assertListEqual(dset[range(0, -2, -1)]["filename"], ["my_name-train_0", "my_name-train_29"]) + self.assertListEqual(dset[np.array([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"]) + self.assertListEqual(dset[pd.Series([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"]) + + with dset.select(range(2)) as dset_subset: + self.assertListEqual(dset_subset[-1:]["filename"], ["my_name-train_1"]) + self.assertListEqual(dset_subset["filename"][-1:], ["my_name-train_1"]) + + def test_dummy_dataset_deepcopy(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + with assert_arrow_memory_doesnt_increase(): + dset2 = copy.deepcopy(dset) + # don't copy the underlying arrow data using memory + self.assertEqual(len(dset2), 10) + self.assertDictEqual(dset2.features, Features({"filename": Value("string")})) + self.assertEqual(dset2[0]["filename"], "my_name-train_0") + self.assertEqual(dset2["filename"][0], "my_name-train_0") + del dset2 + + def test_dummy_dataset_pickle(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + tmp_file = os.path.join(tmp_dir, "dset.pt") + + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(0, 10, 2)) as dset: + with open(tmp_file, "wb") as f: + pickle.dump(dset, f) + + with open(tmp_file, "rb") as f: + with pickle.load(f) as dset: + self.assertEqual(len(dset), 5) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + + with self._create_dummy_dataset(in_memory, tmp_dir).select( + range(0, 10, 2), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow") + ) as dset: + if not in_memory: + dset._data.table = Unpicklable() + dset._indices.table = Unpicklable() + with open(tmp_file, "wb") as f: + pickle.dump(dset, f) + + with open(tmp_file, "rb") as f: + with pickle.load(f) as dset: + self.assertEqual(len(dset), 5) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + + def test_dummy_dataset_serialize(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with set_current_working_directory_to_temp_dir(): + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + dataset_path = "my_dataset" # rel path + dset.save_to_disk(dataset_path) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + expected = dset.to_dict() + + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + dataset_path = os.path.join(tmp_dir, "my_dataset") # abs path + dset.save_to_disk(dataset_path) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": 
Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + + with self._create_dummy_dataset(in_memory, tmp_dir).select( + range(10), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow") + ) as dset: + with assert_arrow_memory_doesnt_increase(): + dset.save_to_disk(dataset_path) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], "my_name-train_0") + + with self._create_dummy_dataset(in_memory, tmp_dir, nested_features=True) as dset: + with assert_arrow_memory_doesnt_increase(): + dset.save_to_disk(dataset_path) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual( + dset.features, + Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}}), + ) + self.assertDictEqual(dset[0]["nested"], {"a": 1, "c": 100, "x": 10}) + self.assertDictEqual(dset["nested"][0], {"a": 1, "c": 100, "x": 10}) + + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + with assert_arrow_memory_doesnt_increase(): + dset.save_to_disk(dataset_path, num_shards=4) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset.to_dict(), expected) + self.assertEqual(len(dset.cache_files), 4) + + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + with assert_arrow_memory_doesnt_increase(): + dset.save_to_disk(dataset_path, num_proc=2) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset.to_dict(), expected) + self.assertEqual(len(dset.cache_files), 2) + + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + with assert_arrow_memory_doesnt_increase(): + dset.save_to_disk(dataset_path, num_shards=7, num_proc=2) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset.to_dict(), expected) + self.assertEqual(len(dset.cache_files), 7) + + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + with assert_arrow_memory_doesnt_increase(): + max_shard_size = dset._estimate_nbytes() // 2 + 1 + dset.save_to_disk(dataset_path, max_shard_size=max_shard_size) + + with Dataset.load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset.to_dict(), expected) + self.assertEqual(len(dset.cache_files), 2) + + def test_dummy_dataset_load_from_disk(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset: + dataset_path = os.path.join(tmp_dir, "my_dataset") + dset.save_to_disk(dataset_path) + + with load_from_disk(dataset_path) as dset: + self.assertEqual(len(dset), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertEqual(dset[0]["filename"], "my_name-train_0") + self.assertEqual(dset["filename"][0], 
"my_name-train_0") + + def test_restore_saved_format(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) + dataset_path = os.path.join(tmp_dir, "my_dataset") + dset.save_to_disk(dataset_path) + + with load_from_disk(dataset_path) as loaded_dset: + self.assertEqual(dset.format, loaded_dset.format) + + def test_set_format_numpy_multiple_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + fingerprint = dset._fingerprint + dset.set_format(type="numpy", columns=["col_1"]) + self.assertEqual(len(dset[0]), 1) + self.assertIsInstance(dset[0]["col_1"], np.int64) + self.assertEqual(dset[0]["col_1"].item(), 3) + self.assertIsInstance(dset["col_1"], np.ndarray) + self.assertListEqual(list(dset["col_1"].shape), [4]) + np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0])) + self.assertNotEqual(dset._fingerprint, fingerprint) + + dset.reset_format() + with dset.formatted_as(type="numpy", columns=["col_1"]): + self.assertEqual(len(dset[0]), 1) + self.assertIsInstance(dset[0]["col_1"], np.int64) + self.assertEqual(dset[0]["col_1"].item(), 3) + self.assertIsInstance(dset["col_1"], np.ndarray) + self.assertListEqual(list(dset["col_1"].shape), [4]) + np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0])) + + self.assertEqual(dset.format["type"], None) + self.assertEqual(dset.format["format_kwargs"], {}) + self.assertEqual(dset.format["columns"], dset.column_names) + self.assertEqual(dset.format["output_all_columns"], False) + + dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) + self.assertEqual(len(dset[0]), 3) + self.assertIsInstance(dset[0]["col_2"], str) + self.assertEqual(dset[0]["col_2"], "a") + + dset.set_format(type="numpy", columns=["col_1", "col_2"]) + self.assertEqual(len(dset[0]), 2) + self.assertIsInstance(dset[0]["col_2"], np.str_) + self.assertEqual(dset[0]["col_2"].item(), "a") + + @require_torch + def test_set_format_torch(self, in_memory): + import torch + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_format(type="torch", columns=["col_1"]) + self.assertEqual(len(dset[0]), 1) + self.assertIsInstance(dset[0]["col_1"], torch.Tensor) + self.assertIsInstance(dset["col_1"], torch.Tensor) + self.assertListEqual(list(dset[0]["col_1"].shape), []) + self.assertEqual(dset[0]["col_1"].item(), 3) + + dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) + self.assertEqual(len(dset[0]), 3) + self.assertIsInstance(dset[0]["col_2"], str) + self.assertEqual(dset[0]["col_2"], "a") + + dset.set_format(type="torch") + self.assertEqual(len(dset[0]), 3) + self.assertIsInstance(dset[0]["col_1"], torch.Tensor) + self.assertIsInstance(dset["col_1"], torch.Tensor) + self.assertListEqual(list(dset[0]["col_1"].shape), []) + self.assertEqual(dset[0]["col_1"].item(), 3) + self.assertIsInstance(dset[0]["col_2"], str) + self.assertEqual(dset[0]["col_2"], "a") + self.assertIsInstance(dset[0]["col_3"], torch.Tensor) + self.assertIsInstance(dset["col_3"], torch.Tensor) + self.assertListEqual(list(dset[0]["col_3"].shape), []) + + @require_tf + def test_set_format_tf(self, in_memory): + import tensorflow as tf + + with tempfile.TemporaryDirectory() as tmp_dir: + with 
self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_format(type="tensorflow", columns=["col_1"]) + self.assertEqual(len(dset[0]), 1) + self.assertIsInstance(dset[0]["col_1"], tf.Tensor) + self.assertListEqual(list(dset[0]["col_1"].shape), []) + self.assertEqual(dset[0]["col_1"].numpy().item(), 3) + + dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) + self.assertEqual(len(dset[0]), 3) + self.assertIsInstance(dset[0]["col_2"], str) + self.assertEqual(dset[0]["col_2"], "a") + + dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) + self.assertEqual(len(dset[0]), 2) + self.assertEqual(dset[0]["col_2"].numpy().decode("utf-8"), "a") + + def test_set_format_pandas(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_format(type="pandas", columns=["col_1"]) + self.assertEqual(len(dset[0].columns), 1) + self.assertIsInstance(dset[0], pd.DataFrame) + self.assertListEqual(list(dset[0].shape), [1, 1]) + self.assertEqual(dset[0]["col_1"].item(), 3) + + dset.set_format(type="pandas", columns=["col_1", "col_2"]) + self.assertEqual(len(dset[0].columns), 2) + self.assertEqual(dset[0]["col_2"].item(), "a") + + def test_set_transform(self, in_memory): + def transform(batch): + return {k: [str(i).upper() for i in v] for k, v in batch.items()} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_transform(transform=transform, columns=["col_1"]) + self.assertEqual(dset.format["type"], "custom") + self.assertEqual(len(dset[0].keys()), 1) + self.assertEqual(dset[0]["col_1"], "3") + self.assertEqual(dset[:2]["col_1"], ["3", "2"]) + self.assertEqual(dset["col_1"][:2], ["3", "2"]) + + prev_format = dset.format + dset.set_format(**dset.format) + self.assertEqual(prev_format, dset.format) + + dset.set_transform(transform=transform, columns=["col_1", "col_2"]) + self.assertEqual(len(dset[0].keys()), 2) + self.assertEqual(dset[0]["col_2"], "A") + + def test_transmit_format(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + transform = datasets.arrow_dataset.transmit_format(lambda x: x) + # make sure identity transform doesn't apply unnecessary format + self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) + dset.set_format(**dset.format) + self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) + # check lists comparisons + dset.set_format(columns=["col_1"]) + self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) + dset.set_format(columns=["col_1", "col_2"]) + self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) + dset.set_format("numpy", columns=["col_1", "col_2"]) + self.assertEqual(dset._fingerprint, transform(dset)._fingerprint) + + def test_cast(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + features = dset.features + features["col_1"] = Value("float64") + features = Features({k: features[k] for k in list(features)[::-1]}) + fingerprint = dset._fingerprint + # TODO: with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): + with dset.cast(features) as casted_dset: + self.assertEqual(casted_dset.num_columns, 3) + 
self.assertEqual(casted_dset.features["col_1"], Value("float64")) + self.assertIsInstance(casted_dset[0]["col_1"], float) + self.assertNotEqual(casted_dset._fingerprint, fingerprint) + self.assertNotEqual(casted_dset, dset) + assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) + + def test_class_encode_column(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + with self.assertRaises(ValueError): + dset.class_encode_column(column="does not exist") + + with dset.class_encode_column("col_1") as casted_dset: + self.assertIsInstance(casted_dset.features["col_1"], ClassLabel) + self.assertListEqual(casted_dset.features["col_1"].names, ["0", "1", "2", "3"]) + self.assertListEqual(casted_dset["col_1"], [3, 2, 1, 0]) + self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) + self.assertNotEqual(casted_dset, dset) + assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) + + with dset.class_encode_column("col_2") as casted_dset: + self.assertIsInstance(casted_dset.features["col_2"], ClassLabel) + self.assertListEqual(casted_dset.features["col_2"].names, ["a", "b", "c", "d"]) + self.assertListEqual(casted_dset["col_2"], [0, 1, 2, 3]) + self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) + self.assertNotEqual(casted_dset, dset) + assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) + + with dset.class_encode_column("col_3") as casted_dset: + self.assertIsInstance(casted_dset.features["col_3"], ClassLabel) + self.assertListEqual(casted_dset.features["col_3"].names, ["False", "True"]) + self.assertListEqual(casted_dset["col_3"], [0, 1, 0, 1]) + self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint) + self.assertNotEqual(casted_dset, dset) + assert_arrow_metadata_are_synced_with_dataset_features(casted_dset) + + # Test raises if feature is an array / sequence + with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: + for column in dset.column_names: + with self.assertRaises(ValueError): + dset.class_encode_column(column) + + def test_remove_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + fingerprint = dset._fingerprint + with dset.remove_columns(column_names="col_1") as new_dset: + self.assertEqual(new_dset.num_columns, 2) + self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + with dset.remove_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset: + self.assertEqual(new_dset.num_columns, 0) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset._format_columns = ["col_1", "col_2", "col_3"] + with dset.remove_columns(column_names=["col_1"]) as new_dset: + self.assertListEqual(new_dset._format_columns, ["col_2", "col_3"]) + self.assertEqual(new_dset.num_columns, 2) + self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + def test_rename_column(self, 
in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + fingerprint = dset._fingerprint + with dset.rename_column(original_column_name="col_1", new_column_name="new_name") as new_dset: + self.assertEqual(new_dset.num_columns, 3) + self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"]) + self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + def test_rename_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + fingerprint = dset._fingerprint + with dset.rename_columns({"col_1": "new_name"}) as new_dset: + self.assertEqual(new_dset.num_columns, 3) + self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"]) + self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + + with dset.rename_columns({"col_1": "new_name", "col_2": "new_name2"}) as new_dset: + self.assertEqual(new_dset.num_columns, 3) + self.assertListEqual(list(new_dset.column_names), ["new_name", "new_name2", "col_3"]) + self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + + # Original column not in dataset + with self.assertRaises(ValueError): + dset.rename_columns({"not_there": "new_name"}) + + # Empty new name + with self.assertRaises(ValueError): + dset.rename_columns({"col_1": ""}) + + # Duplicates + with self.assertRaises(ValueError): + dset.rename_columns({"col_1": "new_name", "col_2": "new_name"}) + + def test_select_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + fingerprint = dset._fingerprint + with dset.select_columns(column_names=[]) as new_dset: + self.assertEqual(new_dset.num_columns, 0) + self.assertListEqual(list(new_dset.column_names), []) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + fingerprint = dset._fingerprint + with dset.select_columns(column_names="col_1") as new_dset: + self.assertEqual(new_dset.num_columns, 1) + self.assertListEqual(list(new_dset.column_names), ["col_1"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + with dset.select_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset: + self.assertEqual(new_dset.num_columns, 3) + self.assertListEqual(list(new_dset.column_names), ["col_1", "col_2", "col_3"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + with dset.select_columns(column_names=["col_3", "col_2", "col_1"]) as new_dset: + self.assertEqual(new_dset.num_columns, 3) + self.assertListEqual(list(new_dset.column_names), ["col_3", "col_2", "col_1"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + 
assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset._format_columns = ["col_1", "col_2", "col_3"] + with dset.select_columns(column_names=["col_1"]) as new_dset: + self.assertListEqual(new_dset._format_columns, ["col_1"]) + self.assertEqual(new_dset.num_columns, 1) + self.assertListEqual(list(new_dset.column_names), ["col_1"]) + self.assertNotEqual(new_dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(new_dset) + + def test_concatenate(self, in_memory): + data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} + info1 = DatasetInfo(description="Dataset1") + info2 = DatasetInfo(description="Dataset2") + with tempfile.TemporaryDirectory() as tmp_dir: + dset1, dset2, dset3 = ( + Dataset.from_dict(data1, info=info1), + Dataset.from_dict(data2, info=info2), + Dataset.from_dict(data3), + ) + dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) + + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) + self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) + self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7]) + self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) + self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2") + del dset1, dset2, dset3 + + def test_concatenate_formatted(self, in_memory): + data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} + info1 = DatasetInfo(description="Dataset1") + info2 = DatasetInfo(description="Dataset2") + with tempfile.TemporaryDirectory() as tmp_dir: + dset1, dset2, dset3 = ( + Dataset.from_dict(data1, info=info1), + Dataset.from_dict(data2, info=info2), + Dataset.from_dict(data3), + ) + dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) + + dset1.set_format("numpy") + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertEqual(dset_concat.format["type"], None) + dset2.set_format("numpy") + dset3.set_format("numpy") + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertEqual(dset_concat.format["type"], "numpy") + del dset1, dset2, dset3 + + def test_concatenate_with_indices(self, in_memory): + data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7, 8]} + info1 = DatasetInfo(description="Dataset1") + info2 = DatasetInfo(description="Dataset2") + with tempfile.TemporaryDirectory() as tmp_dir: + dset1, dset2, dset3 = ( + Dataset.from_dict(data1, info=info1), + Dataset.from_dict(data2, info=info2), + Dataset.from_dict(data3), + ) + dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) + dset1, dset2, dset3 = dset1.select([2, 1, 0]), dset2.select([2, 1, 0]), dset3 + + with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: + self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3)) + self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) + self.assertListEqual(dset_concat["id"], [6, 7, 8, 5, 4, 3, 2, 1, 0]) + # in_memory = False: + # 3 cache files for the dset_concat._data table + # no cache file for the indices because it's in memory + # in_memory = True: + # no cache files since both dset_concat._data and dset_concat._indices are in memory + self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) + self.assertEqual(dset_concat.info.description, 
"Dataset2\n\nDataset1") + + dset1 = dset1.rename_columns({"id": "id1"}) + dset2 = dset2.rename_columns({"id": "id2"}) + dset3 = dset3.rename_columns({"id": "id3"}) + with concatenate_datasets([dset1, dset2, dset3], axis=1) as dset_concat: + self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3)) + self.assertEqual(len(dset_concat), len(dset1)) + self.assertListEqual(dset_concat["id1"], [2, 1, 0]) + self.assertListEqual(dset_concat["id2"], [5, 4, 3]) + self.assertListEqual(dset_concat["id3"], [6, 7, 8]) + # in_memory = False: + # 3 cache files for the dset_concat._data table + # no cache file for the indices because it's None + # in_memory = True: + # no cache files since dset_concat._data is in memory and dset_concat._indices is None + self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3) + self.assertIsNone(dset_concat._indices) + self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2") + + with concatenate_datasets([dset1], axis=1) as dset_concat: + self.assertEqual(len(dset_concat), len(dset1)) + self.assertListEqual(dset_concat["id1"], [2, 1, 0]) + # in_memory = False: + # 1 cache file for the dset_concat._data table + # no cache file for the indices because it's in memory + # in_memory = True: + # no cache files since both dset_concat._data and dset_concat._indices are in memory + self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 1) + self.assertTrue(dset_concat._indices == dset1._indices) + self.assertEqual(dset_concat.info.description, "Dataset1") + del dset1, dset2, dset3 + + def test_concatenate_with_indices_from_disk(self, in_memory): + data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]} + info1 = DatasetInfo(description="Dataset1") + info2 = DatasetInfo(description="Dataset2") + with tempfile.TemporaryDirectory() as tmp_dir: + dset1, dset2, dset3 = ( + Dataset.from_dict(data1, info=info1), + Dataset.from_dict(data2, info=info2), + Dataset.from_dict(data3), + ) + dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3) + dset1, dset2, dset3 = ( + dset1.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")), + dset2.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")), + dset3.select([1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")), + ) + + with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: + self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) + self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) + self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0]) + # in_memory = False: + # 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table + # There is only 1 for the indices tables (i1.arrow) + # Indeed, the others are brought to memory since an offset is applied to them. 
+ # in_memory = True: + # 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory + self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1) + self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") + del dset1, dset2, dset3 + + def test_concatenate_pickle(self, in_memory): + data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7], "foo": ["bar", "bar"]} + info1 = DatasetInfo(description="Dataset1") + info2 = DatasetInfo(description="Dataset2") + with tempfile.TemporaryDirectory() as tmp_dir: + dset1, dset2, dset3 = ( + Dataset.from_dict(data1, info=info1), + Dataset.from_dict(data2, info=info2), + Dataset.from_dict(data3), + ) + # mix of in-memory and on-disk datasets + dset1, dset2 = self._to(in_memory, tmp_dir, dset1, dset2) + dset3 = self._to(not in_memory, tmp_dir, dset3) + dset1, dset2, dset3 = ( + dset1.select( + [2, 1, 0], + keep_in_memory=in_memory, + indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow") if not in_memory else None, + ), + dset2.select( + [2, 1, 0], + keep_in_memory=in_memory, + indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow") if not in_memory else None, + ), + dset3.select( + [1, 0], + keep_in_memory=in_memory, + indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow") if not in_memory else None, + ), + ) + + dset3 = dset3.rename_column("foo", "new_foo") + dset3 = dset3.remove_columns("new_foo") + if in_memory: + dset3._data.table = Unpicklable() + else: + dset1._data.table, dset2._data.table = Unpicklable(), Unpicklable() + dset1, dset2, dset3 = (pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3)) + with concatenate_datasets([dset3, dset2, dset1]) as dset_concat: + if not in_memory: + dset_concat._data.table = Unpicklable() + with pickle.loads(pickle.dumps(dset_concat)) as dset_concat: + self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2)) + self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3)) + self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0]) + # in_memory = True: 1 cache file for dset3 + # in_memory = False: 2 cache files for dset1 and dset2, and 1 cache file for i1.arrow + self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 2 + 1) + self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1") + del dset1, dset2, dset3 + + def test_flatten(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, + features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + self.assertListEqual(sorted(dset.column_names), ["a.b.c", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a.b.c", "foo"]) + self.assertDictEqual( + dset.features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}) + ) + self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [{"en": "Thank you", "fr": "Merci"}] * 10, "foo": [1] * 10}, + features=Features({"a": Translation(languages=["en", "fr"]), "foo": Value("int64")}), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + 
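# flattening a Translation feature yields one string column per language (here a.en and a.fr) + 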
self.assertListEqual(sorted(dset.column_names), ["a.en", "a.fr", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a.en", "a.fr", "foo"]) + self.assertDictEqual( + dset.features, + Features({"a.en": Value("string"), "a.fr": Value("string"), "foo": Value("int64")}), + ) + self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [{"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}] * 10, "foo": [1] * 10}, + features=Features( + {"a": TranslationVariableLanguages(languages=["en", "fr", "de"]), "foo": Value("int64")} + ), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + self.assertListEqual(sorted(dset.column_names), ["a.language", "a.translation", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a.language", "a.translation", "foo"]) + self.assertDictEqual( + dset.features, + Features( + { + "a.language": Sequence(Value("string")), + "a.translation": Sequence(Value("string")), + "foo": Value("int64"), + } + ), + ) + self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + @require_pil + def test_flatten_complex_image(self, in_memory): + # decoding turned on + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10}, + features=Features({"a": Image(), "foo": Value("int64")}), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + self.assertListEqual(sorted(dset.column_names), ["a", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a", "foo"]) + self.assertDictEqual(dset.features, Features({"a": Image(), "foo": Value("int64")})) + self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + # decoding turned on + nesting + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10}, + features=Features({"a": {"b": Image()}, "foo": Value("int64")}), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + self.assertListEqual(sorted(dset.column_names), ["a.b", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a.b", "foo"]) + self.assertDictEqual(dset.features, Features({"a.b": Image(), "foo": Value("int64")})) + self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + # decoding turned off + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10}, + features=Features({"a": Image(decode=False), "foo": Value("int64")}), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + self.assertListEqual(sorted(dset.column_names), ["a.bytes", "a.path", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a.bytes", "a.path", "foo"]) + self.assertDictEqual( + dset.features, + Features({"a.bytes": Value("binary"), "a.path": Value("string"), "foo": Value("int64")}), + ) + 
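# with decode=False, the Image feature flattens into its storage columns: raw bytes and path + 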
self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + # decoding turned off + nesting + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10}, + features=Features({"a": {"b": Image(decode=False)}, "foo": Value("int64")}), + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fingerprint = dset._fingerprint + with dset.flatten() as dset: + self.assertListEqual(sorted(dset.column_names), ["a.b.bytes", "a.b.path", "foo"]) + self.assertListEqual(sorted(dset.features.keys()), ["a.b.bytes", "a.b.path", "foo"]) + self.assertDictEqual( + dset.features, + Features( + {"a.b.bytes": Value("binary"), "a.b.path": Value("string"), "foo": Value("int64")} + ), + ) + self.assertNotEqual(dset._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + def test_map(self, in_memory): + # standard + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + fingerprint = dset._fingerprint + with dset.map( + lambda x: {"name": x["filename"][:-2], "id": int(x["filename"].split("_")[-1])} + ) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), + ) + self.assertListEqual(dset_test["id"], list(range(30))) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + # no transform + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.map(lambda x: None) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + # with indices + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map( + lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True + ) as dset_test_with_indices: + self.assertEqual(len(dset_test_with_indices), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_with_indices.features, + Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), + ) + self.assertListEqual(dset_test_with_indices["id"], list(range(30))) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) + + # interrupted + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + + def func(x, i): + if i == 4: + raise KeyboardInterrupt() + return {"name": x["filename"][:-2], "id": i} + + tmp_file = os.path.join(tmp_dir, "test.arrow") + self.assertRaises( + KeyboardInterrupt, + dset.map, + function=func, + with_indices=True, + cache_file_name=tmp_file, + writer_batch_size=2, + ) + self.assertFalse(os.path.exists(tmp_file)) + with dset.map( + lambda x, i: {"name": x["filename"][:-2], "id": i}, + with_indices=True, + cache_file_name=tmp_file, + writer_batch_size=2, + ) as dset_test_with_indices: + self.assertTrue(os.path.exists(tmp_file)) 
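+ # the interrupted run left no partial file (checked above); the rerun has now written the cache file completely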
+ self.assertEqual(len(dset_test_with_indices), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_with_indices.features, + Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), + ) + self.assertListEqual(dset_test_with_indices["id"], list(range(30))) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) + + # formatted + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_format("numpy", columns=["col_1"]) + with dset.map(lambda x: {"col_1_plus_one": x["col_1"] + 1}) as dset_test: + self.assertEqual(len(dset_test), 4) + self.assertEqual(dset_test.format["type"], "numpy") + self.assertIsInstance(dset_test["col_1"], np.ndarray) + self.assertIsInstance(dset_test["col_1_plus_one"], np.ndarray) + self.assertListEqual(sorted(dset_test[0].keys()), ["col_1", "col_1_plus_one"]) + self.assertListEqual(sorted(dset_test.column_names), ["col_1", "col_1_plus_one", "col_2", "col_3"]) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + def test_map_multiprocessing(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: # standard + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + fingerprint = dset._fingerprint + with dset.map(picklable_map_function, num_proc=2) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "id": Value("int64")}), + ) + self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) + if not in_memory: + self.assertIn("_of_00002.arrow", dset_test.cache_files[0]["filename"]) + self.assertListEqual(dset_test["id"], list(range(30))) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + with tempfile.TemporaryDirectory() as tmp_dir: # num_proc > num rows + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + fingerprint = dset._fingerprint + with dset.select([0, 1], keep_in_memory=True).map(picklable_map_function, num_proc=10) as dset_test: + self.assertEqual(len(dset_test), 2) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "id": Value("int64")}), + ) + self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) + self.assertListEqual(dset_test["id"], list(range(2))) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + with tempfile.TemporaryDirectory() as tmp_dir: # with_indices + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.map(picklable_map_function_with_indices, num_proc=3, with_indices=True) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "id": Value("int64")}), + ) + self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) + self.assertListEqual(dset_test["id"], 
list(range(30))) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + with tempfile.TemporaryDirectory() as tmp_dir: # with_rank + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.map(picklable_map_function_with_rank, num_proc=3, with_rank=True) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "rank": Value("int64")}), + ) + self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) + self.assertListEqual(dset_test["rank"], [0] * 10 + [1] * 10 + [2] * 10) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + with tempfile.TemporaryDirectory() as tmp_dir: # with_indices AND with_rank + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.map( + picklable_map_function_with_indices_and_rank, num_proc=3, with_indices=True, with_rank=True + ) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "id": Value("int64"), "rank": Value("int64")}), + ) + self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3) + self.assertListEqual(dset_test["id"], list(range(30))) + self.assertListEqual(dset_test["rank"], [0] * 10 + [1] * 10 + [2] * 10) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + with tempfile.TemporaryDirectory() as tmp_dir: # new_fingerprint + new_fingerprint = "foobar" + invalid_new_fingerprint = "foobar/hey" + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + self.assertRaises( + ValueError, dset.map, picklable_map_function, num_proc=2, new_fingerprint=invalid_new_fingerprint + ) + with dset.map(picklable_map_function, num_proc=2, new_fingerprint=new_fingerprint) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "id": Value("int64")}), + ) + self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) + self.assertListEqual(dset_test["id"], list(range(30))) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + self.assertEqual(dset_test._fingerprint, new_fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + file_names = sorted(Path(cache_file["filename"]).name for cache_file in dset_test.cache_files) + for i, file_name in enumerate(file_names): + self.assertIn(new_fingerprint + f"_{i:05d}", file_name) + + with tempfile.TemporaryDirectory() as tmp_dir: # lambda (requires multiprocess from pathos) + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.map(lambda x: {"id": int(x["filename"].split("_")[-1])}, num_proc=2) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "id": Value("int64")}), + ) + 
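# with num_proc=2, each worker writes its own cache shard when the dataset lives on disk + 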
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2) + self.assertListEqual(dset_test["id"], list(range(30))) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test) + + def test_map_new_features(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + features = Features({"filename": Value("string"), "label": ClassLabel(names=["positive", "negative"])}) + with dset.map( + lambda x, i: {"label": i % 2}, with_indices=True, features=features + ) as dset_test_with_indices: + self.assertEqual(len(dset_test_with_indices), 30) + self.assertDictEqual( + dset_test_with_indices.features, + features, + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices) + + def test_map_batched(self, in_memory): + def map_batched(example): + return {"filename_new": [x + "_extension" for x in example["filename"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(map_batched, batched=True) as dset_test_batched: + self.assertEqual(len(dset_test_batched), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_batched.features, + Features({"filename": Value("string"), "filename_new": Value("string")}), + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) + + # change batch size and drop the last batch + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + batch_size = 4 + with dset.map( + map_batched, batched=True, batch_size=batch_size, drop_last_batch=True + ) as dset_test_batched: + self.assertEqual(len(dset_test_batched), 30 // batch_size * batch_size) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_batched.features, + Features({"filename": Value("string"), "filename_new": Value("string")}), + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.formatted_as("numpy", columns=["filename"]): + with dset.map(map_batched, batched=True) as dset_test_batched: + self.assertEqual(len(dset_test_batched), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_batched.features, + Features({"filename": Value("string"), "filename_new": Value("string")}), + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched) + + def map_batched_with_indices(example, idx): + return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map( + map_batched_with_indices, batched=True, with_indices=True + ) as dset_test_with_indices_batched: + self.assertEqual(len(dset_test_with_indices_batched), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_with_indices_batched.features, + Features({"filename": Value("string"), "filename_new": Value("string")}), + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices_batched) + + # check that remove_columns works even if the function modifies its input in-place + def 
map_batched_modifying_inputs_inplace(example): + result = {"filename_new": [x + "_extension" for x in example["filename"]]} + del example["filename"] + return result + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map( + map_batched_modifying_inputs_inplace, batched=True, remove_columns="filename" + ) as dset_test_modifying_inputs_inplace: + self.assertEqual(len(dset_test_modifying_inputs_inplace), 30) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual( + dset_test_modifying_inputs_inplace.features, + Features({"filename_new": Value("string")}), + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset_test_modifying_inputs_inplace) + + def test_map_nested(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict({"field": ["a", "b"]}) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.map(lambda example: {"otherfield": {"capital": example["field"].capitalize()}}) as dset: + with dset.map(lambda example: {"otherfield": {"append_x": example["field"] + "x"}}) as dset: + self.assertEqual(dset[0], {"field": "a", "otherfield": {"append_x": "ax"}}) + + def test_map_return_example_as_dict_value(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict({"en": ["aa", "bb"], "fr": ["cc", "dd"]}) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.map(lambda example: {"translation": example}) as dset: + self.assertEqual(dset[0], {"en": "aa", "fr": "cc", "translation": {"en": "aa", "fr": "cc"}}) + + def test_map_fn_kwargs(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict({"id": range(10)}) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fn_kwargs = {"offset": 3} + with dset.map( + lambda example, offset: {"id+offset": example["id"] + offset}, fn_kwargs=fn_kwargs + ) as mapped_dset: + assert mapped_dset["id+offset"] == list(range(3, 13)) + with dset.map( + lambda id, offset: {"id+offset": id + offset}, fn_kwargs=fn_kwargs, input_columns="id" + ) as mapped_dset: + assert mapped_dset["id+offset"] == list(range(3, 13)) + with dset.map( + lambda id, i, offset: {"id+offset": i + offset}, + fn_kwargs=fn_kwargs, + input_columns="id", + with_indices=True, + ) as mapped_dset: + assert mapped_dset["id+offset"] == list(range(3, 13)) + + def test_map_caching(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + self._caplog.clear() + with self._caplog.at_level(INFO, logger=get_logger().name): + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with patch( + "datasets.arrow_dataset.Dataset._map_single", + autospec=Dataset._map_single, + side_effect=Dataset._map_single, + ) as mock_map_single: + with dset.map(lambda x: {"foo": "bar"}) as dset_test1: + dset_test1_data_files = list(dset_test1.cache_files) + self.assertEqual(mock_map_single.call_count, 1) + with dset.map(lambda x: {"foo": "bar"}) as dset_test2: + self.assertEqual(dset_test1_data_files, dset_test2.cache_files) + self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory)) + self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory) + self.assertEqual(mock_map_single.call_count, 2 if in_memory else 1) + + with tempfile.TemporaryDirectory() as tmp_dir: + self._caplog.clear() + with self._caplog.at_level(INFO, logger=get_logger().name): + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + 
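# load_from_cache_file=False must force a recompute even though a cache file already exists + 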
with dset.map(lambda x: {"foo": "bar"}) as dset_test1: + dset_test1_data_files = list(dset_test1.cache_files) + with dset.map(lambda x: {"foo": "bar"}, load_from_cache_file=False) as dset_test2: + self.assertEqual(dset_test1_data_files, dset_test2.cache_files) + self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory)) + self.assertNotIn("Loading cached processed dataset", self._caplog.text) + + with tempfile.TemporaryDirectory() as tmp_dir: + self._caplog.clear() + with self._caplog.at_level(INFO, logger=get_logger().name): + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with patch( + "datasets.arrow_dataset.Pool", + new_callable=PickableMagicMock, + side_effect=datasets.arrow_dataset.Pool, + ) as mock_pool: + with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1: + dset_test1_data_files = list(dset_test1.cache_files) + self.assertEqual(mock_pool.call_count, 1) + with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test2: + self.assertEqual(dset_test1_data_files, dset_test2.cache_files) + self.assertTrue( + (len(re.findall("Loading cached processed dataset", self._caplog.text)) == 1) + ^ in_memory + ) + self.assertEqual(mock_pool.call_count, 2 if in_memory else 1) + + with tempfile.TemporaryDirectory() as tmp_dir: + self._caplog.clear() + with self._caplog.at_level(INFO, logger=get_logger().name): + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1: + dset_test1_data_files = list(dset_test1.cache_files) + with dset.map(lambda x: {"foo": "bar"}, num_proc=2, load_from_cache_file=False) as dset_test2: + self.assertEqual(dset_test1_data_files, dset_test2.cache_files) + self.assertEqual(len(dset_test2.cache_files), (1 - int(in_memory)) * 2) + self.assertNotIn("Loading cached processed dataset", self._caplog.text) + + if not in_memory: + try: + self._caplog.clear() + with tempfile.TemporaryDirectory() as tmp_dir: + with self._caplog.at_level(INFO, logger=get_logger().name): + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + datasets.disable_caching() + with dset.map(lambda x: {"foo": "bar"}) as dset_test1: + with dset.map(lambda x: {"foo": "bar"}) as dset_test2: + self.assertNotEqual(dset_test1.cache_files, dset_test2.cache_files) + self.assertEqual(len(dset_test1.cache_files), 1) + self.assertEqual(len(dset_test2.cache_files), 1) + self.assertNotIn("Loading cached processed dataset", self._caplog.text) + # make sure the arrow files are going to be removed + self.assertIn("tmp", dset_test1.cache_files[0]["filename"]) + self.assertIn("tmp", dset_test2.cache_files[0]["filename"]) + finally: + datasets.enable_caching() + + def test_map_return_pa_table(self, in_memory): + def func_return_single_row_pa_table(x): + return pa.table({"id": [0], "text": ["a"]}) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func_return_single_row_pa_table) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"id": Value("int64"), "text": Value("string")}), + ) + self.assertEqual(dset_test[0]["id"], 0) + self.assertEqual(dset_test[0]["text"], "a") + + # Batched + def func_return_single_row_pa_table_batched(x): + batch_size = len(x[next(iter(x))]) + return pa.table({"id": [0] * batch_size, "text": ["a"] * batch_size}) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with 
dset.map(func_return_single_row_pa_table_batched, batched=True) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"id": Value("int64"), "text": Value("string")}), + ) + self.assertEqual(dset_test[0]["id"], 0) + self.assertEqual(dset_test[0]["text"], "a") + + # Error when returning a table with more than one row in the non-batched mode + def func_return_multi_row_pa_table(x): + return pa.table({"id": [0, 1], "text": ["a", "b"]}) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertRaises(ValueError, dset.map, func_return_multi_row_pa_table) + + def test_map_return_pd_dataframe(self, in_memory): + def func_return_single_row_pd_dataframe(x): + return pd.DataFrame({"id": [0], "text": ["a"]}) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func_return_single_row_pd_dataframe) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"id": Value("int64"), "text": Value("string")}), + ) + self.assertEqual(dset_test[0]["id"], 0) + self.assertEqual(dset_test[0]["text"], "a") + + # Batched + def func_return_single_row_pd_dataframe_batched(x): + batch_size = len(x[next(iter(x))]) + return pd.DataFrame({"id": [0] * batch_size, "text": ["a"] * batch_size}) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func_return_single_row_pd_dataframe_batched, batched=True) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"id": Value("int64"), "text": Value("string")}), + ) + self.assertEqual(dset_test[0]["id"], 0) + self.assertEqual(dset_test[0]["text"], "a") + + # Error when returning a table with more than one row in the non-batched mode + def func_return_multi_row_pd_dataframe(x): + return pd.DataFrame({"id": [0, 1], "text": ["a", "b"]}) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertRaises(ValueError, dset.map, func_return_multi_row_pd_dataframe) + + @require_torch + def test_map_torch(self, in_memory): + import torch + + def func(example): + return {"tensor": torch.tensor([1.0, 2, 3])} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}), + ) + self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) + + @require_tf + def test_map_tf(self, in_memory): + import tensorflow as tf + + def func(example): + return {"tensor": tf.constant([1.0, 2, 3])} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}), + ) + self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) + + @require_jax + def test_map_jax(self, in_memory): + import jax.numpy as jnp + + def func(example): + return {"tensor": jnp.asarray([1.0, 2, 3])} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as 
dset: + with dset.map(func) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}), + ) + self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) + + def test_map_numpy(self, in_memory): + def func(example): + return {"tensor": np.array([1.0, 2, 3])} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "tensor": Sequence(Value("float64"))}), + ) + self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) + + @require_torch + def test_map_tensor_batched(self, in_memory): + import torch + + def func(batch): + return {"tensor": torch.tensor([[1.0, 2, 3]] * len(batch["filename"]))} + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(func, batched=True) as dset_test: + self.assertEqual(len(dset_test), 30) + self.assertDictEqual( + dset_test.features, + Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}), + ) + self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3]) + + def test_map_input_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + with dset.map(lambda col_1: {"label": col_1 % 2}, input_columns="col_1") as mapped_dset: + self.assertEqual(mapped_dset[0].keys(), {"col_1", "col_2", "col_3", "label"}) + self.assertEqual( + mapped_dset.features, + Features( + { + "col_1": Value("int64"), + "col_2": Value("string"), + "col_3": Value("bool"), + "label": Value("int64"), + } + ), + ) + + def test_map_remove_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.map(lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True) as dset: + self.assertTrue("id" in dset[0]) + self.assertDictEqual( + dset.features, + Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}), + ) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + with dset.map(lambda x: x, remove_columns=["id"]) as mapped_dset: + self.assertTrue("id" not in mapped_dset[0]) + self.assertDictEqual( + mapped_dset.features, Features({"filename": Value("string"), "name": Value("string")}) + ) + assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset) + with mapped_dset.with_format("numpy", columns=mapped_dset.column_names) as mapped_dset: + with mapped_dset.map( + lambda x: {"name": 1}, remove_columns=mapped_dset.column_names + ) as mapped_dset: + self.assertTrue("filename" not in mapped_dset[0]) + self.assertTrue("name" in mapped_dset[0]) + self.assertDictEqual(mapped_dset.features, Features({"name": Value(dtype="int64")})) + assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset) + # empty dataset + columns_names = dset.column_names + with dset.select([]) as empty_dset: + self.assertEqual(len(empty_dset), 0) + with empty_dset.map(lambda x: {}, remove_columns=columns_names[0]) as mapped_dset: + self.assertListEqual(columns_names[1:], mapped_dset.column_names) + assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset) + + def test_map_stateful_callable(self, in_memory): + # be sure that the state of the map 
callable is unaffected + # before it starts processing the dataset examples + + class ExampleCounter: + def __init__(self, batched=False): + self.batched = batched + # state + self.cnt = 0 + + def __call__(self, example): + if self.batched: + self.cnt += len(example) + else: + self.cnt += 1 + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + ex_cnt = ExampleCounter() + dset.map(ex_cnt) + self.assertEqual(ex_cnt.cnt, len(dset)) + + ex_cnt = ExampleCounter(batched=True) + dset.map(ex_cnt) + self.assertEqual(ex_cnt.cnt, len(dset)) + + @require_not_windows + def test_map_crash_subprocess(self, in_memory): + # be sure that a crash in one of the subprocesses will not + # hang the dataset.map() call forever + + def do_crash(row): + import os + + os.kill(os.getpid(), 9) + return row + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with pytest.raises(RuntimeError) as excinfo: + dset.map(do_crash, num_proc=2) + assert str(excinfo.value) == ( + "One of the subprocesses has abruptly died during map operation." + "To debug the error, disable multiprocessing." + ) + + def test_filter(self, in_memory): + # keep only the first five examples + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five: + self.assertEqual(len(dset_filter_first_five), 5) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_filter_first_five.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint) + + # filter filenames with even id at the end + formatted + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + dset.set_format("numpy") + fingerprint = dset._fingerprint + with dset.filter(lambda x: (int(x["filename"][-1]) % 2 == 0)) as dset_filter_even_num: + self.assertEqual(len(dset_filter_even_num), 15) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_filter_even_num.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint) + self.assertEqual(dset_filter_even_num.format["type"], "numpy") + + def test_filter_with_indices_mapping(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + dset = Dataset.from_dict({"col": [0, 1, 2]}) + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.filter(lambda x: x["col"] > 0) as dset: + self.assertListEqual(dset["col"], [1, 2]) + with dset.filter(lambda x: x["col"] < 2) as dset: + self.assertListEqual(dset["col"], [1]) + + def test_filter_empty(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertIsNone(dset._indices) + + tmp_file = os.path.join(tmp_dir, "test.arrow") + with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset: + self.assertEqual(len(dset), 0) + self.assertIsNotNone(dset._indices) + + tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") + with dset.filter(lambda _: False, cache_file_name=tmp_file_2) as dset2: + self.assertEqual(len(dset2), 0) + self.assertEqual(dset._indices, dset2._indices) + + def test_filter_batched(self, in_memory): + with tempfile.TemporaryDirectory() 
as tmp_dir: + dset = Dataset.from_dict({"col": [0, 1, 2]}) + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.filter(lambda x: [i > 0 for i in x["col"]], batched=True) as dset: + self.assertListEqual(dset["col"], [1, 2]) + with dset.filter(lambda x: [i < 2 for i in x["col"]], batched=True) as dset: + self.assertListEqual(dset["col"], [1]) + + def test_filter_input_columns(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + dset = Dataset.from_dict({"col_1": [0, 1, 2], "col_2": ["a", "b", "c"]}) + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.filter(lambda x: x > 0, input_columns=["col_1"]) as filtered_dset: + self.assertListEqual(filtered_dset.column_names, dset.column_names) + self.assertListEqual(filtered_dset["col_1"], [1, 2]) + self.assertListEqual(filtered_dset["col_2"], ["b", "c"]) + + def test_filter_fn_kwargs(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict({"id": range(10)}) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + fn_kwargs = {"max_offset": 3} + with dset.filter( + lambda example, max_offset: example["id"] < max_offset, fn_kwargs=fn_kwargs + ) as filtered_dset: + assert len(filtered_dset) == 3 + with dset.filter( + lambda id, max_offset: id < max_offset, fn_kwargs=fn_kwargs, input_columns="id" + ) as filtered_dset: + assert len(filtered_dset) == 3 + with dset.filter( + lambda id, i, max_offset: i < max_offset, + fn_kwargs=fn_kwargs, + input_columns="id", + with_indices=True, + ) as filtered_dset: + assert len(filtered_dset) == 3 + + def test_filter_multiprocessing(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + fingerprint = dset._fingerprint + with dset.filter(picklable_filter_function, num_proc=2) as dset_filter_first_ten: + self.assertEqual(len(dset_filter_first_ten), 10) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_filter_first_ten.features, Features({"filename": Value("string")})) + self.assertEqual(len(dset_filter_first_ten.cache_files), 0 if in_memory else 2) + self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint) + + def test_filter_caching(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + self._caplog.clear() + with self._caplog.at_level(INFO, logger=get_logger().name): + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five1: + dset_test1_data_files = list(dset_filter_first_five1.cache_files) + with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five2: + self.assertEqual(dset_test1_data_files, dset_filter_first_five2.cache_files) + self.assertEqual(len(dset_filter_first_five2.cache_files), 0 if in_memory else 2) + self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory) + + def test_keep_features_after_transform_specified(self, in_memory): + features = Features( + {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))} + ) + + def invert_labels(x): + return {"labels": [(1 - label) for label in x["labels"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.map(invert_labels, features=features) as inverted_dset: + 
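# passing features= explicitly should carry the ClassLabel feature through the transform + 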
self.assertEqual(inverted_dset.features.type, features.type) + self.assertDictEqual(inverted_dset.features, features) + assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset) + + def test_keep_features_after_transform_unspecified(self, in_memory): + features = Features( + {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))} + ) + + def invert_labels(x): + return {"labels": [(1 - label) for label in x["labels"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.map(invert_labels) as inverted_dset: + self.assertEqual(inverted_dset.features.type, features.type) + self.assertDictEqual(inverted_dset.features, features) + assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset) + + def test_keep_features_after_transform_to_file(self, in_memory): + features = Features( + {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))} + ) + + def invert_labels(x): + return {"labels": [(1 - label) for label in x["labels"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + tmp_file = os.path.join(tmp_dir, "test.arrow") + dset.map(invert_labels, cache_file_name=tmp_file) + with Dataset.from_file(tmp_file) as inverted_dset: + self.assertEqual(inverted_dset.features.type, features.type) + self.assertDictEqual(inverted_dset.features, features) + + def test_keep_features_after_transform_to_memory(self, in_memory): + features = Features( + {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))} + ) + + def invert_labels(x): + return {"labels": [(1 - label) for label in x["labels"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.map(invert_labels, keep_in_memory=True) as inverted_dset: + self.assertEqual(inverted_dset.features.type, features.type) + self.assertDictEqual(inverted_dset.features, features) + + def test_keep_features_after_loading_from_cache(self, in_memory): + features = Features( + {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))} + ) + + def invert_labels(x): + return {"labels": [(1 - label) for label in x["labels"]]} + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + tmp_file1 = os.path.join(tmp_dir, "test1.arrow") + tmp_file2 = os.path.join(tmp_dir, "test2.arrow") + # TODO: Why mapped twice? 
+ inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file1) + inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file2) + self.assertGreater(len(inverted_dset.cache_files), 0) + self.assertEqual(inverted_dset.features.type, features.type) + self.assertDictEqual(inverted_dset.features, features) + del inverted_dset + + def test_keep_features_with_new_features(self, in_memory): + features = Features( + {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))} + ) + + def invert_labels(x): + return {"labels": [(1 - label) for label in x["labels"]], "labels2": x["labels"]} + + expected_features = Features( + { + "tokens": Sequence(Value("string")), + "labels": Sequence(ClassLabel(names=["negative", "positive"])), + "labels2": Sequence(Value("int64")), + } + ) + + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict( + {"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features + ) as dset: + with self._to(in_memory, tmp_dir, dset) as dset: + with dset.map(invert_labels) as inverted_dset: + self.assertEqual(inverted_dset.features.type, expected_features.type) + self.assertDictEqual(inverted_dset.features, expected_features) + assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset) + + def test_select(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + # select every other example + indices = list(range(0, len(dset), 2)) + tmp_file = os.path.join(tmp_dir, "test.arrow") + fingerprint = dset._fingerprint + with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_even: + self.assertIsNotNone(dset_select_even._indices) # an indices mapping is created + self.assertTrue(os.path.exists(tmp_file)) + self.assertEqual(len(dset_select_even), 15) + for row in dset_select_even: + self.assertEqual(int(row["filename"][-1]) % 2, 0) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_select_even.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_select_even._fingerprint, fingerprint) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + indices = list(range(0, len(dset))) + with dset.select(indices) as dset_select_all: + # no indices mapping, since the indices are contiguous + # (in this case the arrow table is simply sliced, which is more efficient) + self.assertIsNone(dset_select_all._indices) + self.assertEqual(len(dset_select_all), len(dset)) + self.assertListEqual(list(dset_select_all), list(dset)) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_select_all._fingerprint, fingerprint) + indices = range(0, len(dset)) + with dset.select(indices) as dset_select_all: + # same but with range + self.assertIsNone(dset_select_all._indices) + self.assertEqual(len(dset_select_all), len(dset)) + self.assertListEqual(list(dset_select_all), list(dset)) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_select_all._fingerprint, fingerprint) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + bad_indices = list(range(5)) + 
bad_indices[-1] = len(dset) + 10 # out of bounds + tmp_file = os.path.join(tmp_dir, "test.arrow") + self.assertRaises( + Exception, + dset.select, + indices=bad_indices, + indices_cache_file_name=tmp_file, + writer_batch_size=2, + ) + self.assertFalse(os.path.exists(tmp_file)) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + indices = iter(range(len(dset))) # iterator of contiguous indices + with dset.select(indices) as dset_select_all: + # no indices mapping, since the indices are contiguous + self.assertIsNone(dset_select_all._indices) + self.assertEqual(len(dset_select_all), len(dset)) + indices = reversed(range(len(dset))) # iterator of non-contiguous indices + tmp_file = os.path.join(tmp_dir, "test.arrow") + with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_all: + # new indices mapping, since the indices are not contiguous + self.assertIsNotNone(dset_select_all._indices) + self.assertEqual(len(dset_select_all), len(dset)) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + bad_indices = list(range(5)) + bad_indices[3] = "foo" # wrong type + tmp_file = os.path.join(tmp_dir, "test.arrow") + self.assertRaises( + Exception, + dset.select, + indices=bad_indices, + indices_cache_file_name=tmp_file, + writer_batch_size=2, + ) + self.assertFalse(os.path.exists(tmp_file)) + dset.set_format("numpy") + with dset.select( + range(5), + indices_cache_file_name=tmp_file, + writer_batch_size=2, + ) as dset_select_five: + self.assertIsNone(dset_select_five._indices) + self.assertEqual(len(dset_select_five), 5) + self.assertEqual(dset_select_five.format["type"], "numpy") + for i, row in enumerate(dset_select_five): + self.assertEqual(int(row["filename"][-1]), i) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_select_five.features, Features({"filename": Value("string")})) + + def test_select_then_map(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.select([0]) as d1: + with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1: + self.assertEqual(d1[0]["id"], 0) + with dset.select([1]) as d2: + with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2: + self.assertEqual(d2[0]["id"], 1) + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + with dset.select([0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")) as d1: + with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1: + self.assertEqual(d1[0]["id"], 0) + with dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")) as d2: + with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2: + self.assertEqual(d2[0]["id"], 1) + + def test_pickle_after_many_transforms_on_disk(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertEqual(len(dset.cache_files), 0 if in_memory else 1) + with dset.rename_column("filename", "file") as dset: + self.assertListEqual(dset.column_names, ["file"]) + with dset.select(range(5)) as dset: + self.assertEqual(len(dset), 5) + with dset.map(lambda x: {"id": int(x["file"][-1])}) as dset: + self.assertListEqual(sorted(dset.column_names), ["file", "id"]) + with dset.rename_column("id", "number") as 
dset: + self.assertListEqual(sorted(dset.column_names), ["file", "number"]) + with dset.select([1, 0]) as dset: + self.assertEqual(dset[0]["file"], "my_name-train_1") + self.assertEqual(dset[0]["number"], 1) + + self.assertEqual(dset._indices["indices"].to_pylist(), [1, 0]) + if not in_memory: + self.assertIn( + ("rename_columns", (["file", "number"],), {}), + dset._data.replays, + ) + if not in_memory: + dset._data.table = Unpicklable() # check that we don't pickle the entire table + + pickled = pickle.dumps(dset) + with pickle.loads(pickled) as loaded: + self.assertEqual(loaded[0]["file"], "my_name-train_1") + self.assertEqual(loaded[0]["number"], 1) + + def test_shuffle(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + tmp_file = os.path.join(tmp_dir, "test.arrow") + fingerprint = dset._fingerprint + + with dset.shuffle(seed=1234, keep_in_memory=True) as dset_shuffled: + self.assertEqual(len(dset_shuffled), 30) + self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28") + self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10") + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_shuffled._fingerprint, fingerprint) + + with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled: + self.assertEqual(len(dset_shuffled), 30) + self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28") + self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10") + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_shuffled._fingerprint, fingerprint) + + # Reproducibility + tmp_file = os.path.join(tmp_dir, "test_2.arrow") + with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled_2: + self.assertListEqual(dset_shuffled["filename"], dset_shuffled_2["filename"]) + + # Compatible with temp_seed + with temp_seed(42), dset.shuffle() as d1: + with temp_seed(42), dset.shuffle() as d2, dset.shuffle() as d3: + self.assertListEqual(d1["filename"], d2["filename"]) + self.assertEqual(d1._fingerprint, d2._fingerprint) + self.assertNotEqual(d3["filename"], d2["filename"]) + self.assertNotEqual(d3._fingerprint, d2._fingerprint) + + def test_sort(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + # Sort on a single key + with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir) as dset: + # Keep only 10 examples + tmp_file = os.path.join(tmp_dir, "test.arrow") + with dset.select(range(10), indices_cache_file_name=tmp_file) as dset: + tmp_file = os.path.join(tmp_dir, "test_2.arrow") + with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset: + self.assertEqual(len(dset), 10) + self.assertEqual(dset[0]["filename"], "my_name-train_8") + self.assertEqual(dset[1]["filename"], "my_name-train_9") + # Sort + tmp_file = os.path.join(tmp_dir, "test_3.arrow") + fingerprint = dset._fingerprint + with dset.sort("filename", indices_cache_file_name=tmp_file) as dset_sorted: + for i, row in enumerate(dset_sorted): + self.assertEqual(int(row["filename"][-1]), i) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")})) + 
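+ # The fingerprint is the hash that `datasets` uses for caching; any
+ # transform that changes the dataset state, such as `sort`, should
+ # derive a fresh one.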
self.assertNotEqual(dset_sorted._fingerprint, fingerprint) + # Sort reversed + tmp_file = os.path.join(tmp_dir, "test_4.arrow") + fingerprint = dset._fingerprint + with dset.sort("filename", indices_cache_file_name=tmp_file, reverse=True) as dset_sorted: + for i, row in enumerate(dset_sorted): + self.assertEqual(int(row["filename"][-1]), len(dset_sorted) - 1 - i) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_sorted._fingerprint, fingerprint) + # formatted + dset.set_format("numpy") + with dset.sort("filename") as dset_sorted_formatted: + self.assertEqual(dset_sorted_formatted.format["type"], "numpy") + # Sort on multiple keys + with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True) as dset: + tmp_file = os.path.join(tmp_dir, "test_5.arrow") + fingerprint = dset._fingerprint + # Throw error when reverse is a list of bools that does not match the length of column_names + with pytest.raises(ValueError): + dset.sort(["col_1", "col_2", "col_3"], reverse=[False]) + with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset: + # Sort + with dset.sort(["col_1", "col_2", "col_3"], reverse=[False, True, False]) as dset_sorted: + for i, row in enumerate(dset_sorted): + self.assertEqual(row["col_1"], i) + self.assertDictEqual( + dset.features, + Features( + { + "col_1": Value("int64"), + "col_2": Value("string"), + "col_3": Value("bool"), + } + ), + ) + self.assertDictEqual( + dset_sorted.features, + Features( + { + "col_1": Value("int64"), + "col_2": Value("string"), + "col_3": Value("bool"), + } + ), + ) + self.assertNotEqual(dset_sorted._fingerprint, fingerprint) + # Sort reversed + with dset.sort(["col_1", "col_2", "col_3"], reverse=[True, False, True]) as dset_sorted: + for i, row in enumerate(dset_sorted): + self.assertEqual(row["col_1"], len(dset_sorted) - 1 - i) + self.assertDictEqual( + dset.features, + Features( + { + "col_1": Value("int64"), + "col_2": Value("string"), + "col_3": Value("bool"), + } + ), + ) + self.assertDictEqual( + dset_sorted.features, + Features( + { + "col_1": Value("int64"), + "col_2": Value("string"), + "col_3": Value("bool"), + } + ), + ) + self.assertNotEqual(dset_sorted._fingerprint, fingerprint) + # formatted + dset.set_format("numpy") + with dset.sort( + ["col_1", "col_2", "col_3"], reverse=[False, True, False] + ) as dset_sorted_formatted: + self.assertEqual(dset_sorted_formatted.format["type"], "numpy") + + @require_tf + def test_export(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + # Export the data + tfrecord_path = os.path.join(tmp_dir, "test.tfrecord") + with dset.map( + lambda ex, i: { + "id": i, + "question": f"Question {i}", + "answers": {"text": [f"Answer {i}-0", f"Answer {i}-1"], "answer_start": [0, 1]}, + }, + with_indices=True, + remove_columns=["filename"], + ) as formatted_dset: + with formatted_dset.flatten() as formatted_dset: + formatted_dset.set_format("numpy") + formatted_dset.export(filename=tfrecord_path, format="tfrecord") + + # Import the data + import tensorflow as tf + + tf_dset = tf.data.TFRecordDataset([tfrecord_path]) + feature_description = { + "id": tf.io.FixedLenFeature([], tf.int64), + "question": tf.io.FixedLenFeature([], tf.string), + "answers.text": tf.io.VarLenFeature(tf.string), + "answers.answer_start": tf.io.VarLenFeature(tf.int64), + } + 
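+ # Parse each serialized tf.train.Example back with the feature spec above,
+ # so the round-tripped records can be compared against the source dataset.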
tf_parsed_dset = tf_dset.map( + lambda example_proto: tf.io.parse_single_example(example_proto, feature_description) + ) + # Test that keys match original dataset + for i, ex in enumerate(tf_parsed_dset): + self.assertEqual(ex.keys(), formatted_dset[i].keys()) + # Test for equal number of elements + self.assertEqual(i, len(formatted_dset) - 1) + + def test_to_csv(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + # File path argument + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + file_path = os.path.join(tmp_dir, "test_path.csv") + bytes_written = dset.to_csv(path_or_buf=file_path) + + self.assertTrue(os.path.isfile(file_path)) + self.assertEqual(bytes_written, os.path.getsize(file_path)) + csv_dset = pd.read_csv(file_path) + + self.assertEqual(csv_dset.shape, dset.shape) + self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) + + # File buffer argument + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + file_path = os.path.join(tmp_dir, "test_buffer.csv") + with open(file_path, "wb+") as buffer: + bytes_written = dset.to_csv(path_or_buf=buffer) + + self.assertTrue(os.path.isfile(file_path)) + self.assertEqual(bytes_written, os.path.getsize(file_path)) + csv_dset = pd.read_csv(file_path) + + self.assertEqual(csv_dset.shape, dset.shape) + self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) + + # After a select/shuffle transform + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset = dset.select(range(0, len(dset), 2)).shuffle() + file_path = os.path.join(tmp_dir, "test_path.csv") + bytes_written = dset.to_csv(path_or_buf=file_path) + + self.assertTrue(os.path.isfile(file_path)) + self.assertEqual(bytes_written, os.path.getsize(file_path)) + csv_dset = pd.read_csv(file_path) + + self.assertEqual(csv_dset.shape, dset.shape) + self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) + + # With array features + with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset: + file_path = os.path.join(tmp_dir, "test_path.csv") + bytes_written = dset.to_csv(path_or_buf=file_path) + + self.assertTrue(os.path.isfile(file_path)) + self.assertEqual(bytes_written, os.path.getsize(file_path)) + csv_dset = pd.read_csv(file_path) + + self.assertEqual(csv_dset.shape, dset.shape) + self.assertListEqual(list(csv_dset.columns), list(dset.column_names)) + + def test_to_dict(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + # Full + dset_to_dict = dset.to_dict() + self.assertIsInstance(dset_to_dict, dict) + self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names)) + + for col_name in dset.column_names: + self.assertLessEqual(len(dset_to_dict[col_name]), len(dset)) + + # With index mapping + with dset.select([1, 0, 3]) as dset: + dset_to_dict = dset.to_dict() + self.assertIsInstance(dset_to_dict, dict) + self.assertEqual(len(dset_to_dict), 3) + self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names)) + + for col_name in dset.column_names: + self.assertIsInstance(dset_to_dict[col_name], list) + self.assertEqual(len(dset_to_dict[col_name]), len(dset)) + + def test_to_list(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset_to_list = dset.to_list() + 
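+ # `to_list` is row-oriented (one dict per example), unlike `to_dict` above,
+ # which is column-oriented. Illustrative example (not part of the fixture):
+ # Dataset.from_dict({"a": [1, 2]}).to_list() == [{"a": 1}, {"a": 2}]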
self.assertIsInstance(dset_to_list, list) + for row in dset_to_list: + self.assertIsInstance(row, dict) + self.assertListEqual(sorted(row.keys()), sorted(dset.column_names)) + + # With index mapping + with dset.select([1, 0, 3]) as dset: + dset_to_list = dset.to_list() + self.assertIsInstance(dset_to_list, list) + self.assertEqual(len(dset_to_list), 3) + for row in dset_to_list: + self.assertIsInstance(row, dict) + self.assertListEqual(sorted(row.keys()), sorted(dset.column_names)) + + def test_to_pandas(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + # Batched + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + batch_size = dset.num_rows - 1 + to_pandas_generator = dset.to_pandas(batched=True, batch_size=batch_size) + + for batch in to_pandas_generator: + self.assertIsInstance(batch, pd.DataFrame) + self.assertListEqual(sorted(batch.columns), sorted(dset.column_names)) + for col_name in dset.column_names: + self.assertLessEqual(len(batch[col_name]), batch_size) + + # Full + dset_to_pandas = dset.to_pandas() + self.assertIsInstance(dset_to_pandas, pd.DataFrame) + self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names)) + for col_name in dset.column_names: + self.assertEqual(len(dset_to_pandas[col_name]), len(dset)) + + # With index mapping + with dset.select([1, 0, 3]) as dset: + dset_to_pandas = dset.to_pandas() + self.assertIsInstance(dset_to_pandas, pd.DataFrame) + self.assertEqual(len(dset_to_pandas), 3) + self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names)) + + for col_name in dset.column_names: + self.assertEqual(len(dset_to_pandas[col_name]), dset.num_rows) + + def test_to_parquet(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + # File path argument + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + file_path = os.path.join(tmp_dir, "test_path.parquet") + dset.to_parquet(path_or_buf=file_path) + + self.assertTrue(os.path.isfile(file_path)) + # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match + parquet_dset = pd.read_parquet(file_path) + + self.assertEqual(parquet_dset.shape, dset.shape) + self.assertListEqual(list(parquet_dset.columns), list(dset.column_names)) + + # File buffer argument + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + file_path = os.path.join(tmp_dir, "test_buffer.parquet") + with open(file_path, "wb+") as buffer: + dset.to_parquet(path_or_buf=buffer) + + self.assertTrue(os.path.isfile(file_path)) + # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match + parquet_dset = pd.read_parquet(file_path) + + self.assertEqual(parquet_dset.shape, dset.shape) + self.assertListEqual(list(parquet_dset.columns), list(dset.column_names)) + + # After a select/shuffle transform + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset = dset.select(range(0, len(dset), 2)).shuffle() + file_path = os.path.join(tmp_dir, "test_path.parquet") + dset.to_parquet(path_or_buf=file_path) + + self.assertTrue(os.path.isfile(file_path)) + # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match + parquet_dset = pd.read_parquet(file_path) + + self.assertEqual(parquet_dset.shape, dset.shape) + self.assertListEqual(list(parquet_dset.columns), 
list(dset.column_names))
+
+ # With array features
+ with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
+ file_path = os.path.join(tmp_dir, "test_path.parquet")
+ dset.to_parquet(path_or_buf=file_path)
+
+ self.assertTrue(os.path.isfile(file_path))
+ # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match
+ parquet_dset = pd.read_parquet(file_path)
+
+ self.assertEqual(parquet_dset.shape, dset.shape)
+ self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))
+
+ @require_sqlalchemy
+ def test_to_sql(self, in_memory):
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ # Destination specified as database URI string
+ with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
+ file_path = os.path.join(tmp_dir, "test_path.sqlite")
+ _ = dset.to_sql("data", "sqlite:///" + file_path)
+
+ self.assertTrue(os.path.isfile(file_path))
+ sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
+
+ self.assertEqual(sql_dset.shape, dset.shape)
+ self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
+
+ # Destination specified as sqlite3 connection
+ with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
+ import sqlite3
+
+ file_path = os.path.join(tmp_dir, "test_path.sqlite")
+ with contextlib.closing(sqlite3.connect(file_path)) as con:
+ _ = dset.to_sql("data", con, if_exists="replace")
+
+ self.assertTrue(os.path.isfile(file_path))
+ sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
+
+ self.assertEqual(sql_dset.shape, dset.shape)
+ self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
+
+ # Test writing to a database in chunks
+ with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
+ file_path = os.path.join(tmp_dir, "test_path.sqlite")
+ _ = dset.to_sql("data", "sqlite:///" + file_path, batch_size=1, if_exists="replace")
+
+ self.assertTrue(os.path.isfile(file_path))
+ sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
+
+ self.assertEqual(sql_dset.shape, dset.shape)
+ self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
+
+ # After a select/shuffle transform
+ with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
+ dset = dset.select(range(0, len(dset), 2)).shuffle()
+ file_path = os.path.join(tmp_dir, "test_path.sqlite")
+ _ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")
+
+ self.assertTrue(os.path.isfile(file_path))
+ sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
+
+ self.assertEqual(sql_dset.shape, dset.shape)
+ self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
+
+ # With array features
+ with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
+ file_path = os.path.join(tmp_dir, "test_path.sqlite")
+ _ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")
+
+ self.assertTrue(os.path.isfile(file_path))
+ sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
+
+ self.assertEqual(sql_dset.shape, dset.shape)
+ self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
+
+ def test_train_test_split(self, in_memory):
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
+ fingerprint = dset._fingerprint
+ dset_dict = dset.train_test_split(test_size=10, shuffle=False)
+ self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
+ dset_train = 
dset_dict["train"] + dset_test = dset_dict["test"] + + self.assertEqual(len(dset_train), 20) + self.assertEqual(len(dset_test), 10) + self.assertEqual(dset_train[0]["filename"], "my_name-train_0") + self.assertEqual(dset_train[-1]["filename"], "my_name-train_19") + self.assertEqual(dset_test[0]["filename"], "my_name-train_20") + self.assertEqual(dset_test[-1]["filename"], "my_name-train_29") + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_train._fingerprint, fingerprint) + self.assertNotEqual(dset_test._fingerprint, fingerprint) + self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint) + + dset_dict = dset.train_test_split(test_size=0.5, shuffle=False) + self.assertListEqual(list(dset_dict.keys()), ["train", "test"]) + dset_train = dset_dict["train"] + dset_test = dset_dict["test"] + + self.assertEqual(len(dset_train), 15) + self.assertEqual(len(dset_test), 15) + self.assertEqual(dset_train[0]["filename"], "my_name-train_0") + self.assertEqual(dset_train[-1]["filename"], "my_name-train_14") + self.assertEqual(dset_test[0]["filename"], "my_name-train_15") + self.assertEqual(dset_test[-1]["filename"], "my_name-train_29") + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) + + dset_dict = dset.train_test_split(train_size=10, shuffle=False) + self.assertListEqual(list(dset_dict.keys()), ["train", "test"]) + dset_train = dset_dict["train"] + dset_test = dset_dict["test"] + + self.assertEqual(len(dset_train), 10) + self.assertEqual(len(dset_test), 20) + self.assertEqual(dset_train[0]["filename"], "my_name-train_0") + self.assertEqual(dset_train[-1]["filename"], "my_name-train_9") + self.assertEqual(dset_test[0]["filename"], "my_name-train_10") + self.assertEqual(dset_test[-1]["filename"], "my_name-train_29") + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) + + dset.set_format("numpy") + dset_dict = dset.train_test_split(train_size=10, seed=42) + self.assertListEqual(list(dset_dict.keys()), ["train", "test"]) + dset_train = dset_dict["train"] + dset_test = dset_dict["test"] + + self.assertEqual(len(dset_train), 10) + self.assertEqual(len(dset_test), 20) + self.assertEqual(dset_train.format["type"], "numpy") + self.assertEqual(dset_test.format["type"], "numpy") + self.assertNotEqual(dset_train[0]["filename"].item(), "my_name-train_0") + self.assertNotEqual(dset_train[-1]["filename"].item(), "my_name-train_9") + self.assertNotEqual(dset_test[0]["filename"].item(), "my_name-train_10") + self.assertNotEqual(dset_test[-1]["filename"].item(), "my_name-train_29") + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_train.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_test.features, Features({"filename": Value("string")})) + del dset_test, dset_train, dset_dict # DatasetDict + + def test_shard(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, 
tmp_dir) as dset: + tmp_file = os.path.join(tmp_dir, "test.arrow") + with dset.select(range(10), indices_cache_file_name=tmp_file) as dset: + self.assertEqual(len(dset), 10) + # Shard + tmp_file_1 = os.path.join(tmp_dir, "test_1.arrow") + fingerprint = dset._fingerprint + with dset.shard(num_shards=8, index=1, indices_cache_file_name=tmp_file_1) as dset_sharded: + self.assertEqual(2, len(dset_sharded)) + self.assertEqual(["my_name-train_1", "my_name-train_9"], dset_sharded["filename"]) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_sharded.features, Features({"filename": Value("string")})) + self.assertNotEqual(dset_sharded._fingerprint, fingerprint) + # Shard contiguous + tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") + with dset.shard( + num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2 + ) as dset_sharded_contiguous: + self.assertEqual([f"my_name-train_{i}" for i in (0, 1, 2, 3)], dset_sharded_contiguous["filename"]) + self.assertDictEqual(dset.features, Features({"filename": Value("string")})) + self.assertDictEqual(dset_sharded_contiguous.features, Features({"filename": Value("string")})) + # Test lengths of sharded contiguous + self.assertEqual( + [4, 3, 3], + [ + len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2 + str(i))) + for i in range(3) + ], + ) + # formatted + dset.set_format("numpy") + with dset.shard(num_shards=3, index=0) as dset_sharded_formatted: + self.assertEqual(dset_sharded_formatted.format["type"], "numpy") + + def test_flatten_indices(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertIsNone(dset._indices) + + tmp_file = os.path.join(tmp_dir, "test.arrow") + with dset.select(range(0, 10, 2), indices_cache_file_name=tmp_file) as dset: + self.assertEqual(len(dset), 5) + + self.assertIsNotNone(dset._indices) + + tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") + fingerprint = dset._fingerprint + dset.set_format("numpy") + with dset.flatten_indices(cache_file_name=tmp_file_2) as dset: + self.assertEqual(len(dset), 5) + self.assertEqual(len(dset.data), len(dset)) + self.assertIsNone(dset._indices) + self.assertNotEqual(dset._fingerprint, fingerprint) + self.assertEqual(dset.format["type"], "numpy") + # Test unique works + dset.unique(dset.column_names[0]) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + # Empty indices mapping + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir) as dset: + self.assertIsNone(dset._indices, None) + + tmp_file = os.path.join(tmp_dir, "test.arrow") + with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset: + self.assertEqual(len(dset), 0) + + self.assertIsNotNone(dset._indices, None) + + tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow") + fingerprint = dset._fingerprint + dset.set_format("numpy") + with dset.flatten_indices(cache_file_name=tmp_file_2) as dset: + self.assertEqual(len(dset), 0) + self.assertEqual(len(dset.data), len(dset)) + self.assertIsNone(dset._indices, None) + self.assertNotEqual(dset._fingerprint, fingerprint) + self.assertEqual(dset.format["type"], "numpy") + # Test unique works + dset.unique(dset.column_names[0]) + assert_arrow_metadata_are_synced_with_dataset_features(dset) + + @require_tf + @require_torch + def test_format_vectors(self, in_memory): + import numpy as np + import tensorflow as tf + import torch + + with 
tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset( + in_memory, tmp_dir + ) as dset, dset.map(lambda ex, i: {"vec": np.ones(3) * i}, with_indices=True) as dset: + columns = dset.column_names + + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + for col in columns: + self.assertIsInstance(dset[0][col], (str, list)) + self.assertIsInstance(dset[:2][col], list) + self.assertDictEqual( + dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))}) + ) + + dset.set_format("tensorflow") + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + for col in columns: + self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor)) + self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor)) + self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor)) + self.assertTupleEqual(tuple(dset[:2]["vec"].shape), (2, 3)) + self.assertTupleEqual(tuple(dset["vec"][:2].shape), (2, 3)) + + dset.set_format("numpy") + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + self.assertIsInstance(dset[0]["filename"], np.str_) + self.assertIsInstance(dset[:2]["filename"], np.ndarray) + self.assertIsInstance(dset["filename"], np.ndarray) + self.assertIsInstance(dset[0]["vec"], np.ndarray) + self.assertIsInstance(dset[:2]["vec"], np.ndarray) + self.assertIsInstance(dset["vec"], np.ndarray) + self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3)) + self.assertTupleEqual(dset["vec"][:2].shape, (2, 3)) + + dset.set_format("torch", columns=["vec"]) + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + # torch.Tensor is only for numerical columns + self.assertIsInstance(dset[0]["vec"], torch.Tensor) + self.assertIsInstance(dset[:2]["vec"], torch.Tensor) + self.assertIsInstance(dset["vec"][:2], torch.Tensor) + self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3)) + self.assertTupleEqual(dset["vec"][:2].shape, (2, 3)) + + @require_tf + @require_torch + def test_format_ragged_vectors(self, in_memory): + import numpy as np + import tensorflow as tf + import torch + + with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset( + in_memory, tmp_dir + ) as dset, dset.map(lambda ex, i: {"vec": np.ones(3 + i) * i}, with_indices=True) as dset: + columns = dset.column_names + + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + for col in columns: + self.assertIsInstance(dset[0][col], (str, list)) + self.assertIsInstance(dset[:2][col], list) + self.assertDictEqual( + dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))}) + ) + + dset.set_format("tensorflow") + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + for col in columns: + self.assertIsInstance(dset[0][col], tf.Tensor) + self.assertIsInstance(dset[:2][col], tf.RaggedTensor if col == "vec" else tf.Tensor) + self.assertIsInstance(dset[col], tf.RaggedTensor if col == "vec" else tf.Tensor) + # dim is None for ragged vectors in tensorflow + self.assertListEqual(dset[:2]["vec"].shape.as_list(), [2, None]) + self.assertListEqual(dset["vec"][:2].shape.as_list(), [2, None]) + + dset.set_format("numpy") + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + self.assertIsInstance(dset[0]["filename"], np.str_) + self.assertIsInstance(dset[:2]["filename"], np.ndarray) + self.assertIsInstance(dset["filename"], np.ndarray) + self.assertIsInstance(dset[0]["vec"], np.ndarray) + self.assertIsInstance(dset[:2]["vec"], np.ndarray) + self.assertIsInstance(dset["vec"], np.ndarray) + # array is flat for 
ragged vectors in numpy + self.assertTupleEqual(dset[:2]["vec"].shape, (2,)) + self.assertTupleEqual(dset["vec"][:2].shape, (2,)) + + dset.set_format("torch") + self.assertIsNotNone(dset[0]) + self.assertIsNotNone(dset[:2]) + self.assertIsInstance(dset[0]["filename"], str) + self.assertIsInstance(dset[:2]["filename"], list) + self.assertIsInstance(dset["filename"], list) + self.assertIsInstance(dset[0]["vec"], torch.Tensor) + self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor) + self.assertIsInstance(dset["vec"][0], torch.Tensor) + # pytorch doesn't support ragged tensors, so we should have lists + self.assertIsInstance(dset[:2]["vec"], list) + self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor) + self.assertIsInstance(dset["vec"][:2], list) + self.assertIsInstance(dset["vec"][0], torch.Tensor) + + @require_tf + @require_torch + def test_format_nested(self, in_memory): + import numpy as np + import tensorflow as tf + import torch + + with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset( + in_memory, tmp_dir + ) as dset, dset.map(lambda ex: {"nested": [{"foo": np.ones(3)}] * len(ex["filename"])}, batched=True) as dset: + self.assertDictEqual( + dset.features, Features({"filename": Value("string"), "nested": {"foo": Sequence(Value("float64"))}}) + ) + + dset.set_format("tensorflow") + self.assertIsNotNone(dset[0]) + self.assertIsInstance(dset[0]["nested"]["foo"], (tf.Tensor, tf.RaggedTensor)) + self.assertIsNotNone(dset[:2]) + self.assertIsInstance(dset[:2]["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor)) + self.assertIsInstance(dset["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor)) + + dset.set_format("numpy") + self.assertIsNotNone(dset[0]) + self.assertIsInstance(dset[0]["nested"]["foo"], np.ndarray) + self.assertIsNotNone(dset[:2]) + self.assertIsInstance(dset[:2]["nested"][0]["foo"], np.ndarray) + self.assertIsInstance(dset["nested"][0]["foo"], np.ndarray) + + dset.set_format("torch", columns="nested") + self.assertIsNotNone(dset[0]) + self.assertIsInstance(dset[0]["nested"]["foo"], torch.Tensor) + self.assertIsNotNone(dset[:2]) + self.assertIsInstance(dset[:2]["nested"][0]["foo"], torch.Tensor) + self.assertIsInstance(dset["nested"][0]["foo"], torch.Tensor) + + def test_format_pandas(self, in_memory): + import pandas as pd + + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + dset.set_format("pandas") + self.assertIsInstance(dset[0], pd.DataFrame) + self.assertIsInstance(dset[:2], pd.DataFrame) + self.assertIsInstance(dset["col_1"], pd.Series) + + def test_transmit_format_single(self, in_memory): + @transmit_format + def my_single_transform(self, return_factory, *args, **kwargs): + return return_factory() + + with tempfile.TemporaryDirectory() as tmp_dir: + return_factory = partial( + self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True + ) + with return_factory() as dset: + dset.set_format("numpy", columns=["col_1"]) + prev_format = dset.format + with my_single_transform(dset, return_factory) as transformed_dset: + self.assertDictEqual(transformed_dset.format, prev_format) + + def test_transmit_format_dict(self, in_memory): + @transmit_format + def my_split_transform(self, return_factory, *args, **kwargs): + return DatasetDict({"train": return_factory()}) + + with tempfile.TemporaryDirectory() as tmp_dir: + return_factory = partial( + self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True + ) + with 
return_factory() as dset: + dset.set_format("numpy", columns=["col_1"]) + prev_format = dset.format + transformed_dset = my_split_transform(dset, return_factory)["train"] + self.assertDictEqual(transformed_dset.format, prev_format) + + del transformed_dset # DatasetDict + + def test_with_format(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + with dset.with_format("numpy", columns=["col_1"]) as dset2: + dset.set_format("numpy", columns=["col_1"]) + self.assertDictEqual(dset.format, dset2.format) + self.assertEqual(dset._fingerprint, dset2._fingerprint) + # dset.reset_format() + # self.assertNotEqual(dset.format, dset2.format) + # self.assertNotEqual(dset._fingerprint, dset2._fingerprint) + + def test_with_transform(self, in_memory): + with tempfile.TemporaryDirectory() as tmp_dir: + with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset: + transform = lambda x: {"foo": x["col_1"]} # noqa: E731 + with dset.with_transform(transform, columns=["col_1"]) as dset2: + dset.set_transform(transform, columns=["col_1"]) + self.assertDictEqual(dset.format, dset2.format) + self.assertEqual(dset._fingerprint, dset2._fingerprint) + dset.reset_format() + self.assertNotEqual(dset.format, dset2.format) + self.assertNotEqual(dset._fingerprint, dset2._fingerprint) + + @require_tf + def test_tf_dataset_conversion(self, in_memory): + tmp_dir = tempfile.TemporaryDirectory() + for num_workers in [0, 1, 2]: + if num_workers > 0 and sys.platform == "win32" and not in_memory: + continue # This test hangs on the Py3.10 test worker, but it runs fine locally on my Windows machine + with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset: + tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2, num_workers=num_workers) + batch = next(iter(tf_dataset)) + self.assertEqual(batch.shape.as_list(), [2, 4]) + self.assertEqual(batch.dtype.name, "int64") + with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: + tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=2, num_workers=num_workers) + batch = next(iter(tf_dataset)) + self.assertEqual(batch.shape.as_list(), [2]) + self.assertEqual(batch.dtype.name, "int64") + with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: + # Check that it works with all default options (except batch_size because the dummy dataset only has 4) + tf_dataset = dset.to_tf_dataset(batch_size=2, num_workers=num_workers) + batch = next(iter(tf_dataset)) + self.assertEqual(batch["col_1"].shape.as_list(), [2]) + self.assertEqual(batch["col_2"].shape.as_list(), [2]) + self.assertEqual(batch["col_1"].dtype.name, "int64") + self.assertEqual(batch["col_2"].dtype.name, "string") # Assert that we're converting strings properly + with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: + # Check that when we use a transform that creates a new column from existing column values + # but don't load the old columns that the new column depends on in the final dataset, + # that they're still kept around long enough to be used in the transform + transform_dset = dset.with_transform( + lambda x: {"new_col": [val * 2 for val in x["col_1"]], "col_1": x["col_1"]} + ) + tf_dataset = transform_dset.to_tf_dataset(columns="new_col", batch_size=2, num_workers=num_workers) + batch = next(iter(tf_dataset)) + self.assertEqual(batch.shape.as_list(), [2]) + 
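+ # `new_col` is derived from the integer column `col_1`, so the resulting
+ # tensor should keep the int64 dtype even though `col_1` itself was not
+ # requested in `columns`.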
self.assertEqual(batch.dtype.name, "int64") + del transform_dset + del tf_dataset # For correct cleanup + + @require_tf + def test_tf_index_reshuffling(self, in_memory): + # This test checks that when we do two epochs over a tf.data.Dataset from to_tf_dataset + # that we get a different shuffle order each time + # It also checks that when we aren't shuffling, that the dataset order is fully preserved + # even when loading is split across multiple workers + data = {"col_1": list(range(20))} + for num_workers in [0, 1, 2, 3]: + with Dataset.from_dict(data) as dset: + tf_dataset = dset.to_tf_dataset(batch_size=10, shuffle=True, num_workers=num_workers) + indices = [] + for batch in tf_dataset: + indices.append(batch["col_1"]) + indices = np.concatenate([arr.numpy() for arr in indices]) + second_indices = [] + for batch in tf_dataset: + second_indices.append(batch["col_1"]) + second_indices = np.concatenate([arr.numpy() for arr in second_indices]) + self.assertFalse(np.array_equal(indices, second_indices)) + self.assertEqual(len(indices), len(np.unique(indices))) + self.assertEqual(len(second_indices), len(np.unique(second_indices))) + + tf_dataset = dset.to_tf_dataset(batch_size=1, shuffle=False, num_workers=num_workers) + for i, batch in enumerate(tf_dataset): + # Assert that the unshuffled order is fully preserved even when multiprocessing + self.assertEqual(i, batch["col_1"].numpy()) + + @require_tf + def test_tf_label_renaming(self, in_memory): + # Protect TF-specific imports in here + import tensorflow as tf + + from datasets.utils.tf_utils import minimal_tf_collate_fn_with_renaming + + tmp_dir = tempfile.TemporaryDirectory() + with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset: + with dset.rename_columns({"col_1": "features", "col_2": "label"}) as new_dset: + tf_dataset = new_dset.to_tf_dataset(collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4) + batch = next(iter(tf_dataset)) + self.assertTrue("labels" in batch and "features" in batch) + + tf_dataset = new_dset.to_tf_dataset( + columns=["features", "labels"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4 + ) + batch = next(iter(tf_dataset)) + self.assertTrue("labels" in batch and "features" in batch) + + tf_dataset = new_dset.to_tf_dataset( + columns=["features", "label"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4 + ) + batch = next(iter(tf_dataset)) + self.assertTrue("labels" in batch and "features" in batch) # Assert renaming was handled correctly + + tf_dataset = new_dset.to_tf_dataset( + columns=["features"], + label_cols=["labels"], + collate_fn=minimal_tf_collate_fn_with_renaming, + batch_size=4, + ) + batch = next(iter(tf_dataset)) + self.assertEqual(len(batch), 2) + # Assert that we don't have any empty entries here + self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor)) + + tf_dataset = new_dset.to_tf_dataset( + columns=["features"], + label_cols=["label"], + collate_fn=minimal_tf_collate_fn_with_renaming, + batch_size=4, + ) + batch = next(iter(tf_dataset)) + self.assertEqual(len(batch), 2) + # Assert that we don't have any empty entries here + self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor)) + + tf_dataset = new_dset.to_tf_dataset( + columns=["features"], + collate_fn=minimal_tf_collate_fn_with_renaming, + batch_size=4, + ) + batch = next(iter(tf_dataset)) + # Assert that labels didn't creep in when we don't ask for them + # just because the collate_fn added them + 
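+ # A bare tf.Tensor (rather than a dict or tuple) confirms that only the
+ # single requested `features` column made it into the batch.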
+ self.assertTrue(isinstance(batch, tf.Tensor))
+
+ del tf_dataset # For correct cleanup
+
+ @require_tf
+ def test_tf_dataset_options(self, in_memory):
+ tmp_dir = tempfile.TemporaryDirectory()
+ # Test that batch_size option works as expected
+ with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset:
+ tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2)
+ batch = next(iter(tf_dataset))
+ self.assertEqual(batch.shape.as_list(), [2, 4])
+ self.assertEqual(batch.dtype.name, "int64")
+ # Test that batch_size=None (optional) works as expected
+ with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
+ tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=None)
+ single_example = next(iter(tf_dataset))
+ self.assertEqual(single_example.shape.as_list(), [])
+ self.assertEqual(single_example.dtype.name, "int64")
+ # Assert that we can batch it with `tf.data.Dataset.batch` method
+ batched_dataset = tf_dataset.batch(batch_size=2)
+ batch = next(iter(batched_dataset))
+ self.assertEqual(batch.shape.as_list(), [2])
+ self.assertEqual(batch.dtype.name, "int64")
+ # Test that batching a batch_size=None dataset produces the same results as using batch_size arg
+ with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
+ batch_size = 2
+ tf_dataset_no_batch = dset.to_tf_dataset(columns="col_3")
+ tf_dataset_batch = dset.to_tf_dataset(columns="col_3", batch_size=batch_size)
+ self.assertEqual(tf_dataset_no_batch.element_spec, tf_dataset_batch.unbatch().element_spec)
+ self.assertEqual(tf_dataset_no_batch.cardinality(), tf_dataset_batch.cardinality() * batch_size)
+ for batch_1, batch_2 in zip(tf_dataset_no_batch.batch(batch_size=batch_size), tf_dataset_batch):
+ self.assertEqual(batch_1.shape, batch_2.shape)
+ self.assertEqual(batch_1.dtype, batch_2.dtype)
+ self.assertListEqual(batch_1.numpy().tolist(), batch_2.numpy().tolist())
+ # Test that requesting label_cols works as expected
+ with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
+ tf_dataset = dset.to_tf_dataset(columns="col_1", label_cols=["col_2", "col_3"], batch_size=4)
+ batch = next(iter(tf_dataset))
+ self.assertEqual(len(batch), 2)
+ self.assertEqual(set(batch[1].keys()), {"col_2", "col_3"})
+ self.assertEqual(batch[0].dtype.name, "int64")
+ # Assert data comes out as expected and isn't shuffled
+ self.assertEqual(batch[0].numpy().tolist(), [3, 2, 1, 0])
+ self.assertEqual(batch[1]["col_2"].numpy().tolist(), [b"a", b"b", b"c", b"d"])
+ self.assertEqual(batch[1]["col_3"].numpy().tolist(), [0, 1, 0, 1])
+ # Check that incomplete batches are dropped if requested
+ with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
+ tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=3)
+ tf_dataset_with_drop = dset.to_tf_dataset(columns="col_1", batch_size=3, drop_remainder=True)
+ self.assertEqual(len(tf_dataset), 2) # One batch of 3 and one batch of 1
+ self.assertEqual(len(tf_dataset_with_drop), 1) # Incomplete batch of 1 is dropped
+ # Test that `NotImplementedError` is raised when `batch_size` is None and `num_workers` is > 0
+ if sys.version_info >= (3, 8):
+ with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
+ with self.assertRaisesRegex(
+ NotImplementedError, "`batch_size` must be specified when using multiple workers"
+ ):
+ dset.to_tf_dataset(columns="col_1", batch_size=None, num_workers=2)
+ del tf_dataset # For correct 
cleanup + del tf_dataset_with_drop + + +class MiscellaneousDatasetTest(TestCase): + def test_from_pandas(self): + data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} + df = pd.DataFrame.from_dict(data) + with Dataset.from_pandas(df) as dset: + self.assertListEqual(dset["col_1"], data["col_1"]) + self.assertListEqual(dset["col_2"], data["col_2"]) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) + self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")})) + + features = Features({"col_1": Value("int64"), "col_2": Value("string")}) + with Dataset.from_pandas(df, features=features) as dset: + self.assertListEqual(dset["col_1"], data["col_1"]) + self.assertListEqual(dset["col_2"], data["col_2"]) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) + self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")})) + + features = Features({"col_1": Value("int64"), "col_2": Value("string")}) + with Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features)) as dset: + self.assertListEqual(dset["col_1"], data["col_1"]) + self.assertListEqual(dset["col_2"], data["col_2"]) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"]) + self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")})) + + features = Features({"col_1": Sequence(Value("string")), "col_2": Value("string")}) + self.assertRaises(TypeError, Dataset.from_pandas, df, features=features) + + def test_from_dict(self): + data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": pa.array([True, False, True, False])} + with Dataset.from_dict(data) as dset: + self.assertListEqual(dset["col_1"], data["col_1"]) + self.assertListEqual(dset["col_2"], data["col_2"]) + self.assertListEqual(dset["col_3"], data["col_3"].to_pylist()) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) + self.assertDictEqual( + dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) + ) + + features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) + with Dataset.from_dict(data, features=features) as dset: + self.assertListEqual(dset["col_1"], data["col_1"]) + self.assertListEqual(dset["col_2"], data["col_2"]) + self.assertListEqual(dset["col_3"], data["col_3"].to_pylist()) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) + self.assertDictEqual( + dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) + ) + + features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) + with Dataset.from_dict(data, features=features, info=DatasetInfo(features=features)) as dset: + self.assertListEqual(dset["col_1"], data["col_1"]) + self.assertListEqual(dset["col_2"], data["col_2"]) + self.assertListEqual(dset["col_3"], data["col_3"].to_pylist()) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) + self.assertDictEqual( + dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}) + ) + + features = Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")}) + with Dataset.from_dict(data, features=features) as dset: + # the integers are converted to strings + self.assertListEqual(dset["col_1"], [str(x) for x in data["col_1"]]) + self.assertListEqual(dset["col_2"], 
data["col_2"]) + self.assertListEqual(dset["col_3"], [int(x) for x in data["col_3"].to_pylist()]) + self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"]) + self.assertDictEqual( + dset.features, Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")}) + ) + + features = Features({"col_1": Value("int64"), "col_2": Value("int64"), "col_3": Value("bool")}) + self.assertRaises(ValueError, Dataset.from_dict, data, features=features) + + def test_concatenate_mixed_memory_and_disk(self): + data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]} + info1 = DatasetInfo(description="Dataset1") + info2 = DatasetInfo(description="Dataset2") + with tempfile.TemporaryDirectory() as tmp_dir: + with Dataset.from_dict(data1, info=info1).map( + cache_file_name=os.path.join(tmp_dir, "d1.arrow") + ) as dset1, Dataset.from_dict(data2, info=info2).map( + cache_file_name=os.path.join(tmp_dir, "d2.arrow") + ) as dset2, Dataset.from_dict(data3) as dset3: + with concatenate_datasets([dset1, dset2, dset3]) as concatenated_dset: + self.assertEqual(len(concatenated_dset), len(dset1) + len(dset2) + len(dset3)) + self.assertListEqual(concatenated_dset["id"], dset1["id"] + dset2["id"] + dset3["id"]) + + @require_transformers + @pytest.mark.integration + def test_set_format_encode(self): + from transformers import BertTokenizer + + tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") + + def encode(batch): + return tokenizer(batch["text"], padding="longest", return_tensors="np") + + with Dataset.from_dict({"text": ["hello there", "foo"]}) as dset: + dset.set_transform(transform=encode) + self.assertEqual(str(dset[:2]), str(encode({"text": ["hello there", "foo"]}))) + + @require_tf + def test_tf_string_encoding(self): + data = {"col_1": ["á", "é", "í", "ó", "ú"], "col_2": ["à", "è", "ì", "ò", "ù"]} + with Dataset.from_dict(data) as dset: + tf_dset_wo_batch = dset.to_tf_dataset(columns=["col_1", "col_2"]) + for tf_row, row in zip(tf_dset_wo_batch, dset): + self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"]) + self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"]) + + tf_dset_w_batch = dset.to_tf_dataset(columns=["col_1", "col_2"], batch_size=2) + for tf_row, row in zip(tf_dset_w_batch.unbatch(), dset): + self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"]) + self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"]) + + self.assertEqual(tf_dset_w_batch.unbatch().element_spec, tf_dset_wo_batch.element_spec) + self.assertEqual(tf_dset_w_batch.element_spec, tf_dset_wo_batch.batch(2).element_spec) + + +def test_cast_with_sliced_list(): + old_features = Features({"foo": Sequence(Value("int64"))}) + new_features = Features({"foo": Sequence(Value("int32"))}) + dataset = Dataset.from_dict({"foo": [[i] * (i % 3) for i in range(20)]}, features=old_features) + casted_dataset = dataset.cast(new_features, batch_size=2) # small batch size to slice the ListArray + assert dataset["foo"] == casted_dataset["foo"] + assert casted_dataset.features == new_features + + +@pytest.mark.parametrize("include_nulls", [False, True]) +def test_class_encode_column_with_none(include_nulls): + dataset = Dataset.from_dict({"col_1": ["a", "b", "c", None, "d", None]}) + dataset = dataset.class_encode_column("col_1", include_nulls=include_nulls) + class_names = ["a", "b", "c", "d"] + if include_nulls: + class_names += ["None"] + assert isinstance(dataset.features["col_1"], ClassLabel) + assert 
set(dataset.features["col_1"].names) == set(class_names) + assert (None in dataset.unique("col_1")) == (not include_nulls) + + +@pytest.mark.parametrize("null_placement", ["first", "last"]) +def test_sort_with_none(null_placement): + dataset = Dataset.from_dict({"col_1": ["item_2", "item_3", "item_1", None, "item_4", None]}) + dataset = dataset.sort("col_1", null_placement=null_placement) + if null_placement == "first": + assert dataset["col_1"] == [None, None, "item_1", "item_2", "item_3", "item_4"] + else: + assert dataset["col_1"] == ["item_1", "item_2", "item_3", "item_4", None, None] + + +def test_update_metadata_with_features(dataset_dict): + table1 = pa.Table.from_pydict(dataset_dict) + features1 = Features.from_arrow_schema(table1.schema) + features2 = features1.copy() + features2["col_2"] = ClassLabel(num_classes=len(table1)) + assert features1 != features2 + + table2 = update_metadata_with_features(table1, features2) + metadata = json.loads(table2.schema.metadata[b"huggingface"].decode()) + assert features2 == Features.from_dict(metadata["info"]["features"]) + + with Dataset(table1) as dset1, Dataset(table2) as dset2: + assert dset1.features == features1 + assert dset2.features == features2 + + +@pytest.mark.parametrize("dataset_type", ["in_memory", "memory_mapped", "mixed"]) +@pytest.mark.parametrize("axis, expected_shape", [(0, (4, 3)), (1, (2, 6))]) +def test_concatenate_datasets(dataset_type, axis, expected_shape, dataset_dict, arrow_path): + table = { + "in_memory": InMemoryTable.from_pydict(dataset_dict), + "memory_mapped": MemoryMappedTable.from_file(arrow_path), + } + tables = [ + table[dataset_type if dataset_type != "mixed" else "memory_mapped"].slice(0, 2), # shape = (2, 3) + table[dataset_type if dataset_type != "mixed" else "in_memory"].slice(2, 4), # shape = (2, 3) + ] + if axis == 1: # don't duplicate columns + tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names]) + datasets = [Dataset(table) for table in tables] + dataset = concatenate_datasets(datasets, axis=axis) + assert dataset.shape == expected_shape + assert_arrow_metadata_are_synced_with_dataset_features(dataset) + + +def test_concatenate_datasets_new_columns(): + dataset1 = Dataset.from_dict({"col_1": ["a", "b", "c"]}) + dataset2 = Dataset.from_dict({"col_1": ["d", "e", "f"], "col_2": [True, False, True]}) + dataset = concatenate_datasets([dataset1, dataset2]) + assert dataset.data.shape == (6, 2) + assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool")}) + assert dataset[:] == {"col_1": ["a", "b", "c", "d", "e", "f"], "col_2": [None, None, None, True, False, True]} + dataset3 = Dataset.from_dict({"col_3": ["a_1"]}) + dataset = concatenate_datasets([dataset, dataset3]) + assert dataset.data.shape == (7, 3) + assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool"), "col_3": Value("string")}) + assert dataset[:] == { + "col_1": ["a", "b", "c", "d", "e", "f", None], + "col_2": [None, None, None, True, False, True, None], + "col_3": [None, None, None, None, None, None, "a_1"], + } + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_concatenate_datasets_complex_features(axis): + n = 5 + dataset1 = Dataset.from_dict( + {"col_1": [0] * n, "col_2": list(range(n))}, + features=Features({"col_1": Value("int32"), "col_2": ClassLabel(num_classes=n)}), + ) + if axis == 1: + dataset2 = dataset1.rename_columns({col: col + "_" for col in dataset1.column_names}) + expected_features = Features({**dataset1.features, 
**dataset2.features}) + else: + dataset2 = dataset1 + expected_features = dataset1.features + assert concatenate_datasets([dataset1, dataset2], axis=axis).features == expected_features + + +@pytest.mark.parametrize("other_dataset_type", ["in_memory", "memory_mapped", "concatenation"]) +@pytest.mark.parametrize("axis, expected_shape", [(0, (8, 3)), (1, (4, 6))]) +def test_concatenate_datasets_with_concatenation_tables( + axis, expected_shape, other_dataset_type, dataset_dict, arrow_path +): + def _create_concatenation_table(axis): + if axis == 0: # shape: (4, 3) = (4, 1) + (4, 2) + concatenation_table = ConcatenationTable.from_blocks( + [ + [ + InMemoryTable.from_pydict({"col_1": dataset_dict["col_1"]}), + MemoryMappedTable.from_file(arrow_path).remove_column(0), + ] + ] + ) + elif axis == 1: # shape: (4, 3) = (1, 3) + (3, 3) + concatenation_table = ConcatenationTable.from_blocks( + [ + [InMemoryTable.from_pydict(dataset_dict).slice(0, 1)], + [MemoryMappedTable.from_file(arrow_path).slice(1, 4)], + ] + ) + return concatenation_table + + concatenation_table = _create_concatenation_table(axis) + assert concatenation_table.shape == (4, 3) + + if other_dataset_type == "in_memory": + other_table = InMemoryTable.from_pydict(dataset_dict) + elif other_dataset_type == "memory_mapped": + other_table = MemoryMappedTable.from_file(arrow_path) + elif other_dataset_type == "concatenation": + other_table = _create_concatenation_table(axis) + assert other_table.shape == (4, 3) + + tables = [concatenation_table, other_table] + + if axis == 1: # don't duplicate columns + tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names]) + + for tables in [tables, reversed(tables)]: + datasets = [Dataset(table) for table in tables] + dataset = concatenate_datasets(datasets, axis=axis) + assert dataset.shape == expected_shape + + +def test_concatenate_datasets_duplicate_columns(dataset): + with pytest.raises(ValueError) as excinfo: + concatenate_datasets([dataset, dataset], axis=1) + assert "duplicated" in str(excinfo.value) + + +def test_interleave_datasets(): + d1 = Dataset.from_dict({"a": [0, 1, 2]}) + d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) + d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) + dataset = interleave_datasets([d1, d2, d3]) + expected_length = 3 * min(len(d1), len(d2), len(d3)) + expected_values = [x["a"] for x in itertools.chain(*zip(d1, d2, d3))] + assert isinstance(dataset, Dataset) + assert len(dataset) == expected_length + assert dataset["a"] == expected_values + assert dataset._fingerprint == interleave_datasets([d1, d2, d3])._fingerprint + + +def test_interleave_datasets_probabilities(): + seed = 42 + probabilities = [0.3, 0.5, 0.2] + d1 = Dataset.from_dict({"a": [0, 1, 2]}) + d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) + d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) + dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed) + expected_length = 7 # hardcoded + expected_values = [10, 11, 20, 12, 0, 21, 13] # hardcoded + assert isinstance(dataset, Dataset) + assert len(dataset) == expected_length + assert dataset["a"] == expected_values + assert ( + dataset._fingerprint == interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)._fingerprint + ) + + +def test_interleave_datasets_oversampling_strategy(): + d1 = Dataset.from_dict({"a": [0, 1, 2]}) + d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) + d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) + dataset = 
interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted") + expected_length = 3 * max(len(d1), len(d2), len(d3)) + expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20] # hardcoded + assert isinstance(dataset, Dataset) + assert len(dataset) == expected_length + assert dataset["a"] == expected_values + assert dataset._fingerprint == interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")._fingerprint + + +def test_interleave_datasets_probabilities_oversampling_strategy(): + seed = 42 + probabilities = [0.3, 0.5, 0.2] + d1 = Dataset.from_dict({"a": [0, 1, 2]}) + d2 = Dataset.from_dict({"a": [10, 11, 12, 13]}) + d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0]) + dataset = interleave_datasets( + [d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed + ) + expected_length = 16 # hardcoded + expected_values = [10, 11, 20, 12, 0, 21, 13, 10, 1, 11, 12, 22, 13, 20, 10, 2] # hardcoded + assert isinstance(dataset, Dataset) + assert len(dataset) == expected_length + assert dataset["a"] == expected_values + assert ( + dataset._fingerprint + == interleave_datasets( + [d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed + )._fingerprint + ) + + +@pytest.mark.parametrize("batch_size", [4, 5]) +@pytest.mark.parametrize("drop_last_batch", [False, True]) +def test_dataset_iter_batch(batch_size, drop_last_batch): + n = 25 + dset = Dataset.from_dict({"i": list(range(n))}) + all_col_values = list(range(n)) + batches = [] + for i, batch in enumerate(dset.iter(batch_size, drop_last_batch=drop_last_batch)): + assert batch == {"i": all_col_values[i * batch_size : (i + 1) * batch_size]} + batches.append(batch) + if drop_last_batch: + assert all(len(batch["i"]) == batch_size for batch in batches) + else: + assert all(len(batch["i"]) == batch_size for batch in batches[:-1]) + assert len(batches[-1]["i"]) <= batch_size + + +@pytest.mark.parametrize( + "column, expected_dtype", + [(["a", "b", "c", "d"], "string"), ([1, 2, 3, 4], "int64"), ([1.0, 2.0, 3.0, 4.0], "float64")], +) +@pytest.mark.parametrize("in_memory", [False, True]) +@pytest.mark.parametrize( + "transform", + [ + None, + ("shuffle", (42,), {}), + ("with_format", ("pandas",), {}), + ("class_encode_column", ("col_2",), {}), + ("select", (range(3),), {}), + ], +) +def test_dataset_add_column(column, expected_dtype, in_memory, transform, dataset_dict, arrow_path): + column_name = "col_4" + original_dataset = ( + Dataset(InMemoryTable.from_pydict(dataset_dict)) + if in_memory + else Dataset(MemoryMappedTable.from_file(arrow_path)) + ) + if transform is not None: + transform_name, args, kwargs = transform + original_dataset: Dataset = getattr(original_dataset, transform_name)(*args, **kwargs) + column = column[:3] if transform is not None and transform_name == "select" else column + dataset = original_dataset.add_column(column_name, column) + assert dataset.data.shape == ((3, 4) if transform is not None and transform_name == "select" else (4, 4)) + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + # Sort expected features as in the original dataset + expected_features = {feature: expected_features[feature] for feature in original_dataset.features} + # Add new column feature + expected_features[column_name] = expected_dtype + assert dataset.data.column_names == list(expected_features.keys()) + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + assert 
len(dataset.data.blocks) == (1 if in_memory else 2) # multiple InMemoryTables are consolidated as one + assert dataset.format["type"] == original_dataset.format["type"] + assert dataset._fingerprint != original_dataset._fingerprint + dataset.reset_format() + original_dataset.reset_format() + assert all(dataset[col] == original_dataset[col] for col in original_dataset.column_names) + assert set(dataset["col_4"]) == set(column) + if dataset._indices is not None: + dataset_indices = dataset._indices["indices"].to_pylist() + expected_dataset_indices = original_dataset._indices["indices"].to_pylist() + assert dataset_indices == expected_dataset_indices + assert_arrow_metadata_are_synced_with_dataset_features(dataset) + + +@pytest.mark.parametrize( + "transform", + [None, ("shuffle", (42,), {}), ("with_format", ("pandas",), {}), ("class_encode_column", ("col_2",), {})], +) +@pytest.mark.parametrize("in_memory", [False, True]) +@pytest.mark.parametrize( + "item", + [ + {"col_1": "2", "col_2": 2, "col_3": 2.0}, + {"col_1": "2", "col_2": "2", "col_3": "2"}, + {"col_1": 2, "col_2": 2, "col_3": 2}, + {"col_1": 2.0, "col_2": 2.0, "col_3": 2.0}, + ], +) +def test_dataset_add_item(item, in_memory, dataset_dict, arrow_path, transform): + dataset_to_test = ( + Dataset(InMemoryTable.from_pydict(dataset_dict)) + if in_memory + else Dataset(MemoryMappedTable.from_file(arrow_path)) + ) + if transform is not None: + transform_name, args, kwargs = transform + dataset_to_test: Dataset = getattr(dataset_to_test, transform_name)(*args, **kwargs) + dataset = dataset_to_test.add_item(item) + assert dataset.data.shape == (5, 3) + expected_features = dataset_to_test.features + assert sorted(dataset.data.column_names) == sorted(expected_features.keys()) + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature] == expected_dtype + assert len(dataset.data.blocks) == (1 if in_memory else 2) # multiple InMemoryTables are consolidated as one + assert dataset.format["type"] == dataset_to_test.format["type"] + assert dataset._fingerprint != dataset_to_test._fingerprint + dataset.reset_format() + dataset_to_test.reset_format() + assert dataset[:-1] == dataset_to_test[:] + assert {k: int(v) for k, v in dataset[-1].items()} == {k: int(v) for k, v in item.items()} + if dataset._indices is not None: + dataset_indices = dataset._indices["indices"].to_pylist() + dataset_to_test_indices = dataset_to_test._indices["indices"].to_pylist() + assert dataset_indices == dataset_to_test_indices + [len(dataset_to_test._data)] + + +def test_dataset_add_item_new_columns(): + dataset = Dataset.from_dict({"col_1": [0, 1, 2]}, features=Features({"col_1": Value("uint8")})) + dataset = dataset.add_item({"col_1": 3, "col_2": "a"}) + assert dataset.data.shape == (4, 2) + assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string")}) + assert dataset[:] == {"col_1": [0, 1, 2, 3], "col_2": [None, None, None, "a"]} + dataset = dataset.add_item({"col_3": True}) + assert dataset.data.shape == (5, 3) + assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string"), "col_3": Value("bool")}) + assert dataset[:] == { + "col_1": [0, 1, 2, 3, None], + "col_2": [None, None, None, "a", None], + "col_3": [None, None, None, None, True], + } + + +def test_dataset_add_item_introduce_feature_type(): + dataset = Dataset.from_dict({"col_1": [None, None, None]}) + dataset = dataset.add_item({"col_1": "a"}) + assert dataset.data.shape == (4, 1) + assert dataset.features == Features({"col_1": 
Value("string")}) + assert dataset[:] == {"col_1": [None, None, None, "a"]} + + +def test_dataset_filter_batched_indices(): + ds = Dataset.from_dict({"num": [0, 1, 2, 3]}) + ds = ds.filter(lambda num: num % 2 == 0, input_columns="num", batch_size=2) + assert all(item["num"] % 2 == 0 for item in ds) + + +@pytest.mark.parametrize("in_memory", [False, True]) +def test_dataset_from_file(in_memory, dataset, arrow_file): + filename = arrow_file + with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): + dataset_from_file = Dataset.from_file(filename, in_memory=in_memory) + assert dataset_from_file.features.type == dataset.features.type + assert dataset_from_file.features == dataset.features + assert dataset_from_file.cache_files == ([{"filename": filename}] if not in_memory else []) + + +def _check_csv_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_csv_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_csv_features(features, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" + default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = Dataset.from_csv(csv_path, features=features, cache_dir=cache_dir) + _check_csv_dataset(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_csv_split(split, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, split=split) + _check_csv_dataset(dataset, expected_features) + assert dataset.split == split if split else "train" + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path): + if issubclass(path_type, str): + path = csv_path + elif issubclass(path_type, list): + path = [csv_path] + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + dataset = Dataset.from_csv(path, cache_dir=cache_dir) + _check_csv_dataset(dataset, expected_features) + + +def _check_json_dataset(dataset, expected_features): + assert 
isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_json_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_json_features(features, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = Dataset.from_json(jsonl_path, features=features, cache_dir=cache_dir) + _check_json_dataset(dataset, expected_features) + + +def test_dataset_from_json_with_class_label_feature(jsonl_str_path, tmp_path): + features = Features( + {"col_1": ClassLabel(names=["s0", "s1", "s2", "s3"]), "col_2": Value("int64"), "col_3": Value("float64")} + ) + cache_dir = tmp_path / "cache" + dataset = Dataset.from_json(jsonl_str_path, features=features, cache_dir=cache_dir) + assert dataset.features["col_1"].dtype == "int64" + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_json_split(split, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, split=split) + _check_json_dataset(dataset, expected_features) + assert dataset.split == (split if split else "train") + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path): + if issubclass(path_type, str): + path = jsonl_path + elif issubclass(path_type, list): + path = [jsonl_path] + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = Dataset.from_json(path, cache_dir=cache_dir) + _check_json_dataset(dataset, expected_features) + + +def _check_parquet_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() 
if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_parquet_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_parquet_features(features, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = Dataset.from_parquet(parquet_path, features=features, cache_dir=cache_dir) + _check_parquet_dataset(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_parquet_split(split, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, split=split) + _check_parquet_dataset(dataset, expected_features) + assert dataset.split == (split if split else "train") + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path): + if issubclass(path_type, str): + path = parquet_path + elif issubclass(path_type, list): + path = [parquet_path] + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = Dataset.from_parquet(path, cache_dir=cache_dir) + _check_parquet_dataset(dataset, expected_features) + + +def _check_text_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 1 + assert dataset.column_names == ["text"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = Dataset.from_text(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_text_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"text": "string"}, + {"text": "int32"}, + {"text": "float32"}, + ], +) +def test_dataset_from_text_features(features, text_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"text": "string"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = Dataset.from_text(text_path, features=features, cache_dir=cache_dir) + _check_text_dataset(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_dataset_from_text_split(split, text_path, tmp_path): + 
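# A minimal usage sketch of the loader under test (hypothetical path; only the Dataset.from_text behaviour exercised in these tests is assumed): + # ds = Dataset.from_text("corpus.txt", split="train") # one row per input line + # ds.features # {"text": Value("string")}, as _check_text_dataset expects + 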
cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + dataset = Dataset.from_text(text_path, cache_dir=cache_dir, split=split) + _check_text_dataset(dataset, expected_features) + assert dataset.split == split if split else "train" + + +@pytest.mark.parametrize("path_type", [str, list]) +def test_dataset_from_text_path_type(path_type, text_path, tmp_path): + if issubclass(path_type, str): + path = text_path + elif issubclass(path_type, list): + path = [text_path] + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + dataset = Dataset.from_text(path, cache_dir=cache_dir) + _check_text_dataset(dataset, expected_features) + + +@pytest.fixture +def data_generator(): + def _gen(): + data = [ + {"col_1": "0", "col_2": 0, "col_3": 0.0}, + {"col_1": "1", "col_2": 1, "col_3": 1.0}, + {"col_1": "2", "col_2": 2, "col_3": 2.0}, + {"col_1": "3", "col_2": 3, "col_3": 3.0}, + ] + for item in data: + yield item + + return _gen + + +def _check_generator_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_generator_keep_in_memory(keep_in_memory, data_generator, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_generator_dataset(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_generator_features(features, data_generator, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = Dataset.from_generator(data_generator, features=features, cache_dir=cache_dir) + _check_generator_dataset(dataset, expected_features) + + +@require_not_windows +@require_dill_gt_0_3_2 +@require_pyspark +def test_from_spark(): + import pyspark + + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + data = [ + ("0", 0, 0.0), + ("1", 1, 1.0), + ("2", 2, 2.0), + ("3", 3, 3.0), + ] + df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float") + dataset = Dataset.from_spark(df) + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + + +@require_not_windows +@require_dill_gt_0_3_2 +@require_pyspark +def test_from_spark_features(): + import PIL.Image + import pyspark + + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 
3).tolist())] + df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>") + features = Features({"idx": Value("int64"), "image": Image()}) + dataset = Dataset.from_spark( + df, + features=features, + ) + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 1 + assert dataset.num_columns == 2 + assert dataset.column_names == ["idx", "image"] + assert isinstance(dataset[0]["image"], PIL.Image.Image) + assert dataset.features == features + assert_arrow_metadata_are_synced_with_dataset_features(dataset) + + +@require_not_windows +@require_dill_gt_0_3_2 +@require_pyspark +def test_from_spark_different_cache(): + import pyspark + + spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() + df = spark.createDataFrame([("0", 0)], "col_1: string, col_2: int") + dataset = Dataset.from_spark(df) + assert isinstance(dataset, Dataset) + different_df = spark.createDataFrame([("1", 1)], "col_1: string, col_2: int") + different_dataset = Dataset.from_spark(different_df) + assert isinstance(different_dataset, Dataset) + assert dataset[0]["col_1"] == "0" + # Check to make sure that the second dataset wasn't read from the cache. + assert different_dataset[0]["col_1"] == "1" + + +def _check_sql_dataset(dataset, expected_features): + assert isinstance(dataset, Dataset) + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@require_sqlalchemy +@pytest.mark.parametrize("con_type", ["string", "engine"]) +def test_dataset_from_sql_con_type(con_type, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + if con_type == "string": + con = "sqlite:///" + sqlite_path + elif con_type == "engine": + import sqlalchemy + + con = sqlalchemy.create_engine("sqlite:///" + sqlite_path) + # # https://github.com/huggingface/datasets/issues/2832 needs to be fixed first for this to work + # with caplog.at_level(INFO): + # dataset = Dataset.from_sql( + # "dataset", + # con, + # cache_dir=cache_dir, + # ) + # if con_type == "string": + # assert "couldn't be hashed properly" not in caplog.text + # elif con_type == "engine": + # assert "couldn't be hashed properly" in caplog.text + dataset = Dataset.from_sql( + "dataset", + con, + cache_dir=cache_dir, + ) + _check_sql_dataset(dataset, expected_features) + + +@require_sqlalchemy +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = Dataset.from_sql("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir) + _check_sql_dataset(dataset, expected_features) + + +@require_sqlalchemy 
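+# A minimal sketch of the two connection forms covered by test_dataset_from_sql_con_type above (table name and database path are hypothetical; only the Dataset.from_sql API used in these tests is assumed): +# Dataset.from_sql("dataset", "sqlite:///path/to.db") # connection URI string +# Dataset.from_sql("dataset", sqlalchemy.create_engine("sqlite:///path/to.db")) # SQLAlchemy engine object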
+@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = Dataset.from_sql( + "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory + ) + _check_sql_dataset(dataset, expected_features) + + +def test_dataset_to_json(dataset, tmp_path): + file_path = tmp_path / "test_path.jsonl" + bytes_written = dataset.to_json(path_or_buf=file_path) + assert file_path.is_file() + assert bytes_written == file_path.stat().st_size + df = pd.read_json(file_path, orient="records", lines=True) + assert df.shape == dataset.shape + assert list(df.columns) == list(dataset.column_names) + + +@pytest.mark.parametrize("in_memory", [False, True]) +@pytest.mark.parametrize( + "method_and_params", + [ + ("rename_column", (), {"original_column_name": "labels", "new_column_name": "label"}), + ("remove_columns", (), {"column_names": "labels"}), + ( + "cast", + (), + { + "features": Features( + { + "tokens": Sequence(Value("string")), + "labels": Sequence(Value("int16")), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + "id": Value("int32"), + } + ) + }, + ), + ("flatten", (), {}), + ], +) +def test_pickle_dataset_after_transforming_the_table(in_memory, method_and_params, arrow_file): + method, args, kwargs = method_and_params + with Dataset.from_file(arrow_file, in_memory=in_memory) as dataset, Dataset.from_file( + arrow_file, in_memory=in_memory + ) as reference_dataset: + out = getattr(dataset, method)(*args, **kwargs) + dataset = out if out is not None else dataset + pickled_dataset = pickle.dumps(dataset) + reloaded_dataset = pickle.loads(pickled_dataset) + + assert dataset._data != reference_dataset._data + assert dataset._data.table == reloaded_dataset._data.table + + +def test_dummy_dataset_serialize_fs(dataset, mockfs): + dataset_path = "mock://my_dataset" + dataset.save_to_disk(dataset_path, storage_options=mockfs.storage_options) + assert mockfs.isdir(dataset_path) + assert mockfs.glob(dataset_path + "/*") + reloaded = dataset.load_from_disk(dataset_path, storage_options=mockfs.storage_options) + assert len(reloaded) == len(dataset) + assert reloaded.features == dataset.features + assert reloaded.to_dict() == dataset.to_dict() + + +@pytest.mark.parametrize( + "uri_or_path", + [ + "relative/path", + "/absolute/path", + "s3://bucket/relative/path", + "hdfs://relative/path", + "hdfs:///absolute/path", + ], +) +def test_build_local_temp_path(uri_or_path): + extracted_path = strip_protocol(uri_or_path) + local_temp_path = Dataset._build_local_temp_path(extracted_path).as_posix() + extracted_path_without_anchor = Path(extracted_path).relative_to(Path(extracted_path).anchor).as_posix() + path_relative_to_tmp_dir = local_temp_path.split("tmp")[-1].split("/", 1)[1] + + assert ( + "tmp" in local_temp_path + and "hdfs" not in path_relative_to_tmp_dir + and "s3" not in path_relative_to_tmp_dir + and not local_temp_path.startswith(extracted_path_without_anchor) + and local_temp_path.endswith(extracted_path_without_anchor) + ), f"Local temp path: {local_temp_path}" + + +class TaskTemplatesTest(TestCase): + def test_task_text_classification(self): + labels = sorted(["pos", "neg"]) + features_before_cast = 
Features( + { + "input_text": Value("string"), + "input_labels": ClassLabel(names=labels), + } + ) + # Labels are cast to tuple during `TextClassification.__post_init__`, so we do the same here + features_after_cast = Features( + { + "text": Value("string"), + "labels": ClassLabel(names=labels), + } + ) + # Label names are added in `DatasetInfo.__post_init__` so not needed here + task_without_labels = TextClassification(text_column="input_text", label_column="input_labels") + info1 = DatasetInfo( + features=features_before_cast, + task_templates=task_without_labels, + ) + # Label names are required when passing a TextClassification template directly to `Dataset.prepare_for_task` + # However they can also be used to define `DatasetInfo` so we include a test for this too + task_with_labels = TextClassification(text_column="input_text", label_column="input_labels") + info2 = DatasetInfo( + features=features_before_cast, + task_templates=task_with_labels, + ) + data = {"input_text": ["i love transformers!"], "input_labels": [1]} + # Test we can load from task name when label names not included in template (default behaviour) + with Dataset.from_dict(data, info=info1) as dset: + self.assertSetEqual({"input_text", "input_labels"}, set(dset.column_names)) + self.assertDictEqual(features_before_cast, dset.features) + with dset.prepare_for_task(task="text-classification") as dset: + self.assertSetEqual({"labels", "text"}, set(dset.column_names)) + self.assertDictEqual(features_after_cast, dset.features) + # Test we can load from task name when label names included in template + with Dataset.from_dict(data, info=info2) as dset: + self.assertSetEqual({"input_text", "input_labels"}, set(dset.column_names)) + self.assertDictEqual(features_before_cast, dset.features) + with dset.prepare_for_task(task="text-classification") as dset: + self.assertSetEqual({"labels", "text"}, set(dset.column_names)) + self.assertDictEqual(features_after_cast, dset.features) + # Test we can load from TextClassification template + info1.task_templates = None + with Dataset.from_dict(data, info=info1) as dset: + with dset.prepare_for_task(task=task_with_labels) as dset: + self.assertSetEqual({"labels", "text"}, set(dset.column_names)) + self.assertDictEqual(features_after_cast, dset.features) + + def test_task_question_answering(self): + features_before_cast = Features( + { + "input_context": Value("string"), + "input_question": Value("string"), + "input_answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ) + features_after_cast = Features( + { + "context": Value("string"), + "question": Value("string"), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ) + task = QuestionAnsweringExtractive( + context_column="input_context", question_column="input_question", answers_column="input_answers" + ) + info = DatasetInfo(features=features_before_cast, task_templates=task) + data = { + "input_context": ["huggingface is going to the moon!"], + "input_question": ["where is huggingface going?"], + "input_answers": [{"text": ["to the moon!"], "answer_start": [2]}], + } + # Test we can load from task name + with Dataset.from_dict(data, info=info) as dset: + self.assertSetEqual( + {"input_context", "input_question", "input_answers.text", "input_answers.answer_start"}, + set(dset.flatten().column_names), + ) + self.assertDictEqual(features_before_cast, dset.features) + with dset.prepare_for_task(task="question-answering-extractive") as dset: 
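+ # prepare_for_task casts and renames the template's input columns (input_context/input_question/input_answers -> context/question/answers) and drops all other columns, as asserted below.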
+ self.assertSetEqual( + {"context", "question", "answers.text", "answers.answer_start"}, + set(dset.flatten().column_names), + ) + self.assertDictEqual(features_after_cast, dset.features) + # Test we can load from QuestionAnsweringExtractive template + info.task_templates = None + with Dataset.from_dict(data, info=info) as dset: + with dset.prepare_for_task(task=task) as dset: + self.assertSetEqual( + {"context", "question", "answers.text", "answers.answer_start"}, + set(dset.flatten().column_names), + ) + self.assertDictEqual(features_after_cast, dset.features) + + def test_task_summarization(self): + # Include a dummy extra column `dummy` to test we drop it correctly + features_before_cast = Features( + {"input_text": Value("string"), "input_summary": Value("string"), "dummy": Value("string")} + ) + features_after_cast = Features({"text": Value("string"), "summary": Value("string")}) + task = Summarization(text_column="input_text", summary_column="input_summary") + info = DatasetInfo(features=features_before_cast, task_templates=task) + data = { + "input_text": ["jack and jill took a taxi to attend a super duper party in the city."], + "input_summary": ["jack and jill attend party"], + "dummy": ["123456"], + } + # Test we can load from task name + with Dataset.from_dict(data, info=info) as dset: + with dset.prepare_for_task(task="summarization") as dset: + self.assertSetEqual( + {"text", "summary"}, + set(dset.column_names), + ) + self.assertDictEqual(features_after_cast, dset.features) + # Test we can load from Summarization template + info.task_templates = None + with Dataset.from_dict(data, info=info) as dset: + with dset.prepare_for_task(task=task) as dset: + self.assertSetEqual( + {"text", "summary"}, + set(dset.column_names), + ) + self.assertDictEqual(features_after_cast, dset.features) + + def test_task_automatic_speech_recognition(self): + # Include a dummy extra column `dummy` to test we drop it correctly + features_before_cast = Features( + { + "input_audio": Audio(sampling_rate=16_000), + "input_transcription": Value("string"), + "dummy": Value("string"), + } + ) + features_after_cast = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")}) + task = AutomaticSpeechRecognition(audio_column="input_audio", transcription_column="input_transcription") + info = DatasetInfo(features=features_before_cast, task_templates=task) + data = { + "input_audio": [{"bytes": None, "path": "path/to/some/audio/file.wav"}], + "input_transcription": ["hello, my name is bob!"], + "dummy": ["123456"], + } + # Test we can load from task name + with Dataset.from_dict(data, info=info) as dset: + with dset.prepare_for_task(task="automatic-speech-recognition") as dset: + self.assertSetEqual( + {"audio", "transcription"}, + set(dset.column_names), + ) + self.assertDictEqual(features_after_cast, dset.features) + # Test we can load from AutomaticSpeechRecognition template + info.task_templates = None + with Dataset.from_dict(data, info=info) as dset: + with dset.prepare_for_task(task=task) as dset: + self.assertSetEqual( + {"audio", "transcription"}, + set(dset.column_names), + ) + self.assertDictEqual(features_after_cast, dset.features) + + def test_task_with_no_template(self): + data = {"input_text": ["i love transformers!"], "input_labels": [1]} + with Dataset.from_dict(data) as dset: + with self.assertRaises(ValueError): + dset.prepare_for_task("text-classification") + + def test_task_with_incompatible_templates(self): + labels = sorted(["pos", "neg"]) + features = Features( + { + 
"input_text": Value("string"), + "input_labels": ClassLabel(names=labels), + } + ) + task = TextClassification(text_column="input_text", label_column="input_labels") + info = DatasetInfo( + features=features, + task_templates=task, + ) + data = {"input_text": ["i love transformers!"], "input_labels": [1]} + with Dataset.from_dict(data, info=info) as dset: + # Invalid task name + self.assertRaises(ValueError, dset.prepare_for_task, "this-task-does-not-exist") + # Invalid task type + self.assertRaises(ValueError, dset.prepare_for_task, 1) + + def test_task_with_multiple_compatible_task_templates(self): + features = Features( + { + "text1": Value("string"), + "text2": Value("string"), + } + ) + task1 = LanguageModeling(text_column="text1") + task2 = LanguageModeling(text_column="text2") + info = DatasetInfo( + features=features, + task_templates=[task1, task2], + ) + data = {"text1": ["i love transformers!"], "text2": ["i love datasets!"]} + with Dataset.from_dict(data, info=info) as dset: + self.assertRaises(ValueError, dset.prepare_for_task, "language-modeling", id=3) + with dset.prepare_for_task("language-modeling") as dset1: + self.assertEqual(dset1[0]["text"], "i love transformers!") + with dset.prepare_for_task("language-modeling", id=1) as dset2: + self.assertEqual(dset2[0]["text"], "i love datasets!") + + def test_task_templates_empty_after_preparation(self): + features = Features( + { + "input_text": Value("string"), + "input_labels": ClassLabel(names=["pos", "neg"]), + } + ) + task = TextClassification(text_column="input_text", label_column="input_labels") + info = DatasetInfo( + features=features, + task_templates=task, + ) + data = {"input_text": ["i love transformers!"], "input_labels": [1]} + with Dataset.from_dict(data, info=info) as dset: + with dset.prepare_for_task(task="text-classification") as dset: + self.assertIsNone(dset.info.task_templates) + + def test_align_labels_with_mapping_classification(self): + features = Features( + { + "input_text": Value("string"), + "input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), + } + ) + data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]} + label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1} + id2label = {v: k for k, v in label2id.items()} + expected_labels = [2, 2, 1, 1, 0, 0] + expected_label_names = [id2label[idx] for idx in expected_labels] + with Dataset.from_dict(data, features=features) as dset: + with dset.align_labels_with_mapping(label2id, "input_labels") as dset: + self.assertListEqual(expected_labels, dset["input_labels"]) + aligned_label_names = [dset.features["input_labels"].int2str(idx) for idx in dset["input_labels"]] + self.assertListEqual(expected_label_names, aligned_label_names) + + def test_align_labels_with_mapping_ner(self): + features = Features( + { + "input_text": Value("string"), + "input_labels": Sequence( + ClassLabel( + names=[ + "b-per", + "i-per", + "o", + ] + ) + ), + } + ) + data = {"input_text": [["Optimus", "Prime", "is", "a", "Transformer"]], "input_labels": [[0, 1, 2, 2, 2]]} + label2id = {"B-PER": 2, "I-PER": 1, "O": 0} + id2label = {v: k for k, v in label2id.items()} + expected_labels = [[2, 1, 0, 0, 0]] + expected_label_names = [[id2label[idx] for idx in seq] for seq in expected_labels] + with Dataset.from_dict(data, features=features) as dset: + with dset.align_labels_with_mapping(label2id, "input_labels") as dset: + self.assertListEqual(expected_labels, dset["input_labels"]) + aligned_label_names = [ + 
dset.features["input_labels"].feature.int2str(idx) for idx in dset["input_labels"] + ] + self.assertListEqual(expected_label_names, aligned_label_names) + + def test_concatenate_with_no_task_templates(self): + info = DatasetInfo(task_templates=None) + data = {"text": ["i love transformers!"], "labels": [1]} + with Dataset.from_dict(data, info=info) as dset1, Dataset.from_dict( + data, info=info + ) as dset2, Dataset.from_dict(data, info=info) as dset3: + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertEqual(dset_concat.info.task_templates, None) + + def test_concatenate_with_equal_task_templates(self): + labels = ["neg", "pos"] + task_template = TextClassification(text_column="text", label_column="labels") + info = DatasetInfo( + features=Features({"text": Value("string"), "labels": ClassLabel(names=labels)}), + # Label names are added in `DatasetInfo.__post_init__` so not included here + task_templates=TextClassification(text_column="text", label_column="labels"), + ) + data = {"text": ["i love transformers!"], "labels": [1]} + with Dataset.from_dict(data, info=info) as dset1, Dataset.from_dict( + data, info=info + ) as dset2, Dataset.from_dict(data, info=info) as dset3: + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertListEqual(dset_concat.info.task_templates, [task_template]) + + def test_concatenate_with_mixed_task_templates_in_common(self): + tc_template = TextClassification(text_column="text", label_column="labels") + qa_template = QuestionAnsweringExtractive( + question_column="question", context_column="context", answers_column="answers" + ) + info1 = DatasetInfo( + task_templates=[qa_template], + features=Features( + { + "text": Value("string"), + "labels": ClassLabel(names=["pos", "neg"]), + "context": Value("string"), + "question": Value("string"), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ), + ) + info2 = DatasetInfo( + task_templates=[qa_template, tc_template], + features=Features( + { + "text": Value("string"), + "labels": ClassLabel(names=["pos", "neg"]), + "context": Value("string"), + "question": Value("string"), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ), + ) + data = { + "text": ["i love transformers!"], + "labels": [1], + "context": ["huggingface is going to the moon!"], + "question": ["where is huggingface going?"], + "answers": [{"text": ["to the moon!"], "answer_start": [2]}], + } + with Dataset.from_dict(data, info=info1) as dset1, Dataset.from_dict( + data, info=info2 + ) as dset2, Dataset.from_dict(data, info=info2) as dset3: + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertListEqual(dset_concat.info.task_templates, [qa_template]) + + def test_concatenate_with_no_mixed_task_templates_in_common(self): + tc_template1 = TextClassification(text_column="text", label_column="labels") + tc_template2 = TextClassification(text_column="text", label_column="sentiment") + qa_template = QuestionAnsweringExtractive( + question_column="question", context_column="context", answers_column="answers" + ) + info1 = DatasetInfo( + features=Features( + { + "text": Value("string"), + "labels": ClassLabel(names=["pos", "neg"]), + "sentiment": ClassLabel(names=["pos", "neg", "neutral"]), + "context": Value("string"), + "question": Value("string"), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ), + 
task_templates=[tc_template1], + ) + info2 = DatasetInfo( + features=Features( + { + "text": Value("string"), + "labels": ClassLabel(names=["pos", "neg"]), + "sentiment": ClassLabel(names=["pos", "neg", "neutral"]), + "context": Value("string"), + "question": Value("string"), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ), + task_templates=[tc_template2], + ) + info3 = DatasetInfo( + features=Features( + { + "text": Value("string"), + "labels": ClassLabel(names=["pos", "neg"]), + "sentiment": ClassLabel(names=["pos", "neg", "neutral"]), + "context": Value("string"), + "question": Value("string"), + "answers": Sequence( + { + "text": Value("string"), + "answer_start": Value("int32"), + } + ), + } + ), + task_templates=[qa_template], + ) + data = { + "text": ["i love transformers!"], + "labels": [1], + "sentiment": [0], + "context": ["huggingface is going to the moon!"], + "question": ["where is huggingface going?"], + "answers": [{"text": ["to the moon!"], "answer_start": [2]}], + } + with Dataset.from_dict(data, info=info1) as dset1, Dataset.from_dict( + data, info=info2 + ) as dset2, Dataset.from_dict(data, info=info3) as dset3: + with concatenate_datasets([dset1, dset2, dset3]) as dset_concat: + self.assertEqual(dset_concat.info.task_templates, None) + + def test_task_text_classification_when_columns_removed(self): + labels = sorted(["pos", "neg"]) + features_before_map = Features( + { + "input_text": Value("string"), + "input_labels": ClassLabel(names=labels), + } + ) + features_after_map = Features({"new_column": Value("int64")}) + # Label names are added in `DatasetInfo.__post_init__` so not needed here + task = TextClassification(text_column="input_text", label_column="input_labels") + info = DatasetInfo( + features=features_before_map, + task_templates=task, + ) + data = {"input_text": ["i love transformers!"], "input_labels": [1]} + with Dataset.from_dict(data, info=info) as dset: + with dset.map(lambda x: {"new_column": 0}, remove_columns=dset.column_names) as dset: + self.assertDictEqual(dset.features, features_after_map) + + +class StratifiedTest(TestCase): + def test_errors_train_test_split_stratify(self): + ys = [ + np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]), + np.array([0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), + np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]), + np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]), + ] + for i in range(len(ys)): + features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(ys[i])))}) + data = {"text": np.ones(len(ys[i])), "label": ys[i]} + d1 = Dataset.from_dict(data, features=features) + + # For checking that stratify_by_column exists as a key in self.features.keys() + if i == 0: + self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="labl") + + # For checking minimum class count error + elif i == 1: + self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label") + + # For checking that the label column is of type ClassLabel + elif i == 2: + d1 = Dataset.from_dict(data) + self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label") + + # For checking that test_size is greater than or equal to the number of classes + elif i == 3: + self.assertRaises(ValueError, d1.train_test_split, 0.30, stratify_by_column="label") + + # For checking that train_size is greater than or equal to the number of classes + elif i == 4: + self.assertRaises(ValueError, d1.train_test_split, 0.60, 
stratify_by_column="label") + + def test_train_test_split_stratify(self): + ys = [ + np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]), + np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), + np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), + np.array([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3]), + np.array([0] * 800 + [1] * 50), + ] + for y in ys: + features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(y)))}) + data = {"text": np.ones(len(y)), "label": y} + d1 = Dataset.from_dict(data, features=features) + d1 = d1.train_test_split(test_size=0.33, stratify_by_column="label") + y = np.asanyarray(y) # To make it indexable for y[train] + test_size = np.ceil(0.33 * len(y)) + train_size = len(y) - test_size + npt.assert_array_equal(np.unique(d1["train"]["label"]), np.unique(d1["test"]["label"])) + + # checking class proportions + p_train = np.bincount(np.unique(d1["train"]["label"], return_inverse=True)[1]) / float( + len(d1["train"]["label"]) + ) + p_test = np.bincount(np.unique(d1["test"]["label"], return_inverse=True)[1]) / float( + len(d1["test"]["label"]) + ) + npt.assert_array_almost_equal(p_train, p_test, 1) + assert len(d1["train"]["text"]) + len(d1["test"]["text"]) == y.size + assert len(d1["train"]["text"]) == train_size + assert len(d1["test"]["text"]) == test_size + + +def test_dataset_estimate_nbytes(): + ds = Dataset.from_dict({"a": ["0" * 100] * 100}) + assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than full dataset size" + + ds = Dataset.from_dict({"a": ["0" * 100] * 100}).select([0]) + assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk" + + ds = Dataset.from_dict({"a": ["0" * 100] * 100}) + ds = concatenate_datasets([ds] * 100) + assert 0.9 * ds._estimate_nbytes() < 100 * 100 * 100, "must be smaller than full dataset size" + assert 1.1 * ds._estimate_nbytes() > 100 * 100 * 100, "must be bigger than full dataset size" + + ds = Dataset.from_dict({"a": ["0" * 100] * 100}) + ds = concatenate_datasets([ds] * 100).select([0]) + assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk" + + +def test_dataset_to_iterable_dataset(dataset: Dataset): + iterable_dataset = dataset.to_iterable_dataset() + assert isinstance(iterable_dataset, IterableDataset) + assert list(iterable_dataset) == list(dataset) + assert iterable_dataset.features == dataset.features + iterable_dataset = dataset.to_iterable_dataset(num_shards=3) + assert isinstance(iterable_dataset, IterableDataset) + assert list(iterable_dataset) == list(dataset) + assert iterable_dataset.features == dataset.features + assert iterable_dataset.n_shards == 3 + with pytest.raises(ValueError): + dataset.to_iterable_dataset(num_shards=len(dataset) + 1) + with pytest.raises(NotImplementedError): + dataset.with_format("torch").to_iterable_dataset() + + +@require_pil +def test_dataset_format_with_unformatted_image(): + import PIL + + ds = Dataset.from_dict( + {"a": [np.arange(4 * 4 * 3).reshape(4, 4, 3)] * 10, "b": [[0, 1]] * 10}, + Features({"a": Image(), "b": Sequence(Value("int64"))}), + ) + ds.set_format("np", columns=["b"], output_all_columns=True) + assert isinstance(ds[0]["a"], PIL.Image.Image) + assert isinstance(ds[0]["b"], np.ndarray) + + +@pytest.mark.parametrize("batch_size", [1, 4]) +@require_torch +def test_dataset_with_torch_dataloader(dataset, batch_size): + from torch.utils.data import DataLoader + + from datasets import config + + dataloader = DataLoader(dataset, batch_size=batch_size) + with patch.object(dataset, 
"_getitem", wraps=dataset._getitem) as mock_getitem: + out = list(dataloader) + getitem_call_count = mock_getitem.call_count + assert len(out) == len(dataset) // batch_size + int(len(dataset) % batch_size > 0) + # calling dataset[list_of_indices] is much more efficient than [dataset[idx] for idx in list of indices] + if config.TORCH_VERSION >= version.parse("1.13.0"): + assert getitem_call_count == len(dataset) // batch_size + int(len(dataset) % batch_size > 0) + + +@pytest.mark.parametrize("return_lazy_dict", [True, False, "mix"]) +def test_map_cases(return_lazy_dict): + def f(x): + """May return a mix of LazyDict and regular Dict""" + if x["a"] < 2: + x["a"] = -1 + return dict(x) if return_lazy_dict is False else x + else: + return x if return_lazy_dict is True else {} + + ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) + ds = ds.map(f) + outputs = ds[:] + assert outputs == {"a": [-1, -1, 2, 3]} + + def f(x): + """May return a mix of LazyDict and regular Dict, but sometimes with None values""" + if x["a"] < 2: + x["a"] = None + return dict(x) if return_lazy_dict is False else x + else: + return x if return_lazy_dict is True else {} + + ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) + ds = ds.map(f) + outputs = ds[:] + assert outputs == {"a": [None, None, 2, 3]} + + def f(x): + """Return a LazyDict, but we remove a lazy column and add a new one""" + if x["a"] < 2: + x["b"] = -1 + return x + else: + x["b"] = x["a"] + return x + + ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) + ds = ds.map(f, remove_columns=["a"]) + outputs = ds[:] + assert outputs == {"b": [-1, -1, 2, 3]} + + # The formatted dataset version removes the lazy column from a different dictionary, hence it should be preserved in the output + ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) + ds = ds.with_format("numpy") + ds = ds.map(f, remove_columns=["a"]) + ds = ds.with_format(None) + outputs = ds[:] + assert outputs == {"a": [0, 1, 2, 3], "b": [-1, -1, 2, 3]} + + def f(x): + """May return a mix of LazyDict and regular Dict, but we replace a lazy column""" + if x["a"] < 2: + x["a"] = -1 + return dict(x) if return_lazy_dict is False else x + else: + x["a"] = x["a"] + return x if return_lazy_dict is True else {"a": x["a"]} + + ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) + ds = ds.map(f, remove_columns=["a"]) + outputs = ds[:] + assert outputs == ({"a": [-1, -1, 2, 3]} if return_lazy_dict is False else {}) + + def f(x): + """May return a mix of LazyDict and regular Dict, but we modify a nested lazy column in-place""" + if x["a"]["b"] < 2: + x["a"]["c"] = -1 + return dict(x) if return_lazy_dict is False else x + else: + x["a"]["c"] = x["a"]["b"] + return x if return_lazy_dict is True else {} + + ds = Dataset.from_dict({"a": [{"b": 0}, {"b": 1}, {"b": 2}, {"b": 3}]}) + ds = ds.map(f) + outputs = ds[:] + assert outputs == {"a": [{"b": 0, "c": -1}, {"b": 1, "c": -1}, {"b": 2, "c": 2}, {"b": 3, "c": 3}]} + + def f(x): + """May return a mix of LazyDict and regular Dict, but using an extension type""" + if x["a"][0][0] < 2: + x["a"] = [[-1]] + return dict(x) if return_lazy_dict is False else x + else: + return x if return_lazy_dict is True else {} + + features = Features({"a": Array2D(shape=(1, 1), dtype="int32")}) + ds = Dataset.from_dict({"a": [[[i]] for i in [0, 1, 2, 3]]}, features=features) + ds = ds.map(f) + outputs = ds[:] + assert outputs == {"a": [[[i]] for i in [-1, -1, 2, 3]]} + + def f(x): + """May return a mix of LazyDict and regular Dict, but using a nested extension type""" + if x["a"]["nested"][0][0] < 2: + x["a"] = {"nested": 
[[-1]]} + return dict(x) if return_lazy_dict is False else x + else: + return x if return_lazy_dict is True else {} + + features = Features({"a": {"nested": Array2D(shape=(1, 1), dtype="int64")}}) + ds = Dataset.from_dict({"a": [{"nested": [[i]]} for i in [0, 1, 2, 3]]}, features=features) + ds = ds.map(f) + outputs = ds[:] + assert outputs == {"a": [{"nested": [[i]]} for i in [-1, -1, 2, 3]]} + + +def test_dataset_getitem_raises(): + ds = Dataset.from_dict({"a": [0, 1, 2, 3]}) + with pytest.raises(TypeError): + ds[False] + with pytest.raises(TypeError): + ds._getitem(True) diff --git a/testbed/huggingface__datasets/tests/test_arrow_reader.py b/testbed/huggingface__datasets/tests/test_arrow_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..f952478c6a2060cebdabe1a3cbe41f496f0958d2 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_arrow_reader.py @@ -0,0 +1,194 @@ +import os +import tempfile +from pathlib import Path +from unittest import TestCase + +import pyarrow as pa +import pytest + +from datasets.arrow_dataset import Dataset +from datasets.arrow_reader import ArrowReader, BaseReader, FileInstructions, ReadInstruction, make_file_instructions +from datasets.info import DatasetInfo +from datasets.splits import NamedSplit, Split, SplitDict, SplitInfo + +from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases + + +class ReaderTest(BaseReader): + """ + Build a Dataset object out of Instruction instance(s). + This reader is made for testing. It mocks file reads. + """ + + def _get_table_from_filename(self, filename_skip_take, in_memory=False): + """Returns a Dataset instance from given (filename, skip, take).""" + filename, skip, take = ( + filename_skip_take["filename"], + filename_skip_take["skip"] if "skip" in filename_skip_take else None, + filename_skip_take["take"] if "take" in filename_skip_take else None, + ) + open(os.path.join(filename), "wb").close() + pa_table = pa.Table.from_pydict({"filename": [Path(filename).name] * 100}) + if take == -1: + take = len(pa_table) - skip + if skip is not None and take is not None: + pa_table = pa_table.slice(skip, take) + return pa_table + + +class BaseReaderTest(TestCase): + def test_read(self): + name = "my_name" + train_info = SplitInfo(name="train", num_examples=100) + test_info = SplitInfo(name="test", num_examples=100) + split_infos = [train_info, test_info] + split_dict = SplitDict() + split_dict.add(train_info) + split_dict.add(test_info) + info = DatasetInfo(splits=split_dict) + + with tempfile.TemporaryDirectory() as tmp_dir: + reader = ReaderTest(tmp_dir, info) + + instructions = "test[:33%]" + dset = Dataset(**reader.read(name, instructions, split_infos)) + self.assertEqual(dset["filename"][0], f"{name}-test") + self.assertEqual(dset.num_rows, 33) + self.assertEqual(dset.num_columns, 1) + + instructions1 = ["train", "test[:33%]"] + instructions2 = [Split.TRAIN, ReadInstruction.from_spec("test[:33%]")] + for instructions in [instructions1, instructions2]: + datasets_kwargs = [reader.read(name, instr, split_infos) for instr in instructions] + train_dset, test_dset = (Dataset(**dataset_kwargs) for dataset_kwargs in datasets_kwargs) + self.assertEqual(train_dset["filename"][0], f"{name}-train") + self.assertEqual(train_dset.num_rows, 100) + self.assertEqual(train_dset.num_columns, 1) + self.assertIsInstance(train_dset.split, NamedSplit) + self.assertEqual(str(train_dset.split), "train") + self.assertEqual(test_dset["filename"][0], f"{name}-test") + 
self.assertEqual(test_dset.num_rows, 33) + self.assertEqual(test_dset.num_columns, 1) + self.assertIsInstance(test_dset.split, NamedSplit) + self.assertEqual(str(test_dset.split), "test[:33%]") + del train_dset, test_dset + + def test_read_sharded(self): + name = "my_name" + train_info = SplitInfo(name="train", num_examples=1000, shard_lengths=[100] * 10) + split_infos = [train_info] + split_dict = SplitDict() + split_dict.add(train_info) + info = DatasetInfo(splits=split_dict) + + with tempfile.TemporaryDirectory() as tmp_dir: + reader = ReaderTest(tmp_dir, info) + + instructions = "train[:33%]" + dset = Dataset(**reader.read(name, instructions, split_infos)) + self.assertEqual(dset["filename"][0], f"{name}-train-00000-of-00010") + self.assertEqual(dset["filename"][-1], f"{name}-train-00003-of-00010") + self.assertEqual(dset.num_rows, 330) + self.assertEqual(dset.num_columns, 1) + + def test_read_files(self): + train_info = SplitInfo(name="train", num_examples=100) + test_info = SplitInfo(name="test", num_examples=100) + split_dict = SplitDict() + split_dict.add(train_info) + split_dict.add(test_info) + info = DatasetInfo(splits=split_dict) + + with tempfile.TemporaryDirectory() as tmp_dir: + reader = ReaderTest(tmp_dir, info) + + files = [ + {"filename": os.path.join(tmp_dir, "train")}, + {"filename": os.path.join(tmp_dir, "test"), "skip": 10, "take": 10}, + ] + dset = Dataset(**reader.read_files(files, original_instructions="train+test[10:20]")) + self.assertEqual(dset.num_rows, 110) + self.assertEqual(dset.num_columns, 1) + del dset + + +@pytest.mark.parametrize("in_memory", [False, True]) +def test_read_table(in_memory, dataset, arrow_file): + filename = arrow_file + with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): + table = ArrowReader.read_table(filename, in_memory=in_memory) + assert table.shape == dataset.data.shape + assert set(table.column_names) == set(dataset.data.column_names) + assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict + + +@pytest.mark.parametrize("in_memory", [False, True]) +def test_read_files(in_memory, dataset, arrow_file): + filename = arrow_file + reader = ArrowReader("", None) + with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase(): + dataset_kwargs = reader.read_files([{"filename": filename}], in_memory=in_memory) + assert dataset_kwargs.keys() == {"arrow_table", "info", "split"} + table = dataset_kwargs["arrow_table"] + assert table.shape == dataset.data.shape + assert set(table.column_names) == set(dataset.data.column_names) + assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict + + +def test_read_instruction_spec(): + assert ReadInstruction("train", to=10, unit="abs").to_spec() == "train[:10]" + assert ReadInstruction("train", from_=-80, to=10, unit="%").to_spec() == "train[-80%:10%]" + + spec_train_test = "train+test" + assert ReadInstruction.from_spec(spec_train_test).to_spec() == spec_train_test + + spec_train_abs = "train[2:10]" + assert ReadInstruction.from_spec(spec_train_abs).to_spec() == spec_train_abs + + spec_train_pct = "train[15%:-20%]" + assert ReadInstruction.from_spec(spec_train_pct).to_spec() == spec_train_pct + + spec_train_pct_rounding = "train[:10%](closest)" + assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == "train[:10%]" + + spec_train_pct_rounding = "train[:10%](pct1_dropremainder)" + assert 
ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == spec_train_pct_rounding + + spec_train_test_pct_rounding = "train[:10%](pct1_dropremainder)+test[-10%:](pct1_dropremainder)" + assert ReadInstruction.from_spec(spec_train_test_pct_rounding).to_spec() == spec_train_test_pct_rounding + + +def test_make_file_instructions(): + name = "dummy" + split_infos = [SplitInfo(name="train", num_examples=100)] + instruction = "train[:33%]" + filetype_suffix = "arrow" + prefix_path = "prefix" + + file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path) + assert isinstance(file_instructions, FileInstructions) + assert file_instructions.num_examples == 33 + assert file_instructions.file_instructions == [ + {"filename": os.path.join(prefix_path, f"{name}-train.arrow"), "skip": 0, "take": 33} + ] + + split_infos = [SplitInfo(name="train", num_examples=100, shard_lengths=[10] * 10)] + file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path) + assert isinstance(file_instructions, FileInstructions) + assert file_instructions.num_examples == 33 + assert file_instructions.file_instructions == [ + {"filename": os.path.join(prefix_path, f"{name}-train-00000-of-00010.arrow"), "skip": 0, "take": -1}, + {"filename": os.path.join(prefix_path, f"{name}-train-00001-of-00010.arrow"), "skip": 0, "take": -1}, + {"filename": os.path.join(prefix_path, f"{name}-train-00002-of-00010.arrow"), "skip": 0, "take": -1}, + {"filename": os.path.join(prefix_path, f"{name}-train-00003-of-00010.arrow"), "skip": 0, "take": 3}, + ] + + +@pytest.mark.parametrize("name, expected_exception", [(None, TypeError), ("", ValueError)]) +def test_make_file_instructions_raises(name, expected_exception): + split_infos = [SplitInfo(name="train", num_examples=100)] + instruction = "train" + filetype_suffix = "arrow" + prefix_path = "prefix_path" + with pytest.raises(expected_exception): + _ = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path) diff --git a/testbed/huggingface__datasets/tests/test_arrow_writer.py b/testbed/huggingface__datasets/tests/test_arrow_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..b2483509b0ce1887a167a177102c4f21c231377a --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_arrow_writer.py @@ -0,0 +1,361 @@ +import copy +import os +import tempfile +from unittest import TestCase +from unittest.mock import patch + +import numpy as np +import pyarrow as pa +import pyarrow.parquet as pq +import pytest + +from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence +from datasets.features import Array2D, ClassLabel, Features, Image, Value +from datasets.features.features import Array2DExtensionType, cast_to_python_objects +from datasets.keyhash import DuplicatedKeysError, InvalidKeyError + +from .utils import require_pil + + +class TypedSequenceTest(TestCase): + def test_no_type(self): + arr = pa.array(TypedSequence([1, 2, 3])) + self.assertEqual(arr.type, pa.int64()) + + def test_array_type_forbidden(self): + with self.assertRaises(ValueError): + _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64()) + + def test_try_type_and_type_forbidden(self): + with self.assertRaises(ValueError): + _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64"))) + + def test_compatible_type(self): + arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32"))) + self.assertEqual(arr.type, pa.int32()) + + def 
diff --git a/testbed/huggingface__datasets/tests/test_arrow_writer.py b/testbed/huggingface__datasets/tests/test_arrow_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2483509b0ce1887a167a177102c4f21c231377a
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/test_arrow_writer.py
@@ -0,0 +1,361 @@
+import copy
+import os
+import tempfile
+from unittest import TestCase
+from unittest.mock import patch
+
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+import pytest
+
+from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
+from datasets.features import Array2D, ClassLabel, Features, Image, Value
+from datasets.features.features import Array2DExtensionType, cast_to_python_objects
+from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
+
+from .utils import require_pil
+
+
+class TypedSequenceTest(TestCase):
+    def test_no_type(self):
+        arr = pa.array(TypedSequence([1, 2, 3]))
+        self.assertEqual(arr.type, pa.int64())
+
+    def test_array_type_forbidden(self):
+        with self.assertRaises(ValueError):
+            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())
+
+    def test_try_type_and_type_forbidden(self):
+        with self.assertRaises(ValueError):
+            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))
+
+    def test_compatible_type(self):
+        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
+        self.assertEqual(arr.type, pa.int32())
+
+    def test_incompatible_type(self):
+        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
+            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))
+
+    def test_try_compatible_type(self):
+        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
+        self.assertEqual(arr.type, pa.int32())
+
+    def test_try_incompatible_type(self):
+        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
+        self.assertEqual(arr.type, pa.string())
+
+    def test_compatible_extension_type(self):
+        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
+        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))
+
+    def test_incompatible_extension_type(self):
+        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
+            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))
+
+    def test_try_compatible_extension_type(self):
+        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
+        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))
+
+    def test_try_incompatible_extension_type(self):
+        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
+        self.assertEqual(arr.type, pa.string())
+
+    @require_pil
+    def test_exhaustive_cast(self):
+        import PIL.Image
+
+        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
+        with patch(
+            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
+        ) as mock_cast_to_python_objects:
+            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
+            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
+            self.assertIn("optimize_list_casting", kwargs)
+            self.assertFalse(kwargs["optimize_list_casting"])
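+
+
+# Editor's note -- a hedged summary, not part of the original test file: as the
+# tests above exercise, `type=` casts strictly and raises on incompatible data,
+# while `try_type=` falls back to plain type inference instead of raising, e.g.:
+#
+#     pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64"))).type == pa.string()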
+
+
+def _check_output(output, expected_num_chunks: int):
+    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
+    f = pa.ipc.open_stream(stream)
+    pa_table: pa.Table = f.read_all()
+    assert len(pa_table.to_batches()) == expected_num_chunks
+    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
+    del pa_table
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
+@pytest.mark.parametrize(
+    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
+)
+def test_write(fields, writer_batch_size):
+    output = pa.BufferOutputStream()
+    schema = pa.schema(fields) if fields else None
+    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
+        writer.write({"col_1": "foo", "col_2": 1})
+        writer.write({"col_1": "bar", "col_2": 2})
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    if not fields:
+        fields = {"col_1": pa.string(), "col_2": pa.int64()}
+    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
+    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
+
+
+def test_write_with_features():
+    output = pa.BufferOutputStream()
+    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
+    with ArrowWriter(stream=output, features=features) as writer:
+        writer.write({"labels": 0})
+        writer.write({"labels": 1})
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    assert writer._schema == features.arrow_schema
+    assert writer._schema.metadata == features.arrow_schema.metadata
+    stream = pa.BufferReader(output.getvalue())
+    f = pa.ipc.open_stream(stream)
+    pa_table: pa.Table = f.read_all()
+    schema = pa_table.schema
+    assert pa_table.num_rows == 2
+    assert schema == features.arrow_schema
+    assert schema.metadata == features.arrow_schema.metadata
+    assert features == Features.from_arrow_schema(schema)
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
+def test_key_datatype(writer_batch_size):
+    output = pa.BufferOutputStream()
+    with ArrowWriter(
+        stream=output,
+        writer_batch_size=writer_batch_size,
+        hash_salt="split_name",
+        check_duplicates=True,
+    ) as writer:
+        with pytest.raises(InvalidKeyError):
+            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
+        num_examples, num_bytes = writer.finalize()
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
+def test_duplicate_keys(writer_batch_size):
+    output = pa.BufferOutputStream()
+    with ArrowWriter(
+        stream=output,
+        writer_batch_size=writer_batch_size,
+        hash_salt="split_name",
+        check_duplicates=True,
+    ) as writer:
+        with pytest.raises(DuplicatedKeysError):
+            writer.write({"col_1": "foo", "col_2": 1}, key=10)
+            writer.write({"col_1": "bar", "col_2": 2}, key=10)
+        num_examples, num_bytes = writer.finalize()
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
+def test_write_with_keys(writer_batch_size):
+    output = pa.BufferOutputStream()
+    with ArrowWriter(
+        stream=output,
+        writer_batch_size=writer_batch_size,
+        hash_salt="split_name",
+        check_duplicates=True,
+    ) as writer:
+        writer.write({"col_1": "foo", "col_2": 1}, key=1)
+        writer.write({"col_1": "bar", "col_2": 2}, key=2)
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
+@pytest.mark.parametrize(
+    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
+)
+def test_write_batch(fields, writer_batch_size):
+    output = pa.BufferOutputStream()
+    schema = pa.schema(fields) if fields else None
+    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
+        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
+        writer.write_batch({"col_1": [], "col_2": []})
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    if not fields:
+        fields = {"col_1": pa.string(), "col_2": pa.int64()}
+    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
+    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
+@pytest.mark.parametrize(
+    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
+)
+def test_write_table(fields, writer_batch_size):
+    output = pa.BufferOutputStream()
+    schema = pa.schema(fields) if fields else None
+    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
+        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    if not fields:
+        fields = {"col_1": pa.string(), "col_2": pa.int64()}
+    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
+    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
+
+
+@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
+@pytest.mark.parametrize(
+    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
+)
+def test_write_row(fields, writer_batch_size):
+    output = pa.BufferOutputStream()
+    schema = pa.schema(fields) if fields else None
+    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
+        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
+        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    if not fields:
+        fields = {"col_1": pa.string(), "col_2": pa.int64()}
+    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
+    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
+
+
+def test_write_file():
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        fields = {"col_1": pa.string(), "col_2": pa.int64()}
+        output = os.path.join(tmp_dir, "test.arrow")
+        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
+            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
+            num_examples, num_bytes = writer.finalize()
+        assert num_examples == 2
+        assert num_bytes > 0
+        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
+        _check_output(output, 1)
+
+
+def get_base_dtype(arr_type):
+    if pa.types.is_list(arr_type):
+        return get_base_dtype(arr_type.value_type)
+    else:
+        return arr_type
+
+
+def change_first_primitive_element_in_list(lst, value):
+    if isinstance(lst[0], list):
+        change_first_primitive_element_in_list(lst[0], value)
+    else:
+        lst[0] = value
+
+
+@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
+@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
+def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
+    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
+    assert get_base_dtype(arr.type) == expected_dtype
+
+
+@pytest.mark.parametrize(
+    "col, expected_dtype",
+    [
+        ("attention_mask", pa.int8()),
+        ("special_tokens_mask", pa.int8()),
+        ("token_type_ids", pa.int8()),
+        ("input_ids", pa.int32()),
+        ("other", pa.int64()),
+    ],
+)
+@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
+def test_optimized_typed_sequence(sequence, col, expected_dtype):
+    # in range
+    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
+    assert get_base_dtype(arr.type) == expected_dtype
+
+    # not in range
+    if col != "other":
+        # avoids errors due to in-place modifications
+        sequence = copy.deepcopy(sequence)
+        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
+        change_first_primitive_element_in_list(sequence, value)
+        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
+        assert get_base_dtype(arr.type) == pa.int64()
+
+
+@pytest.mark.parametrize("raise_exception", [False, True])
+def test_arrow_writer_closes_stream(raise_exception, tmp_path):
+    path = str(tmp_path / "dataset-train.arrow")
+    try:
+        with ArrowWriter(path=path) as writer:
+            if raise_exception:
+                raise pa.lib.ArrowInvalid()
+            else:
+                writer.stream.close()
+    except pa.lib.ArrowInvalid:
+        pass
+    finally:
+        assert writer.stream.closed
+
+
+def test_arrow_writer_with_filesystem(mockfs):
+    path = "mock://dataset-train.arrow"
+    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
+        assert isinstance(writer._fs, type(mockfs))
+        assert writer._fs.storage_options == mockfs.storage_options
+        writer.write({"col_1": "foo", "col_2": 1})
+        writer.write({"col_1": "bar", "col_2": 2})
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    assert mockfs.exists(path)
+
+
+def test_parquet_writer_write():
+    output = pa.BufferOutputStream()
+    with ParquetWriter(stream=output) as writer:
+        writer.write({"col_1": "foo", "col_2": 1})
+        writer.write({"col_1": "bar", "col_2": 2})
+        num_examples, num_bytes = writer.finalize()
+    assert num_examples == 2
+    assert num_bytes > 0
+    stream = pa.BufferReader(output.getvalue())
+    pa_table: pa.Table = pq.read_table(stream)
+    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
+
+
+@require_pil
+@pytest.mark.parametrize("embed_local_files", [False, True])
+def test_writer_embed_local_files(tmp_path, embed_local_files):
+    import PIL.Image
+
+    image_path = str(tmp_path / "test_image_rgb.jpg")
+    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
+    output = pa.BufferOutputStream()
+    with ParquetWriter(
+        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
+    ) as writer:
+        writer.write({"image": image_path})
+        writer.finalize()
+    stream = pa.BufferReader(output.getvalue())
+    pa_table: pa.Table = pq.read_table(stream)
+    out = pa_table.to_pydict()
+    if embed_local_files:
+        assert isinstance(out["image"][0]["path"], str)
+        with open(image_path, "rb") as f:
+            assert out["image"][0]["bytes"] == f.read()
+    else:
+        assert out["image"][0]["path"] == image_path
+        assert out["image"][0]["bytes"] is None
+
+
+def test_always_nullable():
+    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
+    output = pa.BufferOutputStream()
+    with ArrowWriter(stream=output) as writer:
+        writer._build_writer(inferred_schema=non_nullable_schema)
+    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
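+
+
+# Editor's note -- not part of the original test file: test_always_nullable above
+# relies on ArrowWriter rebuilding any inferred schema with nullable fields, so a
+# field declared with nullable=False comes back as a plain (nullable) field, e.g.
+# pa.field("col_1", pa.string(), nullable=False) -> pa.field("col_1", pa.string()).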
diff --git a/testbed/huggingface__datasets/tests/test_beam.py b/testbed/huggingface__datasets/tests/test_beam.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cc91d4e4525da8266cf4021f1718884d4033a8e
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/test_beam.py
@@ -0,0 +1,153 @@
+import os
+import tempfile
+from functools import partial
+from unittest import TestCase
+from unittest.mock import patch
+
+import datasets
+import datasets.config
+
+from .utils import require_beam
+
+
+class DummyBeamDataset(datasets.BeamBasedBuilder):
+    """Dummy beam dataset."""
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            features=datasets.Features({"content": datasets.Value("string")}),
+            # No default supervised_keys.
+            supervised_keys=None,
+        )
+
+    def _split_generators(self, dl_manager, pipeline):
+        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]
+
+    def _build_pcollection(self, pipeline, examples):
+        import apache_beam as beam
+
+        return pipeline | "Load Examples" >> beam.Create(examples)
+
+
+class NestedBeamDataset(datasets.BeamBasedBuilder):
+    """Dummy beam dataset."""
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
+            # No default supervised_keys.
+            supervised_keys=None,
+        )
+
+    def _split_generators(self, dl_manager, pipeline):
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
+        ]
+
+    def _build_pcollection(self, pipeline, examples):
+        import apache_beam as beam
+
+        return pipeline | "Load Examples" >> beam.Create(examples)
+
+
+def get_test_dummy_examples():
+    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
+
+
+def get_test_nested_examples():
+    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
+
+
+class BeamBuilderTest(TestCase):
+    @require_beam
+    def test_download_and_prepare(self):
+        expected_num_examples = len(get_test_dummy_examples())
+        with tempfile.TemporaryDirectory() as tmp_cache_dir:
+            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
+            builder.download_and_prepare()
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
+                )
+            )
+            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
+            dset = builder.as_dataset()
+            self.assertEqual(dset["train"].num_rows, expected_num_examples)
+            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
+            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
+            self.assertDictEqual(
+                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
+            )
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
+            )
+            del dset
+
+    @require_beam
+    def test_download_and_prepare_sharded(self):
+        import apache_beam as beam
+
+        original_write_parquet = beam.io.parquetio.WriteToParquet
+
+        expected_num_examples = len(get_test_dummy_examples())
+        with tempfile.TemporaryDirectory() as tmp_cache_dir:
+            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
+            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
+                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
+                builder.download_and_prepare()
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
+                    )
+                )
+            )
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
+                    )
+                )
+            )
+            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
+            dset = builder.as_dataset()
+            self.assertEqual(dset["train"].num_rows, expected_num_examples)
+            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
+            # Order is not preserved when sharding, so we just check that all the elements are there
+            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
+            )
+            del dset
+
+    @require_beam
+    def test_no_beam_options(self):
+        with tempfile.TemporaryDirectory() as tmp_cache_dir:
+            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
+            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
+
+    @require_beam
+    def test_nested_features(self):
+        expected_num_examples = len(get_test_nested_examples())
+        with tempfile.TemporaryDirectory() as tmp_cache_dir:
+            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
+            builder.download_and_prepare()
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
+                )
+            )
+            self.assertDictEqual(
+                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
+            )
+            dset = builder.as_dataset()
+            self.assertEqual(dset["train"].num_rows, expected_num_examples)
+            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
+            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
+            self.assertDictEqual(
+                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
+            )
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
+            )
+            del dset
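+
+
+# Editor's note -- not part of the original test file: as test_no_beam_options shows,
+# a BeamBasedBuilder needs an explicit runner; a minimal sketch of the happy path is
+#
+#     builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
+#     builder.download_and_prepare()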
diff --git a/testbed/huggingface__datasets/tests/test_builder.py b/testbed/huggingface__datasets/tests/test_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..54bae47ae0853292f1893b64af5dfa0dd3064592
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/test_builder.py
@@ -0,0 +1,1278 @@
+import importlib
+import os
+import tempfile
+import types
+from contextlib import nullcontext as does_not_raise
+from multiprocessing import Process
+from pathlib import Path
+from unittest import TestCase
+from unittest.mock import patch
+
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+import pytest
+from multiprocess.pool import Pool
+
+from datasets.arrow_dataset import Dataset
+from datasets.arrow_reader import DatasetNotOnHfGcsError
+from datasets.arrow_writer import ArrowWriter
+from datasets.builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
+from datasets.dataset_dict import DatasetDict, IterableDatasetDict
+from datasets.download.download_manager import DownloadMode
+from datasets.features import Features, Value
+from datasets.info import DatasetInfo, PostProcessedInfo
+from datasets.iterable_dataset import IterableDataset
+from datasets.splits import Split, SplitDict, SplitGenerator, SplitInfo
+from datasets.streaming import xjoin
+from datasets.utils.file_utils import is_local_path
+from datasets.utils.info_utils import VerificationMode
+from datasets.utils.logging import INFO, get_logger
+
+from .utils import (
+    assert_arrow_memory_doesnt_increase,
+    assert_arrow_memory_increases,
+    require_beam,
+    require_faiss,
+    set_current_working_directory_to_temp_dir,
+)
+
+
+class DummyBuilder(DatasetBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN)]
+
+    def _prepare_split(self, split_generator, **kwargs):
+        fname = f"{self.dataset_name}-{split_generator.name}.arrow"
+        with ArrowWriter(features=self.info.features, path=os.path.join(self._output_dir, fname)) as writer:
+            writer.write_batch({"text": ["foo"] * 100})
+            num_examples, num_bytes = writer.finalize()
+        split_generator.split_info.num_examples = num_examples
+        split_generator.split_info.num_bytes = num_bytes
+
+
+class DummyGeneratorBasedBuilder(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN)]
+
+    def _generate_examples(self):
+        for i in range(100):
+            yield i, {"text": "foo"}
+
+
+class DummyArrowBasedBuilder(ArrowBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN)]
+
+    def _generate_tables(self):
+        for i in range(10):
+            yield i, pa.table({"text": ["foo"] * 10})
+
+
+class DummyBeamBasedBuilder(BeamBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN)]
+
+    def _build_pcollection(self, pipeline):
+        import apache_beam as beam
+
+        def _process(item):
+            for i in range(10):
+                yield f"{i}_{item}", {"text": "foo"}
+
+        return pipeline | "Initialize" >> beam.Create(range(10)) | "Extract content" >> beam.FlatMap(_process)
+
+
+class DummyGeneratorBasedBuilderWithIntegers(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"id": Value("int8")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN)]
+
+    def _generate_examples(self):
+        for i in range(100):
+            yield i, {"id": i}
+
+
+class DummyGeneratorBasedBuilderConfig(BuilderConfig):
+    def __init__(self, content="foo", times=2, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.content = content
+        self.times = times
+
+
+class DummyGeneratorBasedBuilderWithConfig(GeneratorBasedBuilder):
+    BUILDER_CONFIG_CLASS = DummyGeneratorBasedBuilderConfig
+
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN)]
+
+    def _generate_examples(self):
+        for i in range(100):
+            yield i, {"text": self.config.content * self.config.times}
+
+
+class DummyBuilderWithMultipleConfigs(DummyBuilder):
+    BUILDER_CONFIGS = [
+        DummyGeneratorBasedBuilderConfig(name="a"),
+        DummyGeneratorBasedBuilderConfig(name="b"),
+    ]
+
+
+class DummyBuilderWithDefaultConfig(DummyBuilderWithMultipleConfigs):
+    DEFAULT_CONFIG_NAME = "a"
+
+
+class DummyBuilderWithDownload(DummyBuilder):
+    def __init__(self, *args, rel_path=None, abs_path=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._rel_path = rel_path
+        self._abs_path = abs_path
+
+    def _split_generators(self, dl_manager):
+        if self._rel_path is not None:
+            assert os.path.exists(dl_manager.download(self._rel_path)), "dl_manager must support relative paths"
+        if self._abs_path is not None:
+            assert os.path.exists(dl_manager.download(self._abs_path)), "dl_manager must support absolute paths"
+        return [SplitGenerator(name=Split.TRAIN)]
+
+
+class DummyBuilderWithManualDownload(DummyBuilderWithMultipleConfigs):
+    @property
+    def manual_download_instructions(self):
+        return "To use the dataset you have to download some stuff manually and pass the data path to data_dir"
+
+    def _split_generators(self, dl_manager):
+        if not os.path.exists(self.config.data_dir):
+            raise FileNotFoundError(f"data_dir {self.config.data_dir} doesn't exist.")
+        return [SplitGenerator(name=Split.TRAIN)]
+
+
+class DummyArrowBasedBuilderWithShards(ArrowBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]
+
+    def _generate_tables(self, filepaths):
+        idx = 0
+        for filepath in filepaths:
+            for i in range(10):
+                yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
+                idx += 1
+
+
+class DummyGeneratorBasedBuilderWithShards(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepaths": [f"data{i}.txt" for i in range(4)]})]
+
+    def _generate_examples(self, filepaths):
+        idx = 0
+        for filepath in filepaths:
+            for i in range(100):
+                yield idx, {"id": i, "filepath": filepath}
+                idx += 1
+
+
+class DummyArrowBasedBuilderWithAmbiguousShards(ArrowBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [
+            SplitGenerator(
+                name=Split.TRAIN,
+                gen_kwargs={
+                    "filepaths": [f"data{i}.txt" for i in range(4)],
+                    "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
+                },
+            )
+        ]
+
+    def _generate_tables(self, filepaths, dummy_kwarg_with_different_length):
+        idx = 0
+        for filepath in filepaths:
+            for i in range(10):
+                yield idx, pa.table({"id": range(10 * i, 10 * (i + 1)), "filepath": [filepath] * 10})
+                idx += 1
+
+
+class DummyGeneratorBasedBuilderWithAmbiguousShards(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(features=Features({"id": Value("int8"), "filepath": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        return [
+            SplitGenerator(
+                name=Split.TRAIN,
+                gen_kwargs={
+                    "filepaths": [f"data{i}.txt" for i in range(4)],
+                    "dummy_kwarg_with_different_length": [f"dummy_data{i}.txt" for i in range(3)],
+                },
+            )
+        ]
+
+    def _generate_examples(self, filepaths, dummy_kwarg_with_different_length):
+        idx = 0
+        for filepath in filepaths:
+            for i in range(100):
+                yield idx, {"id": i, "filepath": filepath}
+                idx += 1
+
+
+def _run_concurrent_download_and_prepare(tmp_dir):
+    builder = DummyBuilder(cache_dir=tmp_dir)
+    builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS)
+    return builder
+
+
+def check_streaming(builder):
+    builders_module = importlib.import_module(builder.__module__)
+    assert builders_module._patched_for_streaming
+    assert builders_module.os.path.join is xjoin
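+
+
+# Editor's note -- an illustrative sketch, not part of the original test file: the
+# BuilderTest cases below all follow the same lifecycle against the dummy builders
+# defined above, roughly
+#
+#     builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
+#     builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+#     dset = builder.as_dataset("train")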
+
+
+class BuilderTest(TestCase):
+    def test_download_and_prepare(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
+                    )
+                )
+            )
+            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
+            self.assertEqual(builder.info.splits["train"].num_examples, 100)
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+            )
+
+    def test_download_and_prepare_checksum_computation(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder_no_verification = DummyBuilder(cache_dir=tmp_dir)
+            builder_no_verification.download_and_prepare(
+                try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD
+            )
+            self.assertTrue(
+                all(v["checksum"] is not None for _, v in builder_no_verification.info.download_checksums.items())
+            )
+            builder_with_verification = DummyBuilder(cache_dir=tmp_dir)
+            builder_with_verification.download_and_prepare(
+                try_from_hf_gcs=False,
+                download_mode=DownloadMode.FORCE_REDOWNLOAD,
+                verification_mode=VerificationMode.ALL_CHECKS,
+            )
+            self.assertTrue(
+                all(v["checksum"] is None for _, v in builder_with_verification.info.download_checksums.items())
+            )
+
+    def test_concurrent_download_and_prepare(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            processes = 2
+            with Pool(processes=processes) as pool:
+                jobs = [
+                    pool.apply_async(_run_concurrent_download_and_prepare, kwds={"tmp_dir": tmp_dir})
+                    for _ in range(processes)
+                ]
+                builders = [job.get() for job in jobs]
+                for builder in builders:
+                    self.assertTrue(
+                        os.path.exists(
+                            os.path.join(
+                                tmp_dir,
+                                builder.dataset_name,
+                                "default",
+                                "0.0.0",
+                                f"{builder.dataset_name}-train.arrow",
+                            )
+                        )
+                    )
+                    self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
+                    self.assertEqual(builder.info.splits["train"].num_examples, 100)
+                    self.assertTrue(
+                        os.path.exists(
+                            os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json")
+                        )
+                    )
+
+    def test_download_and_prepare_with_base_path(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            rel_path = "dummy1.data"
+            abs_path = os.path.join(tmp_dir, "dummy2.data")
+            # test relative path is missing
+            builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path)
+            with self.assertRaises(FileNotFoundError):
+                builder.download_and_prepare(
+                    try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
+                )
+            # test absolute path is missing
+            builder = DummyBuilderWithDownload(cache_dir=tmp_dir, abs_path=abs_path)
+            with self.assertRaises(FileNotFoundError):
+                builder.download_and_prepare(
+                    try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
+                )
+            # test that they are both properly loaded when they exist
+            open(os.path.join(tmp_dir, rel_path), "w")
+            open(abs_path, "w")
+            builder = DummyBuilderWithDownload(cache_dir=tmp_dir, rel_path=rel_path, abs_path=abs_path)
+            builder.download_and_prepare(
+                try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD, base_path=tmp_dir
+            )
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_dir,
+                        builder.dataset_name,
+                        "default",
+                        "0.0.0",
+                        f"{builder.dataset_name}-train.arrow",
+                    )
+                )
+            )
+
+    def test_as_dataset_with_post_process(self):
+        def _post_process(self, dataset, resources_paths):
+            def char_tokenize(example):
+                return {"tokens": list(example["text"])}
+
+            return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])
+
+        def _post_processing_resources(self, split):
+            return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder.info.post_processed = PostProcessedInfo(
+                features=Features({"text": Value("string"), "tokens": [Value("string")]})
+            )
+            builder._post_process = types.MethodType(_post_process, builder)
+            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
+            os.makedirs(builder.cache_dir)
+
+            builder.info.splits = SplitDict()
+            builder.info.splits.add(SplitInfo("train", num_examples=10))
+            builder.info.splits.add(SplitInfo("test", num_examples=10))
+
+            for split in builder.info.splits:
+                with ArrowWriter(
+                    path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
+                    features=Features({"text": Value("string")}),
+                ) as writer:
+                    writer.write_batch({"text": ["foo"] * 10})
+                    writer.finalize()
+
+                with ArrowWriter(
+                    path=os.path.join(builder.cache_dir, f"tokenized_dataset-{split}.arrow"),
+                    features=Features({"text": Value("string"), "tokens": [Value("string")]}),
+                ) as writer:
+                    writer.write_batch({"text": ["foo"] * 10, "tokens": [list("foo")] * 10})
+                    writer.finalize()
+
+            dsets = builder.as_dataset()
+            self.assertIsInstance(dsets, DatasetDict)
+            self.assertListEqual(list(dsets.keys()), ["train", "test"])
+            self.assertEqual(len(dsets["train"]), 10)
+            self.assertEqual(len(dsets["test"]), 10)
+            self.assertDictEqual(
+                dsets["train"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
+            )
+            self.assertDictEqual(
+                dsets["test"].features, Features({"text": Value("string"), "tokens": [Value("string")]})
+            )
+            self.assertListEqual(dsets["train"].column_names, ["text", "tokens"])
+            self.assertListEqual(dsets["test"].column_names, ["text", "tokens"])
+            del dsets
+
+            dset = builder.as_dataset("train")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train")
+            self.assertEqual(len(dset), 10)
+            self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
+            self.assertListEqual(dset.column_names, ["text", "tokens"])
+            self.assertGreater(builder.info.post_processing_size, 0)
+            self.assertGreater(
+                builder.info.post_processed.resources_checksums["train"]["tokenized_dataset"]["num_bytes"], 0
+            )
+            del dset
+
+            dset = builder.as_dataset("train+test[:30%]")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train+test[:30%]")
+            self.assertEqual(len(dset), 13)
+            self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
+            self.assertListEqual(dset.column_names, ["text", "tokens"])
+            del dset
+
+            dset = builder.as_dataset("all")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train+test")
+            self.assertEqual(len(dset), 20)
+            self.assertDictEqual(dset.features, Features({"text": Value("string"), "tokens": [Value("string")]}))
+            self.assertListEqual(dset.column_names, ["text", "tokens"])
+            del dset
+
+        def _post_process(self, dataset, resources_paths):
+            return dataset.select([0, 1], keep_in_memory=True)
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder._post_process = types.MethodType(_post_process, builder)
+            os.makedirs(builder.cache_dir)
+
+            builder.info.splits = SplitDict()
+            builder.info.splits.add(SplitInfo("train", num_examples=10))
+            builder.info.splits.add(SplitInfo("test", num_examples=10))
+
+            for split in builder.info.splits:
+                with ArrowWriter(
+                    path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
+                    features=Features({"text": Value("string")}),
+                ) as writer:
+                    writer.write_batch({"text": ["foo"] * 10})
+                    writer.finalize()
+
+                with ArrowWriter(
+                    path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
+                    features=Features({"text": Value("string")}),
+                ) as writer:
+                    writer.write_batch({"text": ["foo"] * 2})
+                    writer.finalize()
+
+            dsets = builder.as_dataset()
+            self.assertIsInstance(dsets, DatasetDict)
+            self.assertListEqual(list(dsets.keys()), ["train", "test"])
+            self.assertEqual(len(dsets["train"]), 2)
+            self.assertEqual(len(dsets["test"]), 2)
+            self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
+            self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
+            self.assertListEqual(dsets["train"].column_names, ["text"])
+            self.assertListEqual(dsets["test"].column_names, ["text"])
+            del dsets
+
+            dset = builder.as_dataset("train")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train")
+            self.assertEqual(len(dset), 2)
+            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
+            self.assertListEqual(dset.column_names, ["text"])
+            del dset
+
+            dset = builder.as_dataset("train+test[:30%]")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train+test[:30%]")
+            self.assertEqual(len(dset), 2)
+            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
+            self.assertListEqual(dset.column_names, ["text"])
+            del dset
+
+    @require_faiss
+    def test_as_dataset_with_post_process_with_index(self):
+        def _post_process(self, dataset, resources_paths):
+            if os.path.exists(resources_paths["index"]):
+                dataset.load_faiss_index("my_index", resources_paths["index"])
+                return dataset
+            else:
+                dataset.add_faiss_index_from_external_arrays(
+                    external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
+                )
+                dataset.save_faiss_index("my_index", resources_paths["index"])
+                return dataset
+
+        def _post_processing_resources(self, split):
+            return {"index": f"Flat-{split}.faiss"}
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder._post_process = types.MethodType(_post_process, builder)
+            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
+            os.makedirs(builder.cache_dir)
+
+            builder.info.splits = SplitDict()
+            builder.info.splits.add(SplitInfo("train", num_examples=10))
+            builder.info.splits.add(SplitInfo("test", num_examples=10))
+
+            for split in builder.info.splits:
+                with ArrowWriter(
+                    path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{split}.arrow"),
+                    features=Features({"text": Value("string")}),
+                ) as writer:
+                    writer.write_batch({"text": ["foo"] * 10})
+                    writer.finalize()
+
+                with ArrowWriter(
+                    path=os.path.join(builder.cache_dir, f"small_dataset-{split}.arrow"),
+                    features=Features({"text": Value("string")}),
+                ) as writer:
+                    writer.write_batch({"text": ["foo"] * 2})
+                    writer.finalize()
+
+            dsets = builder.as_dataset()
+            self.assertIsInstance(dsets, DatasetDict)
+            self.assertListEqual(list(dsets.keys()), ["train", "test"])
+            self.assertEqual(len(dsets["train"]), 10)
+            self.assertEqual(len(dsets["test"]), 10)
+            self.assertDictEqual(dsets["train"].features, Features({"text": Value("string")}))
+            self.assertDictEqual(dsets["test"].features, Features({"text": Value("string")}))
+            self.assertListEqual(dsets["train"].column_names, ["text"])
+            self.assertListEqual(dsets["test"].column_names, ["text"])
+            self.assertListEqual(dsets["train"].list_indexes(), ["my_index"])
+            self.assertListEqual(dsets["test"].list_indexes(), ["my_index"])
+            self.assertGreater(builder.info.post_processing_size, 0)
+            self.assertGreater(builder.info.post_processed.resources_checksums["train"]["index"]["num_bytes"], 0)
+            del dsets
+
+            dset = builder.as_dataset("train")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train")
+            self.assertEqual(len(dset), 10)
+            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
+            self.assertListEqual(dset.column_names, ["text"])
+            self.assertListEqual(dset.list_indexes(), ["my_index"])
+            del dset
+
+            dset = builder.as_dataset("train+test[:30%]")
+            self.assertIsInstance(dset, Dataset)
+            self.assertEqual(dset.split, "train+test[:30%]")
+            self.assertEqual(len(dset), 13)
+            self.assertDictEqual(dset.features, Features({"text": Value("string")}))
+            self.assertListEqual(dset.column_names, ["text"])
+            self.assertListEqual(dset.list_indexes(), ["my_index"])
+            del dset
+
+    def test_download_and_prepare_with_post_process(self):
+        def _post_process(self, dataset, resources_paths):
+            def char_tokenize(example):
+                return {"tokens": list(example["text"])}
+
+            return dataset.map(char_tokenize, cache_file_name=resources_paths["tokenized_dataset"])
+
+        def _post_processing_resources(self, split):
+            return {"tokenized_dataset": f"tokenized_dataset-{split}.arrow"}
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder.info.post_processed = PostProcessedInfo(
+                features=Features({"text": Value("string"), "tokens": [Value("string")]})
+            )
+            builder._post_process = types.MethodType(_post_process, builder)
+            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
+            builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
+                    )
+                )
+            )
+            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
+            self.assertDictEqual(
+                builder.info.post_processed.features,
+                Features({"text": Value("string"), "tokens": [Value("string")]}),
+            )
+            self.assertEqual(builder.info.splits["train"].num_examples, 100)
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+            )
+
+        def _post_process(self, dataset, resources_paths):
+            return dataset.select([0, 1], keep_in_memory=True)
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder._post_process = types.MethodType(_post_process, builder)
+            builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
+                    )
+                )
+            )
+            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
+            self.assertIsNone(builder.info.post_processed)
+            self.assertEqual(builder.info.splits["train"].num_examples, 100)
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+            )
+
+        def _post_process(self, dataset, resources_paths):
+            if os.path.exists(resources_paths["index"]):
+                dataset.load_faiss_index("my_index", resources_paths["index"])
+                return dataset
+            else:
+                dataset = dataset.add_faiss_index_from_external_arrays(
+                    external_arrays=np.ones((len(dataset), 8)), string_factory="Flat", index_name="my_index"
+                )
+                dataset.save_faiss_index("my_index", resources_paths["index"])
+                return dataset
+
+        def _post_processing_resources(self, split):
+            return {"index": f"Flat-{split}.faiss"}
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder._post_process = types.MethodType(_post_process, builder)
+            builder._post_processing_resources = types.MethodType(_post_processing_resources, builder)
+            builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_dir, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.arrow"
+                    )
+                )
+            )
+            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
+            self.assertIsNone(builder.info.post_processed)
+            self.assertEqual(builder.info.splits["train"].num_examples, 100)
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+            )
+
+    def test_error_download_and_prepare(self):
+        def _prepare_split(self, split_generator, **kwargs):
+            raise ValueError()
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyBuilder(cache_dir=tmp_dir)
+            builder._prepare_split = types.MethodType(_prepare_split, builder)
+            self.assertRaises(
+                ValueError,
+                builder.download_and_prepare,
+                try_from_hf_gcs=False,
+                download_mode=DownloadMode.FORCE_REDOWNLOAD,
+            )
+            self.assertRaises(FileNotFoundError, builder.as_dataset)
+
+    def test_generator_based_download_and_prepare(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
+            builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+            self.assertTrue(
+                os.path.exists(
+                    os.path.join(
+                        tmp_dir,
+                        builder.dataset_name,
+                        "default",
+                        "0.0.0",
+                        f"{builder.dataset_name}-train.arrow",
+                    )
+                )
+            )
+            self.assertDictEqual(builder.info.features, Features({"text": Value("string")}))
+            self.assertEqual(builder.info.splits["train"].num_examples, 100)
+            self.assertTrue(
+                os.path.exists(os.path.join(tmp_dir, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+            )
+
+        # Test that duplicated keys are ignored if verification_mode is "no_checks"
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir)
+            with patch("datasets.builder.ArrowWriter", side_effect=ArrowWriter) as mock_arrow_writer:
+                builder.download_and_prepare(
+                    download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS
+                )
+                mock_arrow_writer.assert_called_once()
+                args, kwargs = mock_arrow_writer.call_args_list[0]
+                self.assertFalse(kwargs["check_duplicates"])
+
+                mock_arrow_writer.reset_mock()
+
+                builder.download_and_prepare(
+                    download_mode=DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.BASIC_CHECKS
+                )
+                mock_arrow_writer.assert_called_once()
+                args, kwargs = mock_arrow_writer.call_args_list[0]
+                self.assertTrue(kwargs["check_duplicates"])
+
+    def test_cache_dir_no_args(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_dir=None, data_files=None)
+            relative_cache_dir_parts = Path(builder._relative_data_dir()).parts
+            self.assertTupleEqual(relative_cache_dir_parts, (builder.dataset_name, "default", "0.0.0"))
+
+    def test_cache_dir_for_data_files(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            dummy_data1 = os.path.join(tmp_dir, "dummy_data1.txt")
+            with open(dummy_data1, "w", encoding="utf-8") as f:
+                f.writelines("foo bar")
+            dummy_data2 = os.path.join(tmp_dir, "dummy_data2.txt")
+            with open(dummy_data2, "w", encoding="utf-8") as f:
+                f.writelines("foo bar\n")
+
+            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data1)
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1])
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": dummy_data1})
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={Split.TRAIN: dummy_data1})
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"train": [dummy_data1]})
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files={"test": dummy_data1})
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=dummy_data2)
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2])
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+
+            builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data1, dummy_data2])
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(cache_dir=tmp_dir, data_files=[dummy_data2, dummy_data1])
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+
+            builder = DummyGeneratorBasedBuilder(
+                cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
+            )
+            other_builder = DummyGeneratorBasedBuilder(
+                cache_dir=tmp_dir, data_files={"train": dummy_data1, "test": dummy_data2}
+            )
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(
+                cache_dir=tmp_dir, data_files={"train": [dummy_data1], "test": dummy_data2}
+            )
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(
+                cache_dir=tmp_dir, data_files={"train": dummy_data1, "validation": dummy_data2}
+            )
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilder(
+                cache_dir=tmp_dir,
+                data_files={"train": [dummy_data1, dummy_data2], "test": dummy_data2},
+            )
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+
+    def test_cache_dir_for_features(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            f1 = Features({"id": Value("int8")})
+            f2 = Features({"id": Value("int32")})
+            builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
+            other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f1)
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, features=f2)
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+
+    def test_cache_dir_for_config_kwargs(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # create config on the fly
+            builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo", times=2)
+            other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, times=2, content="foo")
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            self.assertIn("content=foo", builder.cache_dir)
+            self.assertIn("times=2", builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="bar", times=2)
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyGeneratorBasedBuilderWithConfig(cache_dir=tmp_dir, content="foo")
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # overwrite an existing config
+            builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo", times=2)
+            other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", times=2, content="foo")
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            self.assertIn("content=foo", builder.cache_dir)
+            self.assertIn("times=2", builder.cache_dir)
+            other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="bar", times=2)
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a", content="foo")
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
+
+    def test_config_names(self):
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            with self.assertRaises(ValueError) as error_context:
+                DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, data_files=None, data_dir=None)
+            self.assertIn("Please pick one among the available configs", str(error_context.exception))
+
+            builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="a")
+            self.assertEqual(builder.config.name, "a")
+
+            builder = DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir, config_name="b")
+            self.assertEqual(builder.config.name, "b")
+
+            with self.assertRaises(ValueError):
+                DummyBuilderWithMultipleConfigs(cache_dir=tmp_dir)
+
+            builder = DummyBuilderWithDefaultConfig(cache_dir=tmp_dir)
+            self.assertEqual(builder.config.name, "a")
+
+    def test_cache_dir_for_data_dir(self):
+        with tempfile.TemporaryDirectory() as tmp_dir, tempfile.TemporaryDirectory() as data_dir:
+            builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
+            other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=data_dir)
+            self.assertEqual(builder.cache_dir, other_builder.cache_dir)
+            other_builder = DummyBuilderWithManualDownload(cache_dir=tmp_dir, config_name="a", data_dir=tmp_dir)
+            self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)
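+
+
+# Editor's note -- not part of the original test file: the split arithmetic asserted
+# above and in test_builder_as_dataset below follows ReadInstruction percent rounding,
+# e.g. with 10 "train" and 10 "test" rows, "train+test[:30%]" selects 10 + 3 = 13 rows.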
+
+
+def test_arrow_based_download_and_prepare(tmp_path):
+    builder = DummyArrowBasedBuilder(cache_dir=tmp_path)
+    builder.download_and_prepare()
+    assert os.path.exists(
+        os.path.join(
+            tmp_path,
+            builder.dataset_name,
+            "default",
+            "0.0.0",
+            f"{builder.dataset_name}-train.arrow",
+        )
+    )
+    assert builder.info.features == Features({"text": Value("string")})
+    assert builder.info.splits["train"].num_examples == 100
+    assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+
+
+@require_beam
+def test_beam_based_download_and_prepare(tmp_path):
+    builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
+    builder.download_and_prepare()
+    assert os.path.exists(
+        os.path.join(
+            tmp_path,
+            builder.dataset_name,
+            "default",
+            "0.0.0",
+            f"{builder.dataset_name}-train.arrow",
+        )
+    )
+    assert builder.info.features == Features({"text": Value("string")})
+    assert builder.info.splits["train"].num_examples == 100
+    assert os.path.exists(os.path.join(tmp_path, builder.dataset_name, "default", "0.0.0", "dataset_info.json"))
+
+
+@require_beam
+def test_beam_based_as_dataset(tmp_path):
+    builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner")
+    builder.download_and_prepare()
+    dataset = builder.as_dataset()
+    assert dataset
+    assert isinstance(dataset["train"], Dataset)
+    assert len(dataset["train"]) > 0
+
+
+@pytest.mark.parametrize(
+    "split, expected_dataset_class, expected_dataset_length",
+    [
+        (None, DatasetDict, 10),
+        ("train", Dataset, 10),
+        ("train+test[:30%]", Dataset, 13),
+    ],
+)
+@pytest.mark.parametrize("in_memory", [False, True])
+def test_builder_as_dataset(split, expected_dataset_class, expected_dataset_length, in_memory, tmp_path):
+    cache_dir = str(tmp_path)
+    builder = DummyBuilder(cache_dir=cache_dir)
+    os.makedirs(builder.cache_dir)
+
+    builder.info.splits = SplitDict()
+    builder.info.splits.add(SplitInfo("train", num_examples=10))
+    builder.info.splits.add(SplitInfo("test", num_examples=10))
+
+    for info_split in builder.info.splits:
+        with ArrowWriter(
+            path=os.path.join(builder.cache_dir, f"{builder.dataset_name}-{info_split}.arrow"),
+            features=Features({"text": Value("string")}),
+        ) as writer:
+            writer.write_batch({"text": ["foo"] * 10})
+            writer.finalize()
+
+    with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
+        dataset = builder.as_dataset(split=split, in_memory=in_memory)
+    assert isinstance(dataset, expected_dataset_class)
+    if isinstance(dataset, DatasetDict):
+        assert list(dataset.keys()) == ["train", "test"]
+        datasets = dataset.values()
+        expected_splits = ["train", "test"]
+    elif isinstance(dataset, Dataset):
+        datasets = [dataset]
+        expected_splits = [split]
+    for dataset, expected_split in zip(datasets, expected_splits):
+        assert dataset.split == expected_split
+        assert len(dataset) == expected_dataset_length
+        assert dataset.features == Features({"text": Value("string")})
+        assert dataset.column_names == ["text"]
+
+
+@pytest.mark.parametrize("in_memory", [False, True])
+def test_generator_based_builder_as_dataset(in_memory, tmp_path):
+    cache_dir = tmp_path / "data"
+    cache_dir.mkdir()
+    cache_dir = str(cache_dir)
+    builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir)
+    builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+    with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
+        dataset = builder.as_dataset("train", in_memory=in_memory)
+    assert dataset.data.to_pydict() == {"text": ["foo"] * 100}
+
+
+@pytest.mark.parametrize(
+    "writer_batch_size, default_writer_batch_size, expected_chunks", [(None, None, 1), (None, 5, 20), (10, None, 10)]
+)
+def test_custom_writer_batch_size(tmp_path, writer_batch_size, default_writer_batch_size, expected_chunks):
+    cache_dir = str(tmp_path)
+    if default_writer_batch_size:
+        DummyGeneratorBasedBuilder.DEFAULT_WRITER_BATCH_SIZE = default_writer_batch_size
+    builder = DummyGeneratorBasedBuilder(cache_dir=cache_dir, writer_batch_size=writer_batch_size)
+    assert builder._writer_batch_size == (writer_batch_size or default_writer_batch_size)
+    builder.download_and_prepare(try_from_hf_gcs=False, download_mode=DownloadMode.FORCE_REDOWNLOAD)
+    dataset = builder.as_dataset("train")
+    assert len(dataset.data[0].chunks) == expected_chunks
+
+
+def test_builder_as_streaming_dataset(tmp_path):
+    dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
+    check_streaming(dummy_builder)
+    dsets = dummy_builder.as_streaming_dataset()
+    assert isinstance(dsets, IterableDatasetDict)
+    assert isinstance(dsets["train"], IterableDataset)
+    assert len(list(dsets["train"])) == 100
+    dset = dummy_builder.as_streaming_dataset(split="train")
+    assert isinstance(dset, IterableDataset)
+    assert len(list(dset)) == 100
+
+
+@require_beam
+def test_beam_based_builder_as_streaming_dataset(tmp_path):
+    builder = DummyBeamBasedBuilder(cache_dir=tmp_path)
+    check_streaming(builder)
+    with pytest.raises(DatasetNotOnHfGcsError):
+        builder.as_streaming_dataset()
+
+
+def _run_test_builder_streaming_works_in_subprocesses(builder):
+    check_streaming(builder)
+    dset = builder.as_streaming_dataset(split="train")
+    assert isinstance(dset, IterableDataset)
+    assert len(list(dset)) == 100
+
+
+def test_builder_streaming_works_in_subprocess(tmp_path):
+    dummy_builder = DummyGeneratorBasedBuilder(cache_dir=str(tmp_path))
+    p = Process(target=_run_test_builder_streaming_works_in_subprocesses, args=(dummy_builder,))
+    p.start()
+    p.join()
+
+
+class DummyBuilderWithVersion(GeneratorBasedBuilder):
+    VERSION = "2.0.0"
+
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        pass
+
+    def _generate_examples(self):
+        pass
+
+
+class DummyBuilderWithBuilderConfigs(GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [BuilderConfig(name="custom", version="2.0.0")]
+
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        pass
+
+    def _generate_examples(self):
+        pass
+
+
+class CustomBuilderConfig(BuilderConfig):
+    def __init__(self, date=None, language=None, version="2.0.0", **kwargs):
+        name = f"{date}.{language}"
+        super().__init__(name=name, version=version, **kwargs)
+        self.date = date
+        self.language = language
+
+
+class DummyBuilderWithCustomBuilderConfigs(GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [CustomBuilderConfig(date="20220501", language="en")]
+    BUILDER_CONFIG_CLASS = CustomBuilderConfig
+
+    def _info(self):
+        return DatasetInfo(features=Features({"text": Value("string")}))
+
+    def _split_generators(self, dl_manager):
+        pass
+
+    def _generate_examples(self):
+        pass
+
+
+@pytest.mark.parametrize(
+    "builder_class, kwargs",
+    [
+        (DummyBuilderWithVersion, {}),
+        (DummyBuilderWithBuilderConfigs, {"config_name": "custom"}),
+        (DummyBuilderWithCustomBuilderConfigs, {"config_name": "20220501.en"}),
+        (DummyBuilderWithCustomBuilderConfigs, {"date": "20220501", "language": "ca"}),
+    ],
+)
+def test_builder_config_version(builder_class, kwargs, tmp_path):
+    cache_dir = str(tmp_path)
+    builder = builder_class(cache_dir=cache_dir, **kwargs)
+    assert builder.config.version == "2.0.0"
+
+
+def test_builder_download_and_prepare_with_absolute_output_dir(tmp_path):
+    builder = DummyGeneratorBasedBuilder()
+    output_dir = str(tmp_path)
+    builder.download_and_prepare(output_dir)
+    assert builder._output_dir.startswith(tmp_path.resolve().as_posix())
+    assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
+    assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
+    assert not os.path.exists(os.path.join(output_dir + ".incomplete"))
+
+
+def test_builder_download_and_prepare_with_relative_output_dir():
+    with set_current_working_directory_to_temp_dir():
+        builder = DummyGeneratorBasedBuilder()
+        output_dir = "test-out"
+        builder.download_and_prepare(output_dir)
+        assert Path(builder._output_dir).resolve().as_posix().startswith(Path(output_dir).resolve().as_posix())
+        assert os.path.exists(os.path.join(output_dir, "dataset_info.json"))
+        assert os.path.exists(os.path.join(output_dir, f"{builder.dataset_name}-train.arrow"))
+        assert not os.path.exists(os.path.join(output_dir + ".incomplete"))
+
+
+def test_builder_with_filesystem_download_and_prepare(tmp_path, mockfs):
+    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
+    builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
+    assert builder._output_dir.startswith("mock://my_dataset")
+    assert is_local_path(builder._cache_downloaded_dir)
+    assert isinstance(builder._fs, type(mockfs))
+    assert builder._fs.storage_options == mockfs.storage_options
+    assert mockfs.exists("my_dataset/dataset_info.json")
+    assert mockfs.exists(f"my_dataset/{builder.dataset_name}-train.arrow")
+    assert not mockfs.exists("my_dataset.incomplete")
+
+
+def test_builder_with_filesystem_download_and_prepare_reload(tmp_path, mockfs, caplog):
+    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
+    mockfs.makedirs("my_dataset")
+    DatasetInfo().write_to_directory("mock://my_dataset", storage_options=mockfs.storage_options)
+    mockfs.touch(f"my_dataset/{builder.dataset_name}-train.arrow")
+    caplog.clear()
+    with caplog.at_level(INFO, logger=get_logger().name):
+        builder.download_and_prepare("mock://my_dataset", storage_options=mockfs.storage_options)
+    assert "Found cached dataset" in caplog.text
+
+
+def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path):
+    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)
+    builder.download_and_prepare(file_format="parquet")
+    assert builder.info.splits["train"].num_examples == 100
+    parquet_path = os.path.join(
+        tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet"
+    )
+    assert os.path.exists(parquet_path)
+    assert pq.ParquetFile(parquet_path) is not None
+
+
+def test_generator_based_builder_download_and_prepare_sharded(tmp_path):
+    writer_batch_size = 25
+    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
+    with patch("datasets.config.MAX_SHARD_SIZE", 1):  # one batch per shard
+        builder.download_and_prepare(file_format="parquet")
+    expected_num_shards = 100 // writer_batch_size
+    assert builder.info.splits["train"].num_examples == 100
+    parquet_path = os.path.join(
+        tmp_path,
+        builder.dataset_name,
+        "default",
+        "0.0.0",
+        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
+    )
+    assert os.path.exists(parquet_path)
+    parquet_files = [
+        pq.ParquetFile(parquet_path)
+        for parquet_path in Path(tmp_path).rglob(
+            f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
+        )
+    ]
+    assert len(parquet_files) == expected_num_shards
+    assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100
+
+
+def test_generator_based_builder_download_and_prepare_with_max_shard_size(tmp_path):
+    writer_batch_size = 25
+    builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path, writer_batch_size=writer_batch_size)
+    builder.download_and_prepare(file_format="parquet", max_shard_size=1)  # one batch per shard
+    expected_num_shards = 100 // writer_batch_size
+    assert builder.info.splits["train"].num_examples == 100
+    parquet_path = os.path.join(
+        tmp_path,
+        builder.dataset_name,
+        "default",
+        "0.0.0",
+        f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet",
+    )
+    assert os.path.exists(parquet_path)
+    parquet_files = [
+        pq.ParquetFile(parquet_path)
+        for parquet_path in Path(tmp_path).rglob(
+            f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet"
+        )
+    ]
+    assert len(parquet_files) 
== expected_num_shards + assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 + + +def test_generator_based_builder_download_and_prepare_with_num_proc(tmp_path): + builder = DummyGeneratorBasedBuilderWithShards(cache_dir=tmp_path) + builder.download_and_prepare(num_proc=2) + expected_num_shards = 2 + assert builder.info.splits["train"].num_examples == 400 + assert builder.info.splits["train"].shard_lengths == [200, 200] + arrow_path = os.path.join( + tmp_path, + builder.dataset_name, + "default", + "0.0.0", + f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow", + ) + assert os.path.exists(arrow_path) + ds = builder.as_dataset("train") + assert len(ds) == 400 + assert ds.to_dict() == { + "id": [i for _ in range(4) for i in range(100)], + "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)], + } + + +@pytest.mark.parametrize( + "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))] +) +def test_generator_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path): + builder = DummyGeneratorBasedBuilderWithAmbiguousShards(cache_dir=tmp_path) + with expectation: + builder.download_and_prepare(num_proc=num_proc) + + +def test_arrow_based_builder_download_and_prepare_as_parquet(tmp_path): + builder = DummyArrowBasedBuilder(cache_dir=tmp_path) + builder.download_and_prepare(file_format="parquet") + assert builder.info.splits["train"].num_examples == 100 + parquet_path = os.path.join( + tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" + ) + assert os.path.exists(parquet_path) + assert pq.ParquetFile(parquet_path) is not None + + +def test_arrow_based_builder_download_and_prepare_sharded(tmp_path): + builder = DummyArrowBasedBuilder(cache_dir=tmp_path) + with patch("datasets.config.MAX_SHARD_SIZE", 1): # one batch per shard + builder.download_and_prepare(file_format="parquet") + expected_num_shards = 10 + assert builder.info.splits["train"].num_examples == 100 + parquet_path = os.path.join( + tmp_path, + builder.dataset_name, + "default", + "0.0.0", + f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", + ) + assert os.path.exists(parquet_path) + parquet_files = [ + pq.ParquetFile(parquet_path) + for parquet_path in Path(tmp_path).rglob( + f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" + ) + ] + assert len(parquet_files) == expected_num_shards + assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 + + +def test_arrow_based_builder_download_and_prepare_with_max_shard_size(tmp_path): + builder = DummyArrowBasedBuilder(cache_dir=tmp_path) + builder.download_and_prepare(file_format="parquet", max_shard_size=1) # one table per shard + expected_num_shards = 10 + assert builder.info.splits["train"].num_examples == 100 + parquet_path = os.path.join( + tmp_path, + builder.dataset_name, + "default", + "0.0.0", + f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.parquet", + ) + assert os.path.exists(parquet_path) + parquet_files = [ + pq.ParquetFile(parquet_path) + for parquet_path in Path(tmp_path).rglob( + f"{builder.dataset_name}-train-*-of-{expected_num_shards:05d}.parquet" + ) + ] + assert len(parquet_files) == expected_num_shards + assert sum(parquet_file.metadata.num_rows for parquet_file in parquet_files) == 100 + + +def test_arrow_based_builder_download_and_prepare_with_num_proc(tmp_path): + builder = 
DummyArrowBasedBuilderWithShards(cache_dir=tmp_path) + builder.download_and_prepare(num_proc=2) + expected_num_shards = 2 + assert builder.info.splits["train"].num_examples == 400 + assert builder.info.splits["train"].shard_lengths == [200, 200] + arrow_path = os.path.join( + tmp_path, + builder.dataset_name, + "default", + "0.0.0", + f"{builder.dataset_name}-train-00000-of-{expected_num_shards:05d}.arrow", + ) + assert os.path.exists(arrow_path) + ds = builder.as_dataset("train") + assert len(ds) == 400 + assert ds.to_dict() == { + "id": [i for _ in range(4) for i in range(100)], + "filepath": [f"data{i}.txt" for i in range(4) for _ in range(100)], + } + + +@pytest.mark.parametrize( + "num_proc, expectation", [(None, does_not_raise()), (1, does_not_raise()), (2, pytest.raises(RuntimeError))] +) +def test_arrow_based_builder_download_and_prepare_with_ambiguous_shards(num_proc, expectation, tmp_path): + builder = DummyArrowBasedBuilderWithAmbiguousShards(cache_dir=tmp_path) + with expectation: + builder.download_and_prepare(num_proc=num_proc) + + +@require_beam +def test_beam_based_builder_download_and_prepare_as_parquet(tmp_path): + builder = DummyBeamBasedBuilder(cache_dir=tmp_path, beam_runner="DirectRunner") + builder.download_and_prepare(file_format="parquet") + assert builder.info.splits["train"].num_examples == 100 + parquet_path = os.path.join( + tmp_path, builder.dataset_name, "default", "0.0.0", f"{builder.dataset_name}-train.parquet" + ) + assert os.path.exists(parquet_path) + assert pq.ParquetFile(parquet_path) is not None diff --git a/testbed/huggingface__datasets/tests/test_data_files.py b/testbed/huggingface__datasets/tests/test_data_files.py new file mode 100644 index 0000000000000000000000000000000000000000..01b5e4dd15ec4ae5c3e9411b8e75712c5decc24f --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_data_files.py @@ -0,0 +1,666 @@ +import copy +import os +from pathlib import Path +from typing import List +from unittest.mock import patch + +import fsspec +import pytest +from fsspec.registry import _registry as _fsspec_registry +from fsspec.spec import AbstractFileSystem + +from datasets.data_files import ( + DataFilesDict, + DataFilesList, + _get_data_files_patterns, + _get_metadata_files_patterns, + _is_inside_unrequested_special_dir, + _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir, + get_data_patterns, + resolve_pattern, +) +from datasets.fingerprint import Hasher + + +_TEST_PATTERNS = ["*", "**", "**/*", "*.txt", "data/*", "**/*.txt", "**/train.txt"] +_FILES_TO_IGNORE = {".dummy", "README.md", "dummy_data.zip", "dataset_infos.json"} +_DIRS_TO_IGNORE = {"data/.dummy_subdir", "__pycache__"} +_TEST_PATTERNS_SIZES = { + "*": 0, + "**": 4, + "**/*": 4, + "*.txt": 0, + "data/*": 2, + "data/**": 4, + "**/*.txt": 4, + "**/train.txt": 2, +} + +_TEST_URL = "https://raw.githubusercontent.com/huggingface/datasets/9675a5a1e7b99a86f9c250f6ea5fa5d1e6d5cc7d/setup.py" + + +@pytest.fixture +def complex_data_dir(tmp_path): + data_dir = tmp_path / "complex_data_dir" + data_dir.mkdir() + + (data_dir / "data").mkdir() + with open(data_dir / "data" / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(data_dir / "data" / "test.txt", "w") as f: + f.write("bar\n" * 10) + + with open(data_dir / "README.md", "w") as f: + f.write("This is a readme") + with open(data_dir / ".dummy", "w") as f: + f.write("this is a dummy file that is not a data file") + + (data_dir / "data" / "subdir").mkdir() + with open(data_dir / "data" / "subdir" / "train.txt", "w") as f: + 
f.write("foo\n" * 10) + with open(data_dir / "data" / "subdir" / "test.txt", "w") as f: + f.write("bar\n" * 10) + + (data_dir / "data" / ".dummy_subdir").mkdir() + with open(data_dir / "data" / ".dummy_subdir" / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(data_dir / "data" / ".dummy_subdir" / "test.txt", "w") as f: + f.write("bar\n" * 10) + + (data_dir / "__pycache__").mkdir() + with open(data_dir / "__pycache__" / "script.py", "w") as f: + f.write("foo\n" * 10) + + return str(data_dir) + + +def is_relative_to(path, *other): + # A built-in method in Python 3.9+ + try: + path.relative_to(*other) + return True + except ValueError: + return False + + +@pytest.fixture +def pattern_results(complex_data_dir): + # We use fsspec glob as a reference for data files resolution from patterns. + # This is the same as dask for example. + # + # /!\ Here are some behaviors specific to fsspec glob that are different from glob.glob, Path.glob, Path.match or fnmatch: + # - '*' matches only first level items + # - '**' matches all items + # - '**/*' matches all at least second level items + # + # More generally: + # - '*' matches any character except a forward-slash (to match just the file or directory name) + # - '**' matches any character including a forward-slash / + + return { + pattern: sorted( + Path(os.path.abspath(path)).as_posix() + for path in fsspec.filesystem("file").glob(os.path.join(complex_data_dir, pattern)) + if Path(path).name not in _FILES_TO_IGNORE + and not any( + is_relative_to(Path(path), os.path.join(complex_data_dir, dir_path)) for dir_path in _DIRS_TO_IGNORE + ) + and Path(path).is_file() + ) + for pattern in _TEST_PATTERNS + } + + +@pytest.fixture +def hub_dataset_repo_path(tmpfs, complex_data_dir): + for path in Path(complex_data_dir).rglob("*"): + if path.is_file(): + with tmpfs.open(path.relative_to(complex_data_dir).as_posix(), "wb") as f: + f.write(path.read_bytes()) + yield "tmp://" + + +@pytest.fixture +def hub_dataset_repo_patterns_results(hub_dataset_repo_path, complex_data_dir, pattern_results): + return { + pattern: [ + hub_dataset_repo_path + Path(path).relative_to(complex_data_dir).as_posix() + for path in pattern_results[pattern] + ] + for pattern in pattern_results + } + + +def test_is_inside_unrequested_special_dir(complex_data_dir, pattern_results): + # usual patterns outside special dir work fine + for pattern, result in pattern_results.items(): + if result: + matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir)) + assert _is_inside_unrequested_special_dir(matched_rel_path, pattern) is False + # check behavior for special dir + f = _is_inside_unrequested_special_dir + assert f("__pycache__/b.txt", "**") is True + assert f("__pycache__/b.txt", "*/b.txt") is True + assert f("__pycache__/b.txt", "__pycache__/*") is False + assert f("__pycache__/__b.txt", "__pycache__/*") is False + assert f("__pycache__/__b.txt", "__*/*") is False + assert f("__b.txt", "*") is False + + +def test_is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(complex_data_dir, pattern_results): + # usual patterns outside hidden dir work fine + for pattern, result in pattern_results.items(): + if result: + matched_rel_path = str(Path(result[0]).relative_to(complex_data_dir)) + assert _is_inside_unrequested_special_dir(matched_rel_path, pattern) is False + # check behavior for hidden dir and file + f = _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir + assert f(".hidden_file.txt", "**") is True + assert f(".hidden_file.txt", ".*") is False + 
assert f(".hidden_dir/a.txt", "**") is True + assert f(".hidden_dir/a.txt", ".*/*") is False + assert f(".hidden_dir/a.txt", ".hidden_dir/*") is False + assert f(".hidden_dir/.hidden_file.txt", "**") is True + assert f(".hidden_dir/.hidden_file.txt", ".*/*") is True + assert f(".hidden_dir/.hidden_file.txt", ".*/.*") is False + assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/*") is True + assert f(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*") is False + + +@pytest.mark.parametrize("pattern", _TEST_PATTERNS) +def test_pattern_results_fixture(pattern_results, pattern): + assert len(pattern_results[pattern]) == _TEST_PATTERNS_SIZES[pattern] + assert all(Path(path).is_file() for path in pattern_results[pattern]) + + +@pytest.mark.parametrize("pattern", _TEST_PATTERNS) +def test_resolve_pattern_locally(complex_data_dir, pattern, pattern_results): + try: + resolved_data_files = resolve_pattern(pattern, complex_data_dir) + assert sorted(str(f) for f in resolved_data_files) == pattern_results[pattern] + except FileNotFoundError: + assert len(pattern_results[pattern]) == 0 + + +def test_resolve_pattern_locally_with_dot_in_base_path(complex_data_dir): + base_path_with_dot = os.path.join(complex_data_dir, "data", ".dummy_subdir") + resolved_data_files = resolve_pattern(os.path.join(base_path_with_dot, "train.txt"), base_path_with_dot) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locally_with_absolute_path(tmp_path, complex_data_dir): + abs_path = os.path.join(complex_data_dir, "data", "train.txt") + resolved_data_files = resolve_pattern(abs_path, str(tmp_path / "blabla")) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locally_with_double_dots(tmp_path, complex_data_dir): + path_with_double_dots = os.path.join(complex_data_dir, "data", "subdir", "..", "train.txt") + resolved_data_files = resolve_pattern(path_with_double_dots, str(tmp_path / "blabla")) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locally_returns_hidden_file_only_if_requested(complex_data_dir): + with pytest.raises(FileNotFoundError): + resolve_pattern("*dummy", complex_data_dir) + resolved_data_files = resolve_pattern(".dummy", complex_data_dir) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locally_hidden_base_path(tmp_path): + hidden = tmp_path / ".test_hidden_base_path" + hidden.mkdir() + (tmp_path / ".test_hidden_base_path" / "a.txt").touch() + resolved_data_files = resolve_pattern("*", str(hidden)) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locallyreturns_hidden_dir_only_if_requested(complex_data_dir): + with pytest.raises(FileNotFoundError): + resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir) + resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir) + assert len(resolved_data_files) == 1 + resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locally_returns_special_dir_only_if_requested(complex_data_dir): + with pytest.raises(FileNotFoundError): + resolve_pattern("data/*dummy_subdir/train.txt", complex_data_dir) + resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", complex_data_dir) + assert len(resolved_data_files) == 1 + resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", complex_data_dir) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_locally_special_base_path(tmp_path): + special = tmp_path / 
"__test_special_base_path__" + special.mkdir() + (tmp_path / "__test_special_base_path__" / "a.txt").touch() + resolved_data_files = resolve_pattern("*", str(special)) + assert len(resolved_data_files) == 1 + + +@pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])]) +def test_resolve_pattern_locally_with_extensions(complex_data_dir, pattern, size, extensions): + if size > 0: + resolved_data_files = resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions) + assert len(resolved_data_files) == size + else: + with pytest.raises(FileNotFoundError): + resolve_pattern(pattern, complex_data_dir, allowed_extensions=extensions) + + +def test_fail_resolve_pattern_locally(complex_data_dir): + with pytest.raises(FileNotFoundError): + resolve_pattern(complex_data_dir, ["blablabla"]) + + +@pytest.mark.skipif(os.name == "nt", reason="Windows does not support symlinks in the default mode") +def test_resolve_pattern_locally_does_not_resolve_symbolic_links(tmp_path, complex_data_dir): + (tmp_path / "train_data_symlink.txt").symlink_to(os.path.join(complex_data_dir, "data", "train.txt")) + resolved_data_files = resolve_pattern("train_data_symlink.txt", str(tmp_path)) + assert len(resolved_data_files) == 1 + assert Path(resolved_data_files[0]) == tmp_path / "train_data_symlink.txt" + + +def test_resolve_pattern_locally_sorted_files(tmp_path_factory): + path = str(tmp_path_factory.mktemp("unsorted_text_files")) + unsorted_names = ["0.txt", "2.txt", "3.txt"] + for name in unsorted_names: + with open(os.path.join(path, name), "w"): + pass + resolved_data_files = resolve_pattern("*", path) + resolved_names = [os.path.basename(data_file) for data_file in resolved_data_files] + assert resolved_names == sorted(unsorted_names) + + +@pytest.mark.parametrize("pattern", _TEST_PATTERNS) +def test_resolve_pattern_in_dataset_repository(hub_dataset_repo_path, pattern, hub_dataset_repo_patterns_results): + try: + resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path) + assert sorted(str(f) for f in resolved_data_files) == hub_dataset_repo_patterns_results[pattern] + except FileNotFoundError: + assert len(hub_dataset_repo_patterns_results[pattern]) == 0 + + +@pytest.mark.parametrize( + "pattern,size,base_path", [("**", 4, None), ("**", 4, "data"), ("**", 2, "data/subdir"), ("**", 0, "data/subdir2")] +) +def test_resolve_pattern_in_dataset_repository_with_base_path(hub_dataset_repo_path, pattern, size, base_path): + base_path = hub_dataset_repo_path + (base_path or "") + if size > 0: + resolved_data_files = resolve_pattern(pattern, base_path) + assert len(resolved_data_files) == size + else: + with pytest.raises(FileNotFoundError): + resolve_pattern(pattern, base_path) + + +@pytest.mark.parametrize("pattern,size,extensions", [("**", 4, [".txt"]), ("**", 4, None), ("**", 0, [".blablabla"])]) +def test_resolve_pattern_in_dataset_repository_with_extensions(hub_dataset_repo_path, pattern, size, extensions): + if size > 0: + resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions) + assert len(resolved_data_files) == size + else: + with pytest.raises(FileNotFoundError): + resolved_data_files = resolve_pattern(pattern, hub_dataset_repo_path, allowed_extensions=extensions) + + +def test_fail_resolve_pattern_in_dataset_repository(hub_dataset_repo_path): + with pytest.raises(FileNotFoundError): + resolve_pattern("blablabla", hub_dataset_repo_path) + + +def 
test_resolve_pattern_in_dataset_repository_returns_hidden_file_only_if_requested(hub_dataset_repo_path): + with pytest.raises(FileNotFoundError): + resolve_pattern("*dummy", hub_dataset_repo_path) + resolved_data_files = resolve_pattern(".dummy", hub_dataset_repo_path) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_in_dataset_repository_hidden_base_path(tmpfs): + tmpfs.touch(".hidden/a.txt") + resolved_data_files = resolve_pattern("*", base_path="tmp://.hidden") + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_in_dataset_repository_returns_hidden_dir_only_if_requested(hub_dataset_repo_path): + with pytest.raises(FileNotFoundError): + resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path) + resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path) + assert len(resolved_data_files) == 1 + resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_in_dataset_repository_returns_special_dir_only_if_requested(hub_dataset_repo_path): + with pytest.raises(FileNotFoundError): + resolve_pattern("data/*dummy_subdir/train.txt", hub_dataset_repo_path) + resolved_data_files = resolve_pattern("data/.dummy_subdir/train.txt", hub_dataset_repo_path) + assert len(resolved_data_files) == 1 + resolved_data_files = resolve_pattern("*/.dummy_subdir/train.txt", hub_dataset_repo_path) + assert len(resolved_data_files) == 1 + + +def test_resolve_pattern_in_dataset_repository_special_base_path(tmpfs): + tmpfs.touch("__special__/a.txt") + resolved_data_files = resolve_pattern("*", base_path="tmp://__special__") + assert len(resolved_data_files) == 1 + + +@pytest.fixture +def dummy_fs(): + DummyTestFS = mock_fs(["train.txt", "test.txt"]) + _fsspec_registry["mock"] = DummyTestFS + _fsspec_registry["dummy"] = DummyTestFS + yield + del _fsspec_registry["mock"] + del _fsspec_registry["dummy"] + + +def test_resolve_pattern_fs(dummy_fs): + resolved_data_files = resolve_pattern("mock://train.txt", base_path="") + assert resolved_data_files == ["mock://train.txt"] + + +@pytest.mark.parametrize("pattern", _TEST_PATTERNS) +def test_DataFilesList_from_patterns_in_dataset_repository( + hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern +): + try: + data_files_list = DataFilesList.from_patterns([pattern], hub_dataset_repo_path) + assert sorted(data_files_list) == hub_dataset_repo_patterns_results[pattern] + assert len(data_files_list.origin_metadata) == len(data_files_list) + except FileNotFoundError: + assert len(hub_dataset_repo_patterns_results[pattern]) == 0 + + +def test_DataFilesList_from_patterns_locally_with_extra_files(complex_data_dir, text_file): + data_files_list = DataFilesList.from_patterns([_TEST_URL, text_file.as_posix()], complex_data_dir) + assert list(data_files_list) == [_TEST_URL, text_file.as_posix()] + assert len(data_files_list.origin_metadata) == 2 + + +def test_DataFilesList_from_patterns_raises_FileNotFoundError(complex_data_dir): + with pytest.raises(FileNotFoundError): + DataFilesList.from_patterns(["file_that_doesnt_exist.txt"], complex_data_dir) + + +class TestDataFilesDict: + def test_key_order_after_copy(self): + data_files = DataFilesDict({"train": "train.csv", "test": "test.csv"}) + copied_data_files = copy.deepcopy(data_files) + assert list(copied_data_files.keys()) == list(data_files.keys()) # test split order with list() + + +@pytest.mark.parametrize("pattern", _TEST_PATTERNS) +def
test_DataFilesDict_from_patterns_in_dataset_repository( + hub_dataset_repo_path, hub_dataset_repo_patterns_results, pattern +): + split_name = "train" + try: + data_files = DataFilesDict.from_patterns({split_name: [pattern]}, hub_dataset_repo_path) + assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values()) + assert sorted(data_files[split_name]) == hub_dataset_repo_patterns_results[pattern] + except FileNotFoundError: + assert len(hub_dataset_repo_patterns_results[pattern]) == 0 + + +@pytest.mark.parametrize( + "pattern,size,base_path,split_name", + [ + ("**", 4, None, "train"), + ("**", 4, "data", "train"), + ("**", 2, "data/subdir", "train"), + ("**train*", 1, "data/subdir", "train"), + ("**test*", 1, "data/subdir", "test"), + ("**", 0, "data/subdir2", "train"), + ], +) +def test_DataFilesDict_from_patterns_in_dataset_repository_with_base_path( + hub_dataset_repo_path, pattern, size, base_path, split_name +): + base_path = hub_dataset_repo_path + (base_path or "") + if size > 0: + data_files = DataFilesDict.from_patterns({split_name: [pattern]}, base_path=base_path) + assert len(data_files[split_name]) == size + else: + with pytest.raises(FileNotFoundError): + resolve_pattern(pattern, base_path) + + +@pytest.mark.parametrize("pattern", _TEST_PATTERNS) +def test_DataFilesDict_from_patterns_locally(complex_data_dir, pattern_results, pattern): + split_name = "train" + try: + data_files = DataFilesDict.from_patterns({split_name: [pattern]}, complex_data_dir) + assert all(isinstance(data_files_list, DataFilesList) for data_files_list in data_files.values()) + assert sorted(data_files[split_name]) == pattern_results[pattern] + except FileNotFoundError: + assert len(pattern_results[pattern]) == 0 + + +def test_DataFilesDict_from_patterns_in_dataset_repository_hashing(hub_dataset_repo_path): + patterns = {"train": ["**/train.txt"], "test": ["**/test.txt"]} + data_files1 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path) + data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path) + assert Hasher.hash(data_files1) == Hasher.hash(data_files2) + + data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True)) + assert Hasher.hash(data_files1) == Hasher.hash(data_files2) + + patterns2 = {"train": ["data/**train.txt"], "test": ["data/**test.txt"]} + data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path) + assert Hasher.hash(data_files1) == Hasher.hash(data_files2) + + patterns2 = {"train": ["data/**train.txt"], "test": ["data/**train.txt"]} + data_files2 = DataFilesDict.from_patterns(patterns2, hub_dataset_repo_path) + assert Hasher.hash(data_files1) != Hasher.hash(data_files2) + + # the tmpfs used to mock the hub repo is based on a local directory + # therefore os.stat is used to get the mtime of the data files + with patch("os.stat", return_value=os.stat(__file__)): + data_files2 = DataFilesDict.from_patterns(patterns, hub_dataset_repo_path) + assert Hasher.hash(data_files1) != Hasher.hash(data_files2) + + +def test_DataFilesDict_from_patterns_locally_or_remote_hashing(text_file): + patterns = {"train": [_TEST_URL], "test": [str(text_file)]} + data_files1 = DataFilesDict.from_patterns(patterns) + data_files2 = DataFilesDict.from_patterns(patterns) + assert Hasher.hash(data_files1) == Hasher.hash(data_files2) + + data_files2 = DataFilesDict(sorted(data_files1.items(), reverse=True)) + assert Hasher.hash(data_files1) == Hasher.hash(data_files2) + + patterns2 = {"train": [_TEST_URL], "test": [_TEST_URL]} 
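+ # using the same URL for both splits changes the resolved mapping, so the hash should differ;
+ # the patches below simulate changed origin metadata (fsspec HTTP file info for the URL,
+ # os.stat mtime for the local file), which should likewise change the hash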
+ data_files2 = DataFilesDict.from_patterns(patterns2) + assert Hasher.hash(data_files1) != Hasher.hash(data_files2) + + with patch("fsspec.implementations.http._file_info", return_value={}): + data_files2 = DataFilesDict.from_patterns(patterns) + assert Hasher.hash(data_files1) != Hasher.hash(data_files2) + + with patch("os.stat", return_value=os.stat(__file__)): + data_files2 = DataFilesDict.from_patterns(patterns) + assert Hasher.hash(data_files1) != Hasher.hash(data_files2) + + +def mock_fs(file_paths: List[str]): + """ + Set up a mock filesystem for fsspec containing the provided files + + Example: + + ```py + >>> DummyTestFS = mock_fs(["data/train.txt", "data.test.txt"]) + >>> fs = DummyTestFS() + >>> assert fsspec.get_filesystem_class("mock").__name__ == "DummyTestFS" + >>> assert type(fs).__name__ == "DummyTestFS" + >>> print(fs.glob("**")) + ["data", "data/train.txt", "data.test.txt"] + ``` + """ + file_paths = [file_path.split("://")[-1] for file_path in file_paths] + dir_paths = { + "/".join(file_path.split("/")[: i + 1]) for file_path in file_paths for i in range(file_path.count("/")) + } + fs_contents = [{"name": dir_path, "type": "directory"} for dir_path in dir_paths] + [ + {"name": file_path, "type": "file", "size": 10} for file_path in file_paths + ] + + class DummyTestFS(AbstractFileSystem): + protocol = ("mock", "dummy") + _fs_contents = fs_contents + + def ls(self, path, detail=True, refresh=True, **kwargs): + if kwargs.pop("strip_proto", True): + path = self._strip_protocol(path) + + files = not refresh and self._ls_from_cache(path) + if not files: + files = [file for file in self._fs_contents if path == self._parent(file["name"])] + files.sort(key=lambda file: file["name"]) + self.dircache[path.rstrip("/")] = files + + if detail: + return files + return [file["name"] for file in files] + + return DummyTestFS + + +@pytest.mark.parametrize("base_path", ["", "mock://", "my_dir"]) +@pytest.mark.parametrize( + "data_file_per_split", + [ + # === Main cases === + # file named after split at the root + {"train": "train.txt", "validation": "valid.txt", "test": "test.txt"}, + # file named after split in a directory + { + "train": "data/train.txt", + "validation": "data/valid.txt", + "test": "data/test.txt", + }, + # directory named after split + { + "train": "train/split.txt", + "validation": "valid/split.txt", + "test": "test/split.txt", + }, + # sharded splits + { + "train": [f"data/train_{i}.txt" for i in range(3)], + "validation": [f"data/validation_{i}.txt" for i in range(3)], + "test": [f"data/test_{i}.txt" for i in range(3)], + }, + # sharded splits with standard format (+ custom split name) + { + "train": [f"data/train-0000{i}-of-00003.txt" for i in range(3)], + "validation": [f"data/validation-0000{i}-of-00003.txt" for i in range(3)], + "test": [f"data/test-0000{i}-of-00003.txt" for i in range(3)], + "random": [f"data/random-0000{i}-of-00003.txt" for i in range(3)], + }, + # === Secondary cases === + # Default to train split + {"train": "dataset.txt"}, + {"train": "data/dataset.txt"}, + {"train": ["data/image.jpg", "metadata.jsonl"]}, + {"train": ["data/image.jpg", "metadata.csv"]}, + # With prefix or suffix in directory or file names + {"train": "my_train_dir/dataset.txt"}, + {"train": "data/my_train_file.txt"}, + {"test": "my_test_dir/dataset.txt"}, + {"test": "data/my_test_file.txt"}, + {"validation": "my_validation_dir/dataset.txt"}, + {"validation": "data/my_validation_file.txt"}, + # With test<>eval aliases + {"test": "eval.txt"}, + {"test": "data/eval.txt"}, + 
{"test": "eval/dataset.txt"}, + # With valid<>dev aliases + {"validation": "dev.txt"}, + {"validation": "data/dev.txt"}, + {"validation": "dev/dataset.txt"}, + # With valid<>val aliases + {"validation": "val.txt"}, + {"validation": "data/val.txt"}, + # With other extensions + {"train": "train.parquet", "validation": "valid.parquet", "test": "test.parquet"}, + # With "dev" or "eval" without separators + {"train": "developers_list.txt"}, + {"train": "data/seqeval_results.txt"}, + {"train": "contest.txt"}, + # With supported separators + {"test": "my.test.file.txt"}, + {"test": "my-test-file.txt"}, + {"test": "my_test_file.txt"}, + {"test": "my test file.txt"}, + {"test": "test00001.txt"}, + ], +) +def test_get_data_files_patterns(base_path, data_file_per_split): + data_file_per_split = {k: v if isinstance(v, list) else [v] for k, v in data_file_per_split.items()} + data_file_per_split = { + split: [ + base_path + ("/" if base_path and base_path[-1] != "/" else "") + file_path + for file_path in data_file_per_split[split] + ] + for split in data_file_per_split + } + file_paths = sum(data_file_per_split.values(), []) + DummyTestFS = mock_fs(file_paths) + fs = DummyTestFS() + + def resolver(pattern): + pattern = base_path + ("/" if base_path and base_path[-1] != "/" else "") + pattern + return [ + file_path[len(fs._strip_protocol(base_path)) :].lstrip("/") + for file_path in fs.glob(pattern) + if fs.isfile(file_path) + ] + + patterns_per_split = _get_data_files_patterns(resolver) + assert list(patterns_per_split.keys()) == list(data_file_per_split.keys()) # Test split order with list() + for split, patterns in patterns_per_split.items(): + matched = [file_path for pattern in patterns for file_path in resolver(pattern)] + expected = [ + fs._strip_protocol(file_path)[len(fs._strip_protocol(base_path)) :].lstrip("/") + for file_path in data_file_per_split[split] + ] + assert matched == expected + + +@pytest.mark.parametrize( + "metadata_files", + [ + # metadata files at the root + ["metadata.jsonl"], + ["metadata.csv"], + # nested metadata files + ["metadata.jsonl", "data/metadata.jsonl"], + ["metadata.csv", "data/metadata.csv"], + ], +) +def test_get_metadata_files_patterns(metadata_files): + DummyTestFS = mock_fs(metadata_files) + fs = DummyTestFS() + + def resolver(pattern): + return [file_path for file_path in fs.glob(pattern) if fs.isfile(file_path)] + + patterns = _get_metadata_files_patterns(resolver) + matched = [file_path for pattern in patterns for file_path in resolver(pattern)] + assert sorted(matched) == sorted(metadata_files) + + +def test_get_data_patterns_from_directory_with_the_word_data_twice(tmp_path): + repo_dir = tmp_path / "directory-name-ending-with-the-word-data" # parent directory contains the word "data/" + data_dir = repo_dir / "data" + data_dir.mkdir(parents=True) + data_file = data_dir / "train-00001-of-00009.parquet" + data_file.touch() + data_file_patterns = get_data_patterns(repo_dir.as_posix()) + assert data_file_patterns == {"train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"]} diff --git a/testbed/huggingface__datasets/tests/test_dataset_dict.py b/testbed/huggingface__datasets/tests/test_dataset_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..71d2f06d6686916721e21cff6fceefba68c9412a --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_dataset_dict.py @@ -0,0 +1,797 @@ +import os +import tempfile +from unittest import TestCase + +import numpy as np +import pandas as pd +import pytest + +from 
datasets import load_from_disk +from datasets.arrow_dataset import Dataset +from datasets.dataset_dict import DatasetDict, IterableDatasetDict +from datasets.features import ClassLabel, Features, Sequence, Value +from datasets.iterable_dataset import IterableDataset +from datasets.splits import NamedSplit + +from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_tf, require_torch + + +class DatasetDictTest(TestCase): + def _create_dummy_dataset(self, multiple_columns=False): + if multiple_columns: + data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} + dset = Dataset.from_dict(data) + else: + dset = Dataset.from_dict( + {"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]} + ) + return dset + + def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict: + return DatasetDict( + { + "train": self._create_dummy_dataset(multiple_columns=multiple_columns), + "test": self._create_dummy_dataset(multiple_columns=multiple_columns), + } + ) + + def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset: + def gen(): + if multiple_columns: + data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} + for v1, v2 in zip(data["col_1"], data["col_2"]): + yield {"col_1": v1, "col_2": v2} + else: + for x in range(30): + yield {"filename": "my_name-train" + "_" + f"{x:03d}"} + + return IterableDataset.from_generator(gen) + + def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict: + return IterableDatasetDict( + { + "train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), + "test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), + } + ) + + def test_flatten(self): + dset_split = Dataset.from_dict( + {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, + features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}), + ) + dset = DatasetDict({"train": dset_split, "test": dset_split}) + dset = dset.flatten() + self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]}) + self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"]) + self.assertDictEqual( + dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}) + ) + del dset + + def test_set_format_numpy(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset.set_format(type="numpy", columns=["col_1"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 1) + self.assertIsInstance(dset_split[0]["col_1"], np.int64) + self.assertEqual(dset_split[0]["col_1"].item(), 3) + + dset.reset_format() + with dset.formatted_as(type="numpy", columns=["col_1"]): + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 1) + self.assertIsInstance(dset_split[0]["col_1"], np.int64) + self.assertEqual(dset_split[0]["col_1"].item(), 3) + + for dset_split in dset.values(): + self.assertEqual(dset_split.format["type"], None) + self.assertEqual(dset_split.format["format_kwargs"], {}) + self.assertEqual(dset_split.format["columns"], dset_split.column_names) + self.assertEqual(dset_split.format["output_all_columns"], False) + + dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 2) + self.assertIsInstance(dset_split[0]["col_2"], str) + self.assertEqual(dset_split[0]["col_2"], "a") + + 
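# with both columns formatted as "numpy", string values should come back as numpy scalars (np.str_): +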
dset.set_format(type="numpy", columns=["col_1", "col_2"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 2) + self.assertIsInstance(dset_split[0]["col_2"], np.str_) + self.assertEqual(dset_split[0]["col_2"].item(), "a") + del dset + + @require_torch + def test_set_format_torch(self): + import torch + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset.set_format(type="torch", columns=["col_1"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 1) + self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) + self.assertListEqual(list(dset_split[0]["col_1"].shape), []) + self.assertEqual(dset_split[0]["col_1"].item(), 3) + + dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 2) + self.assertIsInstance(dset_split[0]["col_2"], str) + self.assertEqual(dset_split[0]["col_2"], "a") + + dset.set_format(type="torch") + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 2) + self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) + self.assertListEqual(list(dset_split[0]["col_1"].shape), []) + self.assertEqual(dset_split[0]["col_1"].item(), 3) + self.assertIsInstance(dset_split[0]["col_2"], str) + self.assertEqual(dset_split[0]["col_2"], "a") + del dset + + @require_tf + def test_set_format_tf(self): + import tensorflow as tf + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset.set_format(type="tensorflow", columns=["col_1"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 1) + self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor) + self.assertListEqual(list(dset_split[0]["col_1"].shape), []) + self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3) + + dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 2) + self.assertIsInstance(dset_split[0]["col_2"], str) + self.assertEqual(dset_split[0]["col_2"], "a") + + dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0]), 2) + self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a") + del dset + + def test_set_format_pandas(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset.set_format(type="pandas", columns=["col_1"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0].columns), 1) + self.assertIsInstance(dset_split[0], pd.DataFrame) + self.assertListEqual(list(dset_split[0].shape), [1, 1]) + self.assertEqual(dset_split[0]["col_1"].item(), 3) + + dset.set_format(type="pandas", columns=["col_1", "col_2"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0].columns), 2) + self.assertEqual(dset_split[0]["col_2"].item(), "a") + del dset + + def test_set_transform(self): + def transform(batch): + return {k: [str(i).upper() for i in v] for k, v in batch.items()} + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset.set_transform(transform=transform, columns=["col_1"]) + for dset_split in dset.values(): + self.assertEqual(dset_split.format["type"], "custom") + self.assertEqual(len(dset_split[0].keys()), 1) + self.assertEqual(dset_split[0]["col_1"], "3") + self.assertEqual(dset_split[:2]["col_1"], ["3", "2"]) + self.assertEqual(dset_split["col_1"][:2], ["3", "2"]) + + prev_format = dset[list(dset.keys())[0]].format + for dset_split in 
dset.values(): + dset_split.set_format(**dset_split.format) + self.assertEqual(prev_format, dset_split.format) + + dset.set_transform(transform=transform, columns=["col_1", "col_2"]) + for dset_split in dset.values(): + self.assertEqual(len(dset_split[0].keys()), 2) + self.assertEqual(dset_split[0]["col_2"], "A") + del dset + + def test_with_format(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset2 = dset.with_format("numpy", columns=["col_1"]) + dset.set_format("numpy", columns=["col_1"]) + for dset_split, dset_split2 in zip(dset.values(), dset2.values()): + self.assertDictEqual(dset_split.format, dset_split2.format) + del dset, dset2 + + def test_with_transform(self): + def transform(batch): + return {k: [str(i).upper() for i in v] for k, v in batch.items()} + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset2 = dset.with_transform(transform, columns=["col_1"]) + dset.set_transform(transform, columns=["col_1"]) + for dset_split, dset_split2 in zip(dset.values(), dset2.values()): + self.assertDictEqual(dset_split.format, dset_split2.format) + del dset, dset2 + + def test_cast(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + features = dset["train"].features + features["col_1"] = Value("float64") + dset = dset.cast(features) + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 2) + self.assertEqual(dset_split.features["col_1"], Value("float64")) + self.assertIsInstance(dset_split[0]["col_1"], float) + del dset + + def test_remove_columns(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset = dset.remove_columns(column_names="col_1") + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 1) + self.assertListEqual(list(dset_split.column_names), ["col_2"]) + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset = dset.remove_columns(column_names=["col_1", "col_2"]) + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 0) + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + for dset_split in dset.values(): + dset_split._format_columns = ["col_1", "col_2"] + dset = dset.remove_columns(column_names=["col_1"]) + for dset_split in dset.values(): + self.assertListEqual(dset_split._format_columns, ["col_2"]) + self.assertEqual(dset_split.num_columns, 1) + self.assertListEqual(list(dset_split.column_names), ["col_2"]) + del dset + + def test_rename_column(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name") + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 2) + self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"]) + del dset + + def test_select_columns(self): + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset = dset.select_columns(column_names=[]) + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 0) + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset = dset.select_columns(column_names="col_1") + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 1) + self.assertListEqual(list(dset_split.column_names), ["col_1"]) + + dset = self._create_dummy_dataset_dict(multiple_columns=True) + dset = dset.select_columns(column_names=["col_1", "col_2"]) + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 2) + + dset = 
self._create_dummy_dataset_dict(multiple_columns=True) + for dset_split in dset.values(): + dset_split._format_columns = ["col_1", "col_2"] + dset = dset.select_columns(column_names=["col_1"]) + for dset_split in dset.values(): + self.assertEqual(dset_split.num_columns, 1) + self.assertListEqual(list(dset_split.column_names), ["col_1"]) + self.assertListEqual(dset_split._format_columns, ["col_1"]) + + def test_map(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + + mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True) + self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys())) + self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"]) + + cache_file_names = { + "train": os.path.join(tmp_dir, "train.arrow"), + "test": os.path.join(tmp_dir, "test.arrow"), + } + mapped_dsets_2: DatasetDict = mapped_dsets_1.map( + lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names + ) + self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys())) + self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"])) + del dsets, mapped_dsets_1, mapped_dsets_2 + + def test_iterable_map(self): + dsets = self._create_dummy_iterable_dataset_dict() + fn_kwargs = {"n": 3} + mapped_dsets: IterableDatasetDict = dsets.map( + lambda x, n: {"foo": [n] * len(x["filename"])}, + batched=True, + fn_kwargs=fn_kwargs, + ) + mapped_example = next(iter(mapped_dsets["train"])) + self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"])) + self.assertLessEqual(mapped_example["foo"], 3) + del dsets, mapped_dsets + + def test_filter(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + + filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10) + self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys())) + self.assertEqual(len(filtered_dsets_1["train"]), 10) + + cache_file_names = { + "train": os.path.join(tmp_dir, "train.arrow"), + "test": os.path.join(tmp_dir, "test.arrow"), + } + filtered_dsets_2: DatasetDict = filtered_dsets_1.filter( + lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names + ) + self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys())) + self.assertEqual(len(filtered_dsets_2["train"]), 5) + + filtered_dsets_3: DatasetDict = dsets.filter( + lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True + ) + self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys())) + self.assertEqual(len(filtered_dsets_3["train"]), 10) + del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3 + + def test_iterable_filter(self): + dsets = self._create_dummy_iterable_dataset_dict() + example = next(iter(dsets["train"])) + fn_kwargs = {"n": 3} + filtered_dsets: IterableDatasetDict = dsets.filter( + lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs + ) + filtered_example = next(iter(filtered_dsets["train"])) + self.assertListEqual(list(example.keys()), list(filtered_example.keys())) + self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4) # id starts from 3 + del dsets, filtered_dsets + + def test_sort(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + + sorted_dsets_1: DatasetDict = dsets.sort("filename") 
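+ # the dummy filenames end in a zero-padded index ("my_name-train_000" ... "my_name-train_029"),
+ # so lexicographic sorting on "filename" matches the numeric order asserted below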
+ self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys())) + self.assertListEqual( + [f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]], + sorted(f"{x:03d}" for x in range(30)), + ) + + indices_cache_file_names = { + "train": os.path.join(tmp_dir, "train.arrow"), + "test": os.path.join(tmp_dir, "test.arrow"), + } + sorted_dsets_2: DatasetDict = sorted_dsets_1.sort( + "filename", indices_cache_file_names=indices_cache_file_names, reverse=True + ) + self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys())) + self.assertListEqual( + [f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]], + sorted((f"{x:03d}" for x in range(30)), reverse=True), + ) + del dsets, sorted_dsets_1, sorted_dsets_2 + + def test_shuffle(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + + indices_cache_file_names = { + "train": os.path.join(tmp_dir, "train.arrow"), + "test": os.path.join(tmp_dir, "test.arrow"), + } + seeds = { + "train": 1234, + "test": 1234, + } + dsets_shuffled = dsets.shuffle( + seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False + ) + self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"]) + + self.assertEqual(len(dsets_shuffled["train"]), 30) + self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028") + self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010") + self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")})) + self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")})) + + # Reproducibility + indices_cache_file_names_2 = { + "train": os.path.join(tmp_dir, "train_2.arrow"), + "test": os.path.join(tmp_dir, "test_2.arrow"), + } + dsets_shuffled_2 = dsets.shuffle( + seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False + ) + self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"]) + + seeds = { + "train": 1234, + "test": 1, + } + indices_cache_file_names_3 = { + "train": os.path.join(tmp_dir, "train_3.arrow"), + "test": os.path.join(tmp_dir, "test_3.arrow"), + } + dsets_shuffled_3 = dsets.shuffle( + seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False + ) + self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"]) + + # other input types + dsets_shuffled_int = dsets.shuffle(42) + dsets_shuffled_alias = dsets.shuffle(seed=42) + dsets_shuffled_none = dsets.shuffle() + self.assertEqual(len(dsets_shuffled_int["train"]), 30) + self.assertEqual(len(dsets_shuffled_alias["train"]), 30) + self.assertEqual(len(dsets_shuffled_none["train"]), 30) + + del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3 + del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none + + def test_flatten_indices(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + + indices_cache_file_names = { + "train": os.path.join(tmp_dir, "train.arrow"), + "test": os.path.join(tmp_dir, "test.arrow"), + } + dsets_shuffled = dsets.shuffle( + seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False + ) + + self.assertIsNotNone(dsets_shuffled["train"]._indices) + self.assertIsNotNone(dsets_shuffled["test"]._indices) + + dsets_flat = dsets_shuffled.flatten_indices() + + self.assertIsNone(dsets_flat["train"]._indices) 
+ self.assertIsNone(dsets_flat["test"]._indices) + + del dsets, dsets_shuffled, dsets_flat + + def test_check_values_type(self): + dsets = self._create_dummy_dataset_dict() + dsets["bad_split"] = None + self.assertRaises(TypeError, dsets.map, lambda x: x) + self.assertRaises(TypeError, dsets.filter, lambda x: True) + self.assertRaises(TypeError, dsets.shuffle) + self.assertRaises(TypeError, dsets.sort, "filename") + del dsets + + def test_serialization(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + dsets.save_to_disk(tmp_dir) + reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) + self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) + self.assertEqual(len(reloaded_dsets["train"]), 30) + self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) + self.assertEqual(len(reloaded_dsets["test"]), 30) + self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) + del reloaded_dsets + + del dsets["test"] + dsets.save_to_disk(tmp_dir) + reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) + self.assertListEqual(sorted(reloaded_dsets), ["train"]) + self.assertEqual(len(reloaded_dsets["train"]), 30) + self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) + del dsets, reloaded_dsets + + dsets = self._create_dummy_dataset_dict() + dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2}) + reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) + self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) + self.assertEqual(len(reloaded_dsets["train"]), 30) + self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) + self.assertEqual(len(reloaded_dsets["train"].cache_files), 3) + self.assertEqual(len(reloaded_dsets["test"]), 30) + self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) + self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) + del reloaded_dsets + + dsets = self._create_dummy_dataset_dict() + dsets.save_to_disk(tmp_dir, num_proc=2) + reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) + self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) + self.assertEqual(len(reloaded_dsets["train"]), 30) + self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) + self.assertEqual(len(reloaded_dsets["train"].cache_files), 2) + self.assertEqual(len(reloaded_dsets["test"]), 30) + self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) + self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) + del reloaded_dsets + + def test_load_from_disk(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dsets = self._create_dummy_dataset_dict() + dsets.save_to_disk(tmp_dir) + del dsets + dsets = load_from_disk(tmp_dir) + self.assertListEqual(sorted(dsets), ["test", "train"]) + self.assertEqual(len(dsets["train"]), 30) + self.assertListEqual(dsets["train"].column_names, ["filename"]) + self.assertEqual(len(dsets["test"]), 30) + self.assertListEqual(dsets["test"].column_names, ["filename"]) + del dsets + + def test_align_labels_with_mapping(self): + train_features = Features( + { + "input_text": Value("string"), + "input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), + } + ) + test_features = Features( + { + "input_text": Value("string"), + "input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]), + } + ) + train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]} + test_data = {"input_text": 
["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]} + label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1} + id2label = {v: k for k, v in label2id.items()} + train_expected_labels = [2, 2, 1, 1, 0, 0] + test_expected_labels = [2, 2, 0, 0, 1, 1] + train_expected_label_names = [id2label[idx] for idx in train_expected_labels] + test_expected_label_names = [id2label[idx] for idx in test_expected_labels] + dsets = DatasetDict( + { + "train": Dataset.from_dict(train_data, features=train_features), + "test": Dataset.from_dict(test_data, features=test_features), + } + ) + dsets = dsets.align_labels_with_mapping(label2id, "input_labels") + self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"]) + self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"]) + train_aligned_label_names = [ + dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"] + ] + test_aligned_label_names = [ + dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"] + ] + self.assertListEqual(train_expected_label_names, train_aligned_label_names) + self.assertListEqual(test_expected_label_names, test_aligned_label_names) + + +def test_dummy_datasetdict_serialize_fs(mockfs): + dataset_dict = DatasetDict( + { + "train": Dataset.from_dict({"a": range(30)}), + "test": Dataset.from_dict({"a": range(10)}), + } + ) + dataset_path = "mock://my_dataset" + dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options) + assert mockfs.isdir(dataset_path) + assert mockfs.glob(dataset_path + "/*") + reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options) + assert list(reloaded) == list(dataset_dict) + for k in dataset_dict: + assert reloaded[k].features == dataset_dict[k].features + assert reloaded[k].to_dict() == dataset_dict[k].to_dict() + + +def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_csv_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_datasetdict_from_csv_features(features, csv_path, tmp_path): + cache_dir = tmp_path / "cache" + # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" + default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + 
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir) + _check_csv_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_datasetdict_from_csv_split(split, csv_path, tmp_path): + if split: + path = {split: csv_path} + else: + split = "train" + path = {"train": csv_path, "test": csv_path} + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} + dataset = DatasetDict.from_csv(path, cache_dir=cache_dir) + _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) + + +def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == ["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_json_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir) + _check_json_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): + if split: + path = {split: jsonl_path} + else: + split = "train" + path = {"train": jsonl_path, "test": jsonl_path} + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = DatasetDict.from_json(path, cache_dir=cache_dir) + _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) + + +def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 3 + assert dataset.column_names == 
["col_1", "col_2", "col_3"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_parquet_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"col_1": "string", "col_2": "int64", "col_3": "float64"}, + {"col_1": "string", "col_2": "string", "col_3": "string"}, + {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, + {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, + ], +) +def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None + ) + dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir) + _check_parquet_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) +def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path): + if split: + path = {split: parquet_path} + else: + split = "train" + path = {"train": parquet_path, "test": parquet_path} + cache_dir = tmp_path / "cache" + expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} + dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir) + _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys())) + assert all(dataset[split].split == split for split in path.keys()) + + +def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)): + assert isinstance(dataset_dict, DatasetDict) + for split in splits: + dataset = dataset_dict[split] + assert dataset.num_rows == 4 + assert dataset.num_columns == 1 + assert dataset.column_names == ["text"] + for feature, expected_dtype in expected_features.items(): + assert dataset.features[feature].dtype == expected_dtype + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): + cache_dir = tmp_path / "cache" + expected_features = {"text": "string"} + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) + _check_text_datasetdict(dataset, expected_features) + + +@pytest.mark.parametrize( + "features", + [ + None, + {"text": "string"}, + {"text": "int32"}, + {"text": "float32"}, + ], +) +def test_datasetdict_from_text_features(features, text_path, tmp_path): + cache_dir = tmp_path / "cache" + default_expected_features = {"text": "string"} + expected_features = features.copy() if features else default_expected_features + features = ( + Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None 
else None
+    )
+    dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir)
+    _check_text_datasetdict(dataset, expected_features)
+
+
+@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
+def test_datasetdict_from_text_split(split, text_path, tmp_path):
+    if split:
+        path = {split: text_path}
+    else:
+        split = "train"
+        path = {"train": text_path, "test": text_path}
+    cache_dir = tmp_path / "cache"
+    expected_features = {"text": "string"}
+    dataset = DatasetDict.from_text(path, cache_dir=cache_dir)
+    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
+    assert all(dataset[split].split == split for split in path.keys())
diff --git a/testbed/huggingface__datasets/tests/test_download_manager.py b/testbed/huggingface__datasets/tests/test_download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..3673d43ce9876b01c15d5438e0f66b00253f2088
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/test_download_manager.py
@@ -0,0 +1,149 @@
+import json
+import os
+from pathlib import Path
+
+import pytest
+
+from datasets.download.download_config import DownloadConfig
+from datasets.download.download_manager import DownloadManager
+from datasets.utils.file_utils import hash_url_to_filename
+
+
+URL = "http://www.mocksite.com/file1.txt"
+CONTENT = '"text": ["foo", "foo"]'
+HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
+
+
+class MockResponse:
+    status_code = 200
+    headers = {"Content-Length": "100"}
+    cookies = {}
+
+    def iter_content(self, **kwargs):
+        return [bytes(CONTENT, "utf-8")]
+
+
+def mock_request(*args, **kwargs):
+    return MockResponse()
+
+
+@pytest.mark.parametrize("urls_type", [str, list, dict])
+def test_download_manager_download(urls_type, tmp_path, monkeypatch):
+    import requests
+
+    monkeypatch.setattr(requests, "request", mock_request)
+
+    url = URL
+    if issubclass(urls_type, str):
+        urls = url
+    elif issubclass(urls_type, list):
+        urls = [url]
+    elif issubclass(urls_type, dict):
+        urls = {"train": url}
+    dataset_name = "dummy"
+    cache_subdir = "downloads"
+    cache_dir_root = tmp_path
+    download_config = DownloadConfig(
+        cache_dir=os.path.join(cache_dir_root, cache_subdir),
+        use_etag=False,
+    )
+    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
+    downloaded_paths = dl_manager.download(urls)
+    input_urls = urls
+    for downloaded_paths in [downloaded_paths]:
+        if isinstance(urls, str):
+            downloaded_paths = [downloaded_paths]
+            input_urls = [urls]
+        elif isinstance(urls, dict):
+            assert "train" in downloaded_paths.keys()
+            downloaded_paths = downloaded_paths.values()
+            input_urls = urls.values()
+        assert downloaded_paths
+        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
+            assert downloaded_path == dl_manager.downloaded_paths[input_url]
+            downloaded_path = Path(downloaded_path)
+            parts = downloaded_path.parts
+            assert parts[-1] == HASH
+            assert parts[-2] == cache_subdir
+            assert downloaded_path.exists()
+            content = downloaded_path.read_text()
+            assert content == CONTENT
+            metadata_downloaded_path = downloaded_path.with_suffix(".json")
+            assert metadata_downloaded_path.exists()
+            metadata_content = json.loads(metadata_downloaded_path.read_text())
+            assert metadata_content == {"url": URL, "etag": None}
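+
+
+# Illustrative sketch (not part of the original suite): the cached filename
+# asserted against HASH above appears to be the deterministic hash of the URL
+# computed by `hash_url_to_filename` (the etag is None here because the
+# download uses `use_etag=False`). Under that assumption, HASH can be
+# recomputed directly:
+def test_hash_url_to_filename_sketch():
+    assert hash_url_to_filename(URL, etag=None) == HASH
+
+
+@pytest.mark.parametrize("paths_type", [str, list, dict])
+def test_download_manager_extract(paths_type, xz_file, text_file):
+    filename = str(xz_file)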
+    if issubclass(paths_type, str):
+        paths = filename
+    elif issubclass(paths_type, list):
+        paths = [filename]
+    elif issubclass(paths_type, dict):
+        paths = {"train": filename}
+    dataset_name = "dummy"
+    cache_dir = xz_file.parent
+    extracted_subdir = "extracted"
+    download_config = DownloadConfig(
+        cache_dir=cache_dir,
+        use_etag=False,
+    )
+    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
+    extracted_paths = dl_manager.extract(paths)
+    input_paths = paths
+    for extracted_paths in [extracted_paths]:
+        if isinstance(paths, str):
+            extracted_paths = [extracted_paths]
+            input_paths = [paths]
+        elif isinstance(paths, dict):
+            assert "train" in extracted_paths.keys()
+            extracted_paths = extracted_paths.values()
+            input_paths = paths.values()
+        assert extracted_paths
+        for extracted_path, input_path in zip(extracted_paths, input_paths):
+            assert extracted_path == dl_manager.extracted_paths[input_path]
+            extracted_path = Path(extracted_path)
+            parts = extracted_path.parts
+            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
+            assert parts[-2] == extracted_subdir
+            assert extracted_path.exists()
+            extracted_file_content = extracted_path.read_text()
+            expected_file_content = text_file.read_text()
+            assert extracted_file_content == expected_file_content
+
+
+def _test_jsonl(path, file):
+    assert path.endswith(".jsonl")
+    for num_items, line in enumerate(file, start=1):
+        item = json.loads(line.decode("utf-8"))
+        assert item.keys() == {"col_1", "col_2", "col_3"}
+    assert num_items == 4
+
+
+@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
+def test_iter_archive_path(archive_jsonl, request):
+    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
+    dl_manager = DownloadManager()
+    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
+        _test_jsonl(path, file)
+    assert num_jsonl == 2
+
+
+@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
+def test_iter_archive_file(archive_nested_jsonl, request):
+    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
+    dl_manager = DownloadManager()
+    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
+        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
+            _test_jsonl(subpath, subfile)
+    assert num_tar == 1
+    assert num_jsonl == 2
+
+
+def test_iter_files(data_dir_with_hidden_files):
+    dl_manager = DownloadManager()
+    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
+        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
+    assert num_file == 2
diff --git a/testbed/huggingface__datasets/tests/test_experimental.py b/testbed/huggingface__datasets/tests/test_experimental.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b86ec5d9fd7c6502bde12170a23446e3089c2af
--- /dev/null
+++ b/testbed/huggingface__datasets/tests/test_experimental.py
@@ -0,0 +1,17 @@
+import unittest
+import warnings
+
+from datasets.utils import experimental
+
+
+@experimental
+def dummy_function():
+    return "success"
+
+
+class TestExperimentalFlag(unittest.TestCase):
+    def test_experimental_warning(self):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            self.assertEqual(dummy_function(), "success")
+        self.assertEqual(len(w), 1)
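+
+
+# Illustrative sketch (not part of the original test file): assuming the
+# decorator wraps the callable transparently (e.g. via functools.wraps),
+# arguments and return values pass through unchanged and a warning is emitted
+# on each call. `dummy_add` is a hypothetical helper added for this sketch.
+@experimental
+def dummy_add(a, b):
+    return a + b
+
+
+class TestExperimentalPassthroughSketch(unittest.TestCase):
+    def test_arguments_pass_through(self):
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            self.assertEqual(dummy_add(1, 2), 3)
+        self.assertEqual(len(w), 1)
diff --git a/testbed/huggingface__datasets/tests/test_filelock.py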
b/testbed/huggingface__datasets/tests/test_filelock.py new file mode 100644 index 0000000000000000000000000000000000000000..6efa6580c6553c6276fd505b3d8f1a5ea1ac50e1 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_filelock.py @@ -0,0 +1,11 @@ +import os + +from datasets.utils._filelock import FileLock + + +def test_long_path(tmpdir): + filename = "a" * 1000 + ".lock" + lock1 = FileLock(str(tmpdir / filename)) + assert lock1.lock_file.endswith(".lock") + assert not lock1.lock_file.endswith(filename) + assert len(os.path.basename(lock1.lock_file)) <= 255 diff --git a/testbed/huggingface__datasets/tests/test_filesystem.py b/testbed/huggingface__datasets/tests/test_filesystem.py new file mode 100644 index 0000000000000000000000000000000000000000..586312973ae1e118981e5db9b1ae61fb6da11aa0 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_filesystem.py @@ -0,0 +1,91 @@ +import importlib +import os + +import fsspec +import pytest +from fsspec import register_implementation +from fsspec.registry import _registry as _fsspec_registry + +from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem + +from .utils import require_lz4, require_zstandard + + +def test_mockfs(mockfs): + assert "mock" in _fsspec_registry + assert "bz2" in _fsspec_registry + + +def test_non_mockfs(): + assert "mock" not in _fsspec_registry + assert "bz2" in _fsspec_registry + + +def test_extract_path_from_uri(): + mock_bucket = "mock-s3-bucket" + dataset_path = f"s3://{mock_bucket}" + dataset_path = extract_path_from_uri(dataset_path) + assert dataset_path.startswith("s3://") is False + + dataset_path = "./local/path" + new_dataset_path = extract_path_from_uri(dataset_path) + assert dataset_path == new_dataset_path + + +def test_is_remote_filesystem(mockfs): + is_remote = is_remote_filesystem(mockfs) + assert is_remote is True + + fs = fsspec.filesystem("file") + + is_remote = is_remote_filesystem(fs) + assert is_remote is False + + +@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) +def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file): + input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} + input_path = input_paths[compression_fs_class.protocol] + if input_path is None: + reason = f"for '{compression_fs_class.protocol}' compression protocol, " + if compression_fs_class.protocol == "lz4": + reason += require_lz4.kwargs["reason"] + elif compression_fs_class.protocol == "zstd": + reason += require_zstandard.kwargs["reason"] + pytest.skip(reason) + fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path) + assert isinstance(fs, compression_fs_class) + expected_filename = os.path.basename(input_path) + expected_filename = expected_filename[: expected_filename.rindex(".")] + assert fs.glob("*") == [expected_filename] + with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file: + assert f.read() == expected_file.read() + + +@pytest.mark.parametrize("protocol", ["zip", "gzip"]) +def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path): + compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} + compressed_file_path = compressed_file_paths[protocol] + member_file_path = "dataset.jsonl" + path = f"{protocol}://{member_file_path}::{compressed_file_path}" + fs, *_ = fsspec.get_fs_token_paths(path) + assert fs.isfile(member_file_path) + assert not 
fs.isfile("non_existing_" + member_file_path) + + +def test_fs_overwrites(): + protocol = "bz2" + + # Import module + import datasets.filesystems + + # Overwrite protocol and reload + register_implementation(protocol, None, clobber=True) + with pytest.warns(UserWarning) as warning_info: + importlib.reload(datasets.filesystems) + + assert len(warning_info) == 1 + assert ( + str(warning_info[0].message) + == f"A filesystem protocol was already set for {protocol} and will be overwritten." + ) diff --git a/testbed/huggingface__datasets/tests/test_fingerprint.py b/testbed/huggingface__datasets/tests/test_fingerprint.py new file mode 100644 index 0000000000000000000000000000000000000000..f4d5d65744e6400e47910558008d6d25b8385222 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_fingerprint.py @@ -0,0 +1,386 @@ +import json +import os +import pickle +import subprocess +from functools import partial +from hashlib import md5 +from pathlib import Path +from tempfile import gettempdir +from textwrap import dedent +from types import FunctionType +from unittest import TestCase +from unittest.mock import patch + +import numpy as np +import pytest +from multiprocess import Pool + +import datasets +from datasets.fingerprint import Hasher, fingerprint_transform +from datasets.table import InMemoryTable + +from .utils import ( + require_regex, + require_spacy, + require_spacy_model, + require_tiktoken, + require_torch, + require_transformers, +) + + +class Foo: + def __init__(self, foo): + self.foo = foo + + def __call__(self): + return self.foo + + +class DatasetChild(datasets.Dataset): + @fingerprint_transform(inplace=False) + def func1(self, new_fingerprint, *args, **kwargs): + return DatasetChild(self.data, fingerprint=new_fingerprint) + + @fingerprint_transform(inplace=False) + def func2(self, new_fingerprint, *args, **kwargs): + return DatasetChild(self.data, fingerprint=new_fingerprint) + + +class UnpicklableCallable: + def __init__(self, callable): + self.callable = callable + + def __call__(self, *args, **kwargs): + if self.callable is not None: + return self.callable(*args, **kwargs) + + def __getstate__(self): + raise pickle.PicklingError() + + +class TokenizersDumpTest(TestCase): + @require_transformers + @pytest.mark.integration + def test_hash_tokenizer(self): + from transformers import AutoTokenizer + + def encode(x): + return tokenizer(x) + + # TODO: add hash consistency tests across sessions + tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + hash1 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest() + hash1_lambda = md5(datasets.utils.py_utils.dumps(lambda x: tokenizer(x))).hexdigest() + hash1_encode = md5(datasets.utils.py_utils.dumps(encode)).hexdigest() + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + hash2 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest() + hash2_lambda = md5(datasets.utils.py_utils.dumps(lambda x: tokenizer(x))).hexdigest() + hash2_encode = md5(datasets.utils.py_utils.dumps(encode)).hexdigest() + tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + hash3 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest() + hash3_lambda = md5(datasets.utils.py_utils.dumps(lambda x: tokenizer(x))).hexdigest() + hash3_encode = md5(datasets.utils.py_utils.dumps(encode)).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + self.assertEqual(hash1_lambda, hash3_lambda) + self.assertNotEqual(hash1_lambda, hash2_lambda) + self.assertEqual(hash1_encode, hash3_encode) + 
self.assertNotEqual(hash1_encode, hash2_encode) + + @require_transformers + @pytest.mark.integration + def test_hash_tokenizer_with_cache(self): + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("gpt2") + hash1 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest() + tokenizer("Hello world !") # call once to change the tokenizer's cache + hash2 = md5(datasets.utils.py_utils.dumps(tokenizer)).hexdigest() + self.assertEqual(hash1, hash2) + + @require_regex + def test_hash_regex(self): + import regex + + pat = regex.Regex("foo") + hash1 = md5(datasets.utils.py_utils.dumps(pat)).hexdigest() + pat = regex.Regex("bar") + hash2 = md5(datasets.utils.py_utils.dumps(pat)).hexdigest() + pat = regex.Regex("foo") + hash3 = md5(datasets.utils.py_utils.dumps(pat)).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + + +class RecurseDumpTest(TestCase): + def test_recurse_dump_for_function(self): + def func(): + return foo + + foo = [0] + hash1 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + foo = [1] + hash2 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + foo = [0] + hash3 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + + def test_dump_ignores_line_definition_of_function(self): + def func(): + pass + + hash1 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + + def func(): + pass + + hash2 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + self.assertEqual(hash1, hash2) + + def test_recurse_dump_for_class(self): + hash1 = md5(datasets.utils.py_utils.dumps(Foo([0]))).hexdigest() + hash2 = md5(datasets.utils.py_utils.dumps(Foo([1]))).hexdigest() + hash3 = md5(datasets.utils.py_utils.dumps(Foo([0]))).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + + def test_recurse_dump_for_method(self): + hash1 = md5(datasets.utils.py_utils.dumps(Foo([0]).__call__)).hexdigest() + hash2 = md5(datasets.utils.py_utils.dumps(Foo([1]).__call__)).hexdigest() + hash3 = md5(datasets.utils.py_utils.dumps(Foo([0]).__call__)).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + + def test_dump_ipython_function(self): + def create_ipython_func(co_filename, returned_obj): + def func(): + return returned_obj + + code = func.__code__ + # Use _create_code from dill in order to make it work for different python versions + code = code.replace(co_filename=co_filename) + return FunctionType(code, func.__globals__, func.__name__, func.__defaults__, func.__closure__) + + co_filename, returned_obj = "", [0] + hash1 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest() + co_filename, returned_obj = "", [1] + hash2 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest() + co_filename, returned_obj = "", [0] + hash3 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + + co_filename, returned_obj = os.path.join(gettempdir(), "ipykernel_12345", "321456789.py"), [0] + hash4 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest() + co_filename, returned_obj = os.path.join(gettempdir(), "ipykernel_12345", "321456789.py"), [1] + hash5 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest() + co_filename, returned_obj = 
os.path.join(gettempdir(), "ipykernel_12345", "654123987.py"), [0] + hash6 = md5(datasets.utils.py_utils.dumps(create_ipython_func(co_filename, returned_obj))).hexdigest() + self.assertEqual(hash4, hash6) + self.assertNotEqual(hash4, hash5) + + def test_recurse_dump_for_function_with_shuffled_globals(self): + foo, bar = [0], [1] + + def func(): + return foo, bar + + func.__module__ = "__main__" + + def globalvars_mock1_side_effect(func, *args, **kwargs): + return {"foo": foo, "bar": bar} + + def globalvars_mock2_side_effect(func, *args, **kwargs): + return {"bar": bar, "foo": foo} + + with patch("dill.detect.globalvars", side_effect=globalvars_mock1_side_effect) as globalvars_mock1: + hash1 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + self.assertGreater(globalvars_mock1.call_count, 0) + with patch("dill.detect.globalvars", side_effect=globalvars_mock2_side_effect) as globalvars_mock2: + hash2 = md5(datasets.utils.py_utils.dumps(func)).hexdigest() + self.assertGreater(globalvars_mock2.call_count, 0) + self.assertEqual(hash1, hash2) + + +class HashingTest(TestCase): + def test_hash_simple(self): + hash1 = Hasher.hash("hello") + hash2 = Hasher.hash("hello") + hash3 = Hasher.hash("there") + self.assertEqual(hash1, hash2) + self.assertNotEqual(hash1, hash3) + + def test_hash_class_instance(self): + hash1 = Hasher.hash(Foo("hello")) + hash2 = Hasher.hash(Foo("hello")) + hash3 = Hasher.hash(Foo("there")) + self.assertEqual(hash1, hash2) + self.assertNotEqual(hash1, hash3) + + def test_hash_update(self): + hasher = Hasher() + for x in ["hello", Foo("hello")]: + hasher.update(x) + hash1 = hasher.hexdigest() + hasher = Hasher() + for x in ["hello", Foo("hello")]: + hasher.update(x) + hash2 = hasher.hexdigest() + hasher = Hasher() + for x in ["there", Foo("there")]: + hasher.update(x) + hash3 = hasher.hexdigest() + self.assertEqual(hash1, hash2) + self.assertNotEqual(hash1, hash3) + + def test_hash_unpicklable(self): + with self.assertRaises(pickle.PicklingError): + Hasher.hash(UnpicklableCallable(Foo("hello"))) + + def test_hash_same_strings(self): + string = "abc" + obj1 = [string, string] # two strings have the same ids + obj2 = [string, string] + obj3 = json.loads(f'["{string}", "{string}"]') # two strings have different ids + self.assertIs(obj1[0], string) + self.assertIs(obj1[0], obj1[1]) + self.assertIs(obj2[0], string) + self.assertIs(obj2[0], obj2[1]) + self.assertIsNot(obj3[0], string) + self.assertIsNot(obj3[0], obj3[1]) + hash1 = Hasher.hash(obj1) + hash2 = Hasher.hash(obj2) + hash3 = Hasher.hash(obj3) + self.assertEqual(hash1, hash2) + self.assertEqual(hash1, hash3) + + def test_set_stable(self): + rng = np.random.default_rng(42) + set_ = {rng.random() for _ in range(10_000)} + expected_hash = Hasher.hash(set_) + assert expected_hash == Pool(1).apply_async(partial(Hasher.hash, set(set_))).get() + + def test_set_doesnt_depend_on_order(self): + set_ = set("abc") + hash1 = md5(datasets.utils.py_utils.dumps(set_)).hexdigest() + set_ = set("def") + hash2 = md5(datasets.utils.py_utils.dumps(set_)).hexdigest() + set_ = set("cba") + hash3 = md5(datasets.utils.py_utils.dumps(set_)).hexdigest() + self.assertEqual(hash1, hash3) + self.assertNotEqual(hash1, hash2) + + @require_tiktoken + def test_hash_tiktoken_encoding(self): + import tiktoken + + enc = tiktoken.get_encoding("gpt2") + hash1 = md5(datasets.utils.py_utils.dumps(enc)).hexdigest() + enc = tiktoken.get_encoding("r50k_base") + hash2 = md5(datasets.utils.py_utils.dumps(enc)).hexdigest() + enc = tiktoken.get_encoding("gpt2") + 
hash3 = md5(datasets.utils.py_utils.dumps(enc)).hexdigest()
+        self.assertEqual(hash1, hash3)
+        self.assertNotEqual(hash1, hash2)
+
+    @require_torch
+    def test_hash_torch_tensor(self):
+        import torch
+
+        t = torch.tensor([1.0])
+        hash1 = md5(datasets.utils.py_utils.dumps(t)).hexdigest()
+        t = torch.tensor([2.0])
+        hash2 = md5(datasets.utils.py_utils.dumps(t)).hexdigest()
+        t = torch.tensor([1.0])
+        hash3 = md5(datasets.utils.py_utils.dumps(t)).hexdigest()
+        self.assertEqual(hash1, hash3)
+        self.assertNotEqual(hash1, hash2)
+
+    @require_spacy
+    @require_spacy_model("en_core_web_sm")
+    @require_spacy_model("fr_core_news_sm")
+    @pytest.mark.integration
+    def test_hash_spacy_model(self):
+        import spacy
+
+        nlp = spacy.load("en_core_web_sm")
+        hash1 = md5(datasets.utils.py_utils.dumps(nlp)).hexdigest()
+        nlp = spacy.load("fr_core_news_sm")
+        hash2 = md5(datasets.utils.py_utils.dumps(nlp)).hexdigest()
+        nlp = spacy.load("en_core_web_sm")
+        hash3 = md5(datasets.utils.py_utils.dumps(nlp)).hexdigest()
+        self.assertEqual(hash1, hash3)
+        self.assertNotEqual(hash1, hash2)
+
+
+@pytest.mark.integration
+def test_move_script_doesnt_change_hash(tmp_path: Path):
+    dir1 = tmp_path / "dir1"
+    dir2 = tmp_path / "dir2"
+    dir1.mkdir()
+    dir2.mkdir()
+    script_filename = "script.py"
+    code = dedent(
+        """
+        from datasets.fingerprint import Hasher
+        def foo():
+            pass
+        print(Hasher.hash(foo))
+        """
+    )
+    script_path1 = dir1 / script_filename
+    script_path2 = dir2 / script_filename
+    with script_path1.open("w") as f:
+        f.write(code)
+    with script_path2.open("w") as f:
+        f.write(code)
+    fingerprint1 = subprocess.check_output(["python", str(script_path1)])
+    fingerprint2 = subprocess.check_output(["python", str(script_path2)])
+    assert fingerprint1 == fingerprint2
+
+
+def test_fingerprint_in_multiprocessing():
+    data = {"a": [0, 1, 2]}
+    dataset = DatasetChild(InMemoryTable.from_pydict(data))
+    expected_fingerprint = dataset.func1()._fingerprint
+    assert expected_fingerprint == dataset.func1()._fingerprint
+    assert expected_fingerprint != dataset.func2()._fingerprint
+
+    with Pool(2) as p:
+        assert expected_fingerprint == p.apply_async(dataset.func1).get()._fingerprint
+        assert expected_fingerprint != p.apply_async(dataset.func2).get()._fingerprint
+
+
+def test_fingerprint_when_transform_version_changes():
+    data = {"a": [0, 1, 2]}
+
+    class DummyDatasetChild(datasets.Dataset):
+        @fingerprint_transform(inplace=False)
+        def func(self, new_fingerprint):
+            return DummyDatasetChild(self.data, fingerprint=new_fingerprint)
+
+    fingerprint_no_version = DummyDatasetChild(InMemoryTable.from_pydict(data)).func()._fingerprint
+
+    class DummyDatasetChild(datasets.Dataset):
+        @fingerprint_transform(inplace=False, version="1.0.0")
+        def func(self, new_fingerprint):
+            return DummyDatasetChild(self.data, fingerprint=new_fingerprint)
+
+    fingerprint_1 = DummyDatasetChild(InMemoryTable.from_pydict(data)).func()._fingerprint
+
+    class DummyDatasetChild(datasets.Dataset):
+        @fingerprint_transform(inplace=False, version="2.0.0")
+        def func(self, new_fingerprint):
+            return DummyDatasetChild(self.data, fingerprint=new_fingerprint)
+
+    fingerprint_2 = DummyDatasetChild(InMemoryTable.from_pydict(data)).func()._fingerprint
+
+    assert len({fingerprint_no_version, fingerprint_1, fingerprint_2}) == 3
+
+
+def test_dependency_on_dill():
+    # AttributeError: module 'dill._dill' has no attribute 'stack'
+    hasher = Hasher()
+    hasher.update(lambda x: x)
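+
+
+# Illustrative sketch (not part of the original suite): `Hasher.hash` is
+# deterministic, so structurally equal picklable objects produce the same
+# hex digest while different values do not.
+def test_hasher_equal_inputs_sketch():
+    assert Hasher.hash({"a": [0, 1, 2]}) == Hasher.hash({"a": [0, 1, 2]})
+    assert Hasher.hash({"a": [0, 1, 2]}) != Hasher.hash({"a": [0, 1, 3]})
diff --git a/testbed/huggingface__datasets/tests/test_formatting.py b/testbed/huggingface__datasets/tests/test_formatting.py
new file mode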
100644 index 0000000000000000000000000000000000000000..9ac7d2c2f58117ddb6270df58aa0664665022793 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_formatting.py @@ -0,0 +1,931 @@ +import datetime +from pathlib import Path +from unittest import TestCase + +import numpy as np +import pandas as pd +import pyarrow as pa +import pytest + +from datasets import Audio, Features, Image, IterableDataset +from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table +from datasets.formatting.formatting import ( + LazyBatch, + LazyRow, + NumpyArrowExtractor, + PandasArrowExtractor, + PythonArrowExtractor, +) +from datasets.table import InMemoryTable + +from .utils import require_jax, require_pil, require_sndfile, require_tf, require_torch + + +class AnyArray: + def __init__(self, data) -> None: + self.data = data + + def __array__(self) -> np.ndarray: + return np.asarray(self.data) + + +def _gen_any_arrays(): + for _ in range(10): + yield {"array": AnyArray(list(range(10)))} + + +@pytest.fixture +def any_arrays_dataset(): + return IterableDataset.from_generator(_gen_any_arrays) + + +_COL_A = [0, 1, 2] +_COL_B = ["foo", "bar", "foobar"] +_COL_C = [[[1.0, 0.0, 0.0]] * 2, [[0.0, 1.0, 0.0]] * 2, [[0.0, 0.0, 1.0]] * 2] +_COL_D = [datetime.datetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)] * 3 + +_INDICES = [1, 0] + +IMAGE_PATH_1 = Path(__file__).parent / "features" / "data" / "test_image_rgb.jpg" +IMAGE_PATH_2 = Path(__file__).parent / "features" / "data" / "test_image_rgba.png" +AUDIO_PATH_1 = Path(__file__).parent / "features" / "data" / "test_audio_44100.wav" + + +class ArrowExtractorTest(TestCase): + def _create_dummy_table(self): + return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D}) + + def test_python_extractor(self): + pa_table = self._create_dummy_table() + extractor = PythonArrowExtractor() + row = extractor.extract_row(pa_table) + self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0], "d": _COL_D[0]}) + col = extractor.extract_column(pa_table) + self.assertEqual(col, _COL_A) + batch = extractor.extract_batch(pa_table) + self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D}) + + def test_numpy_extractor(self): + pa_table = self._create_dummy_table().drop(["c", "d"]) + extractor = NumpyArrowExtractor() + row = extractor.extract_row(pa_table) + np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0]}) + col = extractor.extract_column(pa_table) + np.testing.assert_equal(col, np.array(_COL_A)) + batch = extractor.extract_batch(pa_table) + np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B)}) + + def test_numpy_extractor_nested(self): + pa_table = self._create_dummy_table().drop(["a", "b", "d"]) + extractor = NumpyArrowExtractor() + row = extractor.extract_row(pa_table) + self.assertEqual(row["c"][0].dtype, np.float64) + self.assertEqual(row["c"].dtype, object) + col = extractor.extract_column(pa_table) + self.assertEqual(col[0][0].dtype, np.float64) + self.assertEqual(col[0].dtype, object) + self.assertEqual(col.dtype, object) + batch = extractor.extract_batch(pa_table) + self.assertEqual(batch["c"][0][0].dtype, np.float64) + self.assertEqual(batch["c"][0].dtype, object) + self.assertEqual(batch["c"].dtype, object) + + def test_numpy_extractor_temporal(self): + pa_table = self._create_dummy_table().drop(["a", "b", "c"]) + extractor = NumpyArrowExtractor() + row = extractor.extract_row(pa_table) + self.assertTrue(np.issubdtype(row["d"].dtype, 
np.datetime64)) + col = extractor.extract_column(pa_table) + self.assertTrue(np.issubdtype(col[0].dtype, np.datetime64)) + self.assertTrue(np.issubdtype(col.dtype, np.datetime64)) + batch = extractor.extract_batch(pa_table) + self.assertTrue(np.issubdtype(batch["d"][0].dtype, np.datetime64)) + self.assertTrue(np.issubdtype(batch["d"].dtype, np.datetime64)) + + def test_pandas_extractor(self): + pa_table = self._create_dummy_table() + extractor = PandasArrowExtractor() + row = extractor.extract_row(pa_table) + self.assertIsInstance(row, pd.DataFrame) + pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1]) + pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1]) + col = extractor.extract_column(pa_table) + pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a")) + batch = extractor.extract_batch(pa_table) + self.assertIsInstance(batch, pd.DataFrame) + pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a")) + pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b")) + + def test_pandas_extractor_nested(self): + pa_table = self._create_dummy_table().drop(["a", "b", "d"]) + extractor = PandasArrowExtractor() + row = extractor.extract_row(pa_table) + self.assertEqual(row["c"][0][0].dtype, np.float64) + self.assertEqual(row["c"].dtype, object) + col = extractor.extract_column(pa_table) + self.assertEqual(col[0][0].dtype, np.float64) + self.assertEqual(col[0].dtype, object) + self.assertEqual(col.dtype, object) + batch = extractor.extract_batch(pa_table) + self.assertEqual(batch["c"][0][0].dtype, np.float64) + self.assertEqual(batch["c"][0].dtype, object) + self.assertEqual(batch["c"].dtype, object) + + def test_pandas_extractor_temporal(self): + pa_table = self._create_dummy_table().drop(["a", "b", "c"]) + extractor = PandasArrowExtractor() + row = extractor.extract_row(pa_table) + self.assertTrue(pd.api.types.is_datetime64_any_dtype(row["d"].dtype)) + col = extractor.extract_column(pa_table) + self.assertTrue(isinstance(col[0], datetime.datetime)) + self.assertTrue(pd.api.types.is_datetime64_any_dtype(col.dtype)) + batch = extractor.extract_batch(pa_table) + self.assertTrue(isinstance(batch["d"][0], datetime.datetime)) + self.assertTrue(pd.api.types.is_datetime64_any_dtype(batch["d"].dtype)) + + +class LazyDictTest(TestCase): + def _create_dummy_table(self): + return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) + + def _create_dummy_formatter(self): + return PythonFormatter(lazy=True) + + def test_lazy_dict_copy(self): + pa_table = self._create_dummy_table() + formatter = self._create_dummy_formatter() + lazy_batch = formatter.format_batch(pa_table) + lazy_batch_copy = lazy_batch.copy() + self.assertEqual(type(lazy_batch), type(lazy_batch_copy)) + self.assertEqual(lazy_batch.items(), lazy_batch_copy.items()) + lazy_batch["d"] = [1, 2, 3] + self.assertNotEqual(lazy_batch.items(), lazy_batch_copy.items()) + + +class FormatterTest(TestCase): + def _create_dummy_table(self): + return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) + + def test_python_formatter(self): + pa_table = self._create_dummy_table() + formatter = PythonFormatter() + row = formatter.format_row(pa_table) + self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]}) + col = formatter.format_column(pa_table) + self.assertEqual(col, _COL_A) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C}) + + def test_python_formatter_lazy(self): + pa_table = 
self._create_dummy_table() + formatter = PythonFormatter(lazy=True) + row = formatter.format_row(pa_table) + self.assertIsInstance(row, LazyRow) + self.assertEqual(row["a"], _COL_A[0]) + self.assertEqual(row["b"], _COL_B[0]) + self.assertEqual(row["c"], _COL_C[0]) + batch = formatter.format_batch(pa_table) + self.assertIsInstance(batch, LazyBatch) + self.assertEqual(batch["a"], _COL_A) + self.assertEqual(batch["b"], _COL_B) + self.assertEqual(batch["c"], _COL_C) + + def test_numpy_formatter(self): + pa_table = self._create_dummy_table() + formatter = NumpyFormatter() + row = formatter.format_row(pa_table) + np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])}) + col = formatter.format_column(pa_table) + np.testing.assert_equal(col, np.array(_COL_A)) + batch = formatter.format_batch(pa_table) + np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)}) + assert batch["c"].shape == np.array(_COL_C).shape + + def test_numpy_formatter_np_array_kwargs(self): + pa_table = self._create_dummy_table().drop(["b"]) + formatter = NumpyFormatter(dtype=np.float16) + row = formatter.format_row(pa_table) + self.assertEqual(row["c"].dtype, np.dtype(np.float16)) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, np.float16) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["a"].dtype, np.dtype(np.float16)) + self.assertEqual(batch["c"].dtype, np.dtype(np.float16)) + + @require_pil + def test_numpy_formatter_image(self): + # same dimensions + pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) + formatter = NumpyFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, np.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, np.uint8) + self.assertEqual(col.shape, (2, 480, 640, 3)) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["image"].dtype, np.uint8) + self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) + + # different dimensions + pa_table = pa.table( + {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} + ) + formatter = NumpyFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, np.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertIsInstance(col, np.ndarray) + self.assertEqual(col.dtype, object) + self.assertEqual(col[0].dtype, np.uint8) + self.assertEqual(col[0].shape, (480, 640, 3)) + batch = formatter.format_batch(pa_table) + self.assertIsInstance(batch["image"], np.ndarray) + self.assertEqual(batch["image"].dtype, object) + self.assertEqual(batch["image"][0].dtype, np.uint8) + self.assertEqual(batch["image"][0].shape, (480, 640, 3)) + + @require_sndfile + def test_numpy_formatter_audio(self): + pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) + formatter = NumpyFormatter(features=Features({"audio": Audio()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["audio"]["array"].dtype, np.dtype(np.float32)) + col = formatter.format_column(pa_table) + self.assertEqual(col[0]["array"].dtype, np.float32) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["audio"][0]["array"].dtype, np.dtype(np.float32)) + + def test_pandas_formatter(self): + pa_table = 
self._create_dummy_table() + formatter = PandasFormatter() + row = formatter.format_row(pa_table) + self.assertIsInstance(row, pd.DataFrame) + pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1]) + pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1]) + col = formatter.format_column(pa_table) + pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a")) + batch = formatter.format_batch(pa_table) + self.assertIsInstance(batch, pd.DataFrame) + pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a")) + pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b")) + + @require_torch + def test_torch_formatter(self): + import torch + + from datasets.formatting import TorchFormatter + + pa_table = self._create_dummy_table() + formatter = TorchFormatter() + row = formatter.format_row(pa_table) + torch.testing.assert_close(row["a"], torch.tensor(_COL_A, dtype=torch.int64)[0]) + assert row["b"] == _COL_B[0] + torch.testing.assert_close(row["c"], torch.tensor(_COL_C, dtype=torch.float32)[0]) + col = formatter.format_column(pa_table) + torch.testing.assert_close(col, torch.tensor(_COL_A, dtype=torch.int64)) + batch = formatter.format_batch(pa_table) + torch.testing.assert_close(batch["a"], torch.tensor(_COL_A, dtype=torch.int64)) + assert batch["b"] == _COL_B + torch.testing.assert_close(batch["c"], torch.tensor(_COL_C, dtype=torch.float32)) + assert batch["c"].shape == np.array(_COL_C).shape + + @require_torch + def test_torch_formatter_torch_tensor_kwargs(self): + import torch + + from datasets.formatting import TorchFormatter + + pa_table = self._create_dummy_table().drop(["b"]) + formatter = TorchFormatter(dtype=torch.float16) + row = formatter.format_row(pa_table) + self.assertEqual(row["c"].dtype, torch.float16) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, torch.float16) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["a"].dtype, torch.float16) + self.assertEqual(batch["c"].dtype, torch.float16) + + @require_torch + @require_pil + def test_torch_formatter_image(self): + import torch + + from datasets.formatting import TorchFormatter + + # same dimensions + pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) + formatter = TorchFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, torch.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, torch.uint8) + self.assertEqual(col.shape, (2, 480, 640, 3)) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["image"].dtype, torch.uint8) + self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) + + # different dimensions + pa_table = pa.table( + {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} + ) + formatter = TorchFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, torch.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertIsInstance(col, list) + self.assertEqual(col[0].dtype, torch.uint8) + self.assertEqual(col[0].shape, (480, 640, 3)) + batch = formatter.format_batch(pa_table) + self.assertIsInstance(batch["image"], list) + self.assertEqual(batch["image"][0].dtype, torch.uint8) + self.assertEqual(batch["image"][0].shape, (480, 640, 3)) + + @require_torch + 
@require_sndfile + def test_torch_formatter_audio(self): + import torch + + from datasets.formatting import TorchFormatter + + pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) + formatter = TorchFormatter(features=Features({"audio": Audio()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["audio"]["array"].dtype, torch.float32) + col = formatter.format_column(pa_table) + self.assertEqual(col[0]["array"].dtype, torch.float32) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["audio"][0]["array"].dtype, torch.float32) + + @require_tf + def test_tf_formatter(self): + import tensorflow as tf + + from datasets.formatting import TFFormatter + + pa_table = self._create_dummy_table() + formatter = TFFormatter() + row = formatter.format_row(pa_table) + tf.debugging.assert_equal(row["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)[0]) + tf.debugging.assert_equal(row["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)[0]) + tf.debugging.assert_equal(row["c"], tf.convert_to_tensor(_COL_C, dtype=tf.float32)[0]) + col = formatter.format_column(pa_table) + tf.debugging.assert_equal(col, tf.ragged.constant(_COL_A, dtype=tf.int64)) + batch = formatter.format_batch(pa_table) + tf.debugging.assert_equal(batch["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)) + tf.debugging.assert_equal(batch["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)) + self.assertIsInstance(batch["c"], tf.Tensor) + self.assertEqual(batch["c"].dtype, tf.float32) + tf.debugging.assert_equal( + batch["c"].shape.as_list(), tf.convert_to_tensor(_COL_C, dtype=tf.float32).shape.as_list() + ) + tf.debugging.assert_equal(tf.convert_to_tensor(batch["c"]), tf.convert_to_tensor(_COL_C, dtype=tf.float32)) + + @require_tf + def test_tf_formatter_tf_tensor_kwargs(self): + import tensorflow as tf + + from datasets.formatting import TFFormatter + + pa_table = self._create_dummy_table().drop(["b"]) + formatter = TFFormatter(dtype=tf.float16) + row = formatter.format_row(pa_table) + self.assertEqual(row["c"].dtype, tf.float16) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, tf.float16) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["a"].dtype, tf.float16) + self.assertEqual(batch["c"].dtype, tf.float16) + + @require_tf + @require_pil + def test_tf_formatter_image(self): + import tensorflow as tf + + from datasets.formatting import TFFormatter + + # same dimensions + pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) + formatter = TFFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, tf.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, tf.uint8) + self.assertEqual(col.shape, (2, 480, 640, 3)) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["image"][0].dtype, tf.uint8) + self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) + + # different dimensions + pa_table = pa.table( + {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} + ) + formatter = TFFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, tf.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertIsInstance(col, list) + self.assertEqual(col[0].dtype, tf.uint8) + self.assertEqual(col[0].shape, (480, 
640, 3)) + batch = formatter.format_batch(pa_table) + self.assertIsInstance(batch["image"], list) + self.assertEqual(batch["image"][0].dtype, tf.uint8) + self.assertEqual(batch["image"][0].shape, (480, 640, 3)) + + @require_tf + @require_sndfile + def test_tf_formatter_audio(self): + import tensorflow as tf + + from datasets.formatting import TFFormatter + + pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) + formatter = TFFormatter(features=Features({"audio": Audio()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["audio"]["array"].dtype, tf.float32) + col = formatter.format_column(pa_table) + self.assertEqual(col[0]["array"].dtype, tf.float32) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["audio"][0]["array"].dtype, tf.float32) + + @require_jax + def test_jax_formatter(self): + import jax + import jax.numpy as jnp + + from datasets.formatting import JaxFormatter + + pa_table = self._create_dummy_table() + formatter = JaxFormatter() + row = formatter.format_row(pa_table) + jnp.allclose(row["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)[0]) + assert row["b"] == _COL_B[0] + jnp.allclose(row["c"], jnp.array(_COL_C, dtype=jnp.float32)[0]) + col = formatter.format_column(pa_table) + jnp.allclose(col, jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)) + batch = formatter.format_batch(pa_table) + jnp.allclose(batch["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)) + assert batch["b"] == _COL_B + jnp.allclose(batch["c"], jnp.array(_COL_C, dtype=jnp.float32)) + assert batch["c"].shape == np.array(_COL_C).shape + + @require_jax + def test_jax_formatter_jnp_array_kwargs(self): + import jax.numpy as jnp + + from datasets.formatting import JaxFormatter + + pa_table = self._create_dummy_table().drop(["b"]) + formatter = JaxFormatter(dtype=jnp.float16) + row = formatter.format_row(pa_table) + self.assertEqual(row["c"].dtype, jnp.float16) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, jnp.float16) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["a"].dtype, jnp.float16) + self.assertEqual(batch["c"].dtype, jnp.float16) + + @require_jax + @require_pil + def test_jax_formatter_image(self): + import jax.numpy as jnp + + from datasets.formatting import JaxFormatter + + # same dimensions + pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2}) + formatter = JaxFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, jnp.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertEqual(col.dtype, jnp.uint8) + self.assertEqual(col.shape, (2, 480, 640, 3)) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["image"].dtype, jnp.uint8) + self.assertEqual(batch["image"].shape, (2, 480, 640, 3)) + + # different dimensions + pa_table = pa.table( + {"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]} + ) + formatter = JaxFormatter(features=Features({"image": Image()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["image"].dtype, jnp.uint8) + self.assertEqual(row["image"].shape, (480, 640, 3)) + col = formatter.format_column(pa_table) + self.assertIsInstance(col, list) + self.assertEqual(col[0].dtype, jnp.uint8) + self.assertEqual(col[0].shape, (480, 640, 3)) + batch = 
formatter.format_batch(pa_table) + self.assertIsInstance(batch["image"], list) + self.assertEqual(batch["image"][0].dtype, jnp.uint8) + self.assertEqual(batch["image"][0].shape, (480, 640, 3)) + + @require_jax + @require_sndfile + def test_jax_formatter_audio(self): + import jax.numpy as jnp + + from datasets.formatting import JaxFormatter + + pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]}) + formatter = JaxFormatter(features=Features({"audio": Audio()})) + row = formatter.format_row(pa_table) + self.assertEqual(row["audio"]["array"].dtype, jnp.float32) + col = formatter.format_column(pa_table) + self.assertEqual(col[0]["array"].dtype, jnp.float32) + batch = formatter.format_batch(pa_table) + self.assertEqual(batch["audio"][0]["array"].dtype, jnp.float32) + + @require_jax + def test_jax_formatter_device(self): + import jax + + from datasets.formatting import JaxFormatter + + pa_table = self._create_dummy_table() + device = jax.devices()[0] + formatter = JaxFormatter(device=str(device)) + row = formatter.format_row(pa_table) + assert row["a"].device() == device + assert row["c"].device() == device + col = formatter.format_column(pa_table) + assert col.device() == device + batch = formatter.format_batch(pa_table) + assert batch["a"].device() == device + assert batch["c"].device() == device + + +class QueryTest(TestCase): + def _create_dummy_table(self): + return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}) + + def _create_dummy_arrow_indices(self): + return pa.Table.from_arrays([pa.array(_INDICES, type=pa.uint64())], names=["indices"]) + + def assertTableEqual(self, first: pa.Table, second: pa.Table): + self.assertEqual(first.schema, second.schema) + for first_array, second_array in zip(first, second): + self.assertEqual(first_array, second_array) + self.assertEqual(first, second) + + def test_query_table_int(self): + pa_table = self._create_dummy_table() + table = InMemoryTable(pa_table) + n = pa_table.num_rows + # classical usage + subtable = query_table(table, 0) + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]})) + subtable = query_table(table, 1) + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]})) + subtable = query_table(table, -1) + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]})) + # raise an IndexError + with self.assertRaises(IndexError): + query_table(table, n) + with self.assertRaises(IndexError): + query_table(table, -(n + 1)) + # with indices + indices = InMemoryTable(self._create_dummy_arrow_indices()) + subtable = query_table(table, 0, indices=indices) + self.assertTableEqual( + subtable, + pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), + ) + with self.assertRaises(IndexError): + assert len(indices) < n + query_table(table, len(indices), indices=indices) + + def test_query_table_slice(self): + pa_table = self._create_dummy_table() + table = InMemoryTable(pa_table) + n = pa_table.num_rows + # classical usage + subtable = query_table(table, slice(0, 1)) + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]})) + subtable = query_table(table, slice(1, 2)) + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]})) + subtable = query_table(table, slice(-2, -1)) + self.assertTableEqual( + subtable, 
pa.Table.from_pydict({"a": _COL_A[-2:-1], "b": _COL_B[-2:-1], "c": _COL_C[-2:-1]})
+        )
+        # usage with None
+        subtable = query_table(table, slice(-1, None))
+        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
+        subtable = query_table(table, slice(None, n + 1))
+        self.assertTableEqual(
+            subtable, pa.Table.from_pydict({"a": _COL_A[: n + 1], "b": _COL_B[: n + 1], "c": _COL_C[: n + 1]})
+        )
+        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
+        subtable = query_table(table, slice(-(n + 1), None))
+        self.assertTableEqual(
+            subtable, pa.Table.from_pydict({"a": _COL_A[-(n + 1) :], "b": _COL_B[-(n + 1) :], "c": _COL_C[-(n + 1) :]})
+        )
+        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
+        # usage with step
+        subtable = query_table(table, slice(None, None, 2))
+        self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[::2], "b": _COL_B[::2], "c": _COL_C[::2]}))
+        # empty output but no errors
+        subtable = query_table(table, slice(-1, 0))  # usage with both negative and positive idx
+        assert len(_COL_A[-1:0]) == 0
+        self.assertTableEqual(subtable, pa_table.slice(0, 0))
+        subtable = query_table(table, slice(2, 1))
+        assert len(_COL_A[2:1]) == 0
+        self.assertTableEqual(subtable, pa_table.slice(0, 0))
+        subtable = query_table(table, slice(n, n))
+        assert len(_COL_A[n:n]) == 0
+        self.assertTableEqual(subtable, pa_table.slice(0, 0))
+        subtable = query_table(table, slice(n, n + 1))
+        assert len(_COL_A[n : n + 1]) == 0
+        self.assertTableEqual(subtable, pa_table.slice(0, 0))
+        # it's not possible to get an error with a slice
+
+        # with indices
+        indices = InMemoryTable(self._create_dummy_arrow_indices())
+        subtable = query_table(table, slice(0, 1), indices=indices)
+        self.assertTableEqual(
+            subtable,
+            pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
+        )
+        subtable = query_table(table, slice(n - 1, n), indices=indices)
+        assert len(indices.column(0).to_pylist()[n - 1 : n]) == 0
+        self.assertTableEqual(subtable, pa_table.slice(0, 0))
+
+    def test_query_table_range(self):
+        pa_table = self._create_dummy_table()
+        table = InMemoryTable(pa_table)
+        n = pa_table.num_rows
+        np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
+        # classical usage
+        subtable = query_table(table, range(0, 1))
+        self.assertTableEqual(
+            subtable,
+            pa.Table.from_pydict({"a": np_A[range(0, 1)], "b": np_B[range(0, 1)], "c": np_C[range(0, 1)].tolist()}),
+        )
+        subtable = query_table(table, range(1, 2))
+        self.assertTableEqual(
+            subtable,
+            pa.Table.from_pydict({"a": np_A[range(1, 2)], "b": np_B[range(1, 2)], "c": np_C[range(1, 2)].tolist()}),
+        )
+        subtable = query_table(table, range(-2, -1))
+        self.assertTableEqual(
+            subtable,
+            pa.Table.from_pydict(
+                {"a": np_A[range(-2, -1)], "b": np_B[range(-2, -1)], "c": np_C[range(-2, -1)].tolist()}
+            ),
+        )
+        # usage with both negative and positive idx
+        subtable = query_table(table, range(-1, 0))
+        self.assertTableEqual(
+            subtable,
+            pa.Table.from_pydict({"a": np_A[range(-1, 0)], "b": np_B[range(-1, 0)], "c": np_C[range(-1, 0)].tolist()}),
+        )
+        subtable = query_table(table, range(-1, n))
+        self.assertTableEqual(
+            subtable,
+            pa.Table.from_pydict({"a": np_A[range(-1, n)], "b": np_B[range(-1, n)], "c": np_C[range(-1, n)].tolist()}),
+        )
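+        # Editorial note (not in the original test): range keys are validated
+        # against numpy fancy indexing on np_A throughout this method, on the
+        # assumption that query_table is meant to mirror numpy's semantics
+        # for range keys.
+        # usage with step
+        subtable = query_table(table, range(0, n, 2))
+        self.assertTableEqual(
+            subtable,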
pa.Table.from_pydict( + {"a": np_A[range(0, n, 2)], "b": np_B[range(0, n, 2)], "c": np_C[range(0, n, 2)].tolist()} + ), + ) + subtable = query_table(table, range(0, n + 1, 2 * n)) + self.assertTableEqual( + subtable, + pa.Table.from_pydict( + { + "a": np_A[range(0, n + 1, 2 * n)], + "b": np_B[range(0, n + 1, 2 * n)], + "c": np_C[range(0, n + 1, 2 * n)].tolist(), + } + ), + ) + # empty ouput but no errors + subtable = query_table(table, range(2, 1)) + assert len(np_A[range(2, 1)]) == 0 + self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema)) + subtable = query_table(table, range(n, n)) + assert len(np_A[range(n, n)]) == 0 + self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema)) + # raise an IndexError + with self.assertRaises(IndexError): + with self.assertRaises(IndexError): + np_A[range(0, n + 1)] + query_table(table, range(0, n + 1)) + with self.assertRaises(IndexError): + with self.assertRaises(IndexError): + np_A[range(-(n + 1), -1)] + query_table(table, range(-(n + 1), -1)) + with self.assertRaises(IndexError): + with self.assertRaises(IndexError): + np_A[range(n, n + 1)] + query_table(table, range(n, n + 1)) + # with indices + indices = InMemoryTable(self._create_dummy_arrow_indices()) + subtable = query_table(table, range(0, 1), indices=indices) + self.assertTableEqual( + subtable, + pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), + ) + with self.assertRaises(IndexError): + assert len(indices) < n + query_table(table, range(len(indices), len(indices) + 1), indices=indices) + + def test_query_table_str(self): + pa_table = self._create_dummy_table() + table = InMemoryTable(pa_table) + subtable = query_table(table, "a") + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A})) + with self.assertRaises(KeyError): + query_table(table, "z") + indices = InMemoryTable(self._create_dummy_arrow_indices()) + subtable = query_table(table, "a", indices=indices) + self.assertTableEqual(subtable, pa.Table.from_pydict({"a": [_COL_A[i] for i in _INDICES]})) + + def test_query_table_iterable(self): + pa_table = self._create_dummy_table() + table = InMemoryTable(pa_table) + n = pa_table.num_rows + np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C) + # classical usage + subtable = query_table(table, [0]) + self.assertTableEqual( + subtable, pa.Table.from_pydict({"a": np_A[[0]], "b": np_B[[0]], "c": np_C[[0]].tolist()}) + ) + subtable = query_table(table, [1]) + self.assertTableEqual( + subtable, pa.Table.from_pydict({"a": np_A[[1]], "b": np_B[[1]], "c": np_C[[1]].tolist()}) + ) + subtable = query_table(table, [-1]) + self.assertTableEqual( + subtable, pa.Table.from_pydict({"a": np_A[[-1]], "b": np_B[[-1]], "c": np_C[[-1]].tolist()}) + ) + subtable = query_table(table, [0, -1, 1]) + self.assertTableEqual( + subtable, + pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}), + ) + # numpy iterable + subtable = query_table(table, np.array([0, -1, 1])) + self.assertTableEqual( + subtable, + pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}), + ) + # empty ouput but no errors + subtable = query_table(table, []) + assert len(np_A[[]]) == 0 + self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema)) + # raise an IndexError + with self.assertRaises(IndexError): + with self.assertRaises(IndexError): + np_A[[n]] + 
query_table(table, [n]) + with self.assertRaises(IndexError): + with self.assertRaises(IndexError): + np_A[[-(n + 1)]] + query_table(table, [-(n + 1)]) + # with indices + indices = InMemoryTable(self._create_dummy_arrow_indices()) + subtable = query_table(table, [0], indices=indices) + self.assertTableEqual( + subtable, + pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}), + ) + with self.assertRaises(IndexError): + assert len(indices) < n + query_table(table, [len(indices)], indices=indices) + + def test_query_table_invalid_key_type(self): + pa_table = self._create_dummy_table() + table = InMemoryTable(pa_table) + with self.assertRaises(TypeError): + query_table(table, 0.0) + with self.assertRaises(TypeError): + query_table(table, [0, "a"]) + with self.assertRaises(TypeError): + query_table(table, int) + with self.assertRaises(TypeError): + + def iter_to_inf(start=0): + while True: + yield start + start += 1 + + query_table(table, iter_to_inf()) + + +@pytest.fixture(scope="session") +def arrow_table(): + return pa.Table.from_pydict({"col_int": [0, 1, 2], "col_float": [0.0, 1.0, 2.0]}) + + +@require_tf +@pytest.mark.parametrize( + "cast_schema", + [ + None, + [("col_int", pa.int64()), ("col_float", pa.float64())], + [("col_int", pa.int32()), ("col_float", pa.float64())], + [("col_int", pa.int64()), ("col_float", pa.float32())], + ], +) +def test_tf_formatter_sets_default_dtypes(cast_schema, arrow_table): + import tensorflow as tf + + from datasets.formatting import TFFormatter + + if cast_schema: + arrow_table = arrow_table.cast(pa.schema(cast_schema)) + arrow_table_dict = arrow_table.to_pydict() + list_int = arrow_table_dict["col_int"] + list_float = arrow_table_dict["col_float"] + formatter = TFFormatter() + + row = formatter.format_row(arrow_table) + tf.debugging.assert_equal(row["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)[0]) + tf.debugging.assert_equal(row["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)[0]) + + col = formatter.format_column(arrow_table) + tf.debugging.assert_equal(col, tf.ragged.constant(list_int, dtype=tf.int64)) + + batch = formatter.format_batch(arrow_table) + tf.debugging.assert_equal(batch["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)) + tf.debugging.assert_equal(batch["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)) + + +@require_torch +@pytest.mark.parametrize( + "cast_schema", + [ + None, + [("col_int", pa.int64()), ("col_float", pa.float64())], + [("col_int", pa.int32()), ("col_float", pa.float64())], + [("col_int", pa.int64()), ("col_float", pa.float32())], + ], +) +def test_torch_formatter_sets_default_dtypes(cast_schema, arrow_table): + import torch + + from datasets.formatting import TorchFormatter + + if cast_schema: + arrow_table = arrow_table.cast(pa.schema(cast_schema)) + arrow_table_dict = arrow_table.to_pydict() + list_int = arrow_table_dict["col_int"] + list_float = arrow_table_dict["col_float"] + formatter = TorchFormatter() + + row = formatter.format_row(arrow_table) + torch.testing.assert_close(row["col_int"], torch.tensor(list_int, dtype=torch.int64)[0]) + torch.testing.assert_close(row["col_float"], torch.tensor(list_float, dtype=torch.float32)[0]) + + col = formatter.format_column(arrow_table) + torch.testing.assert_close(col, torch.tensor(list_int, dtype=torch.int64)) + + batch = formatter.format_batch(arrow_table) + torch.testing.assert_close(batch["col_int"], torch.tensor(list_int, dtype=torch.int64)) + 
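# note: per the test name, these are the formatter's default dtypes; even when the schema is cast to int32/float64 above, values are expected to come back as int64/float32 +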
torch.testing.assert_close(batch["col_float"], torch.tensor(list_float, dtype=torch.float32)) + + +def test_iterable_dataset_of_arrays_format_to_arrow(any_arrays_dataset: IterableDataset): + formatted = any_arrays_dataset.with_format("arrow") + assert all(isinstance(example, pa.Table) for example in formatted) + + +def test_iterable_dataset_of_arrays_format_to_numpy(any_arrays_dataset: IterableDataset): + formatted = any_arrays_dataset.with_format("np") + assert all(isinstance(example["array"], np.ndarray) for example in formatted) + + +@require_torch +def test_iterable_dataset_of_arrays_format_to_torch(any_arrays_dataset: IterableDataset): + import torch + + formatted = any_arrays_dataset.with_format("torch") + assert all(isinstance(example["array"], torch.Tensor) for example in formatted) + + +@require_tf +def test_iterable_dataset_of_arrays_format_to_tf(any_arrays_dataset: IterableDataset): + import tensorflow as tf + + formatted = any_arrays_dataset.with_format("tf") + assert all(isinstance(example["array"], tf.Tensor) for example in formatted) + + +@require_jax +def test_iterable_dataset_of_arrays_format_to_jax(any_arrays_dataset: IterableDataset): + import jax.numpy as jnp + + formatted = any_arrays_dataset.with_format("jax") + assert all(isinstance(example["array"], jnp.ndarray) for example in formatted) diff --git a/testbed/huggingface__datasets/tests/test_info_utils.py b/testbed/huggingface__datasets/tests/test_info_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3e71c78eef90bdf3b463eb66be856d8fae74e569 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_info_utils.py @@ -0,0 +1,22 @@ +import pytest + +import datasets.config +from datasets.utils.info_utils import is_small_dataset + + +@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20]) +@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20]) +def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch): + if input_in_memory_max_size != "default": + monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size) + in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE + if input_in_memory_max_size == "default": + assert in_memory_max_size == 0 + else: + assert in_memory_max_size == input_in_memory_max_size + if dataset_size and in_memory_max_size: + expected = dataset_size < in_memory_max_size + else: + expected = False + result = is_small_dataset(dataset_size) + assert result == expected diff --git a/testbed/huggingface__datasets/tests/test_inspect.py b/testbed/huggingface__datasets/tests/test_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..9d6aac45ba3f26e6d792b7151e4331aacd5dcc34 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_inspect.py @@ -0,0 +1,126 @@ +import os + +import pytest + +from datasets import ( + get_dataset_config_info, + get_dataset_config_names, + get_dataset_infos, + get_dataset_split_names, + inspect_dataset, + inspect_metric, +) + + +pytestmark = pytest.mark.integration + + +@pytest.mark.parametrize("path", ["paws", "csv"]) +def test_inspect_dataset(path, tmp_path): + inspect_dataset(path, tmp_path) + script_name = path + ".py" + assert script_name in os.listdir(tmp_path) + assert "__pycache__" not in os.listdir(tmp_path) + + +@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning") +@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning") +@pytest.mark.parametrize("path", 
["accuracy"]) +def test_inspect_metric(path, tmp_path): + inspect_metric(path, tmp_path) + script_name = path + ".py" + assert script_name in os.listdir(tmp_path) + assert "__pycache__" not in os.listdir(tmp_path) + + +@pytest.mark.parametrize( + "path, config_name, expected_splits", + [ + ("squad", "plain_text", ["train", "validation"]), + ("dalle-mini/wit", "default", ["train"]), + ("paws", "labeled_final", ["train", "test", "validation"]), + ], +) +def test_get_dataset_config_info(path, config_name, expected_splits): + info = get_dataset_config_info(path, config_name=config_name) + assert info.config_name == config_name + assert list(info.splits.keys()) == expected_splits + + +def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data): + info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token) + assert list(info.splits.keys()) == ["train"] + + +@pytest.mark.parametrize( + "path, config_name, expected_exception", + [ + ("paws", None, ValueError), + ], +) +def test_get_dataset_config_info_error(path, config_name, expected_exception): + with pytest.raises(expected_exception): + get_dataset_config_info(path, config_name=config_name) + + +@pytest.mark.parametrize( + "path, expected", + [ + ("acronym_identification", ["default"]), + ("squad", ["plain_text"]), + ("hf-internal-testing/dataset_with_script", ["default"]), + ("dalle-mini/wit", ["default"]), + ("hf-internal-testing/librispeech_asr_dummy", ["clean", "other"]), + ("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]), + ("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]), + ("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]), + ], +) +def test_get_dataset_config_names(path, expected): + config_names = get_dataset_config_names(path) + assert config_names == expected + + +@pytest.mark.parametrize( + "path, expected_configs, expected_splits_in_first_config", + [ + ("squad", ["plain_text"], ["train", "validation"]), + ("dalle-mini/wit", ["default"], ["train"]), + ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]), + ], +) +def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config): + infos = get_dataset_infos(path) + assert list(infos.keys()) == expected_configs + expected_config = expected_configs[0] + assert expected_config in infos + info = infos[expected_config] + assert info.config_name == expected_config + assert list(info.splits.keys()) == expected_splits_in_first_config + + +@pytest.mark.parametrize( + "path, expected_config, expected_splits", + [ + ("squad", "plain_text", ["train", "validation"]), + ("dalle-mini/wit", "default", ["train"]), + ("paws", "labeled_final", ["train", "test", "validation"]), + ], +) +def test_get_dataset_split_names(path, expected_config, expected_splits): + infos = get_dataset_infos(path) + assert expected_config in infos + info = infos[expected_config] + assert info.config_name == expected_config + assert list(info.splits.keys()) == expected_splits + + +@pytest.mark.parametrize( + "path, config_name, expected_exception", + [ + ("paws", None, ValueError), + ], +) +def test_get_dataset_split_names_error(path, config_name, expected_exception): + with pytest.raises(expected_exception): + get_dataset_split_names(path, config_name=config_name) diff --git a/testbed/huggingface__datasets/tests/test_load.py b/testbed/huggingface__datasets/tests/test_load.py new file mode 100644 index 
0000000000000000000000000000000000000000..aa5a275dd73cb508a8f1382ed561e277cd00df00 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_load.py @@ -0,0 +1,1488 @@ +import importlib +import os +import pickle +import shutil +import tempfile +import time +from hashlib import sha256 +from multiprocessing import Pool +from pathlib import Path +from unittest import TestCase +from unittest.mock import patch + +import dill +import pyarrow as pa +import pytest +import requests + +import datasets +from datasets import config, load_dataset, load_from_disk +from datasets.arrow_dataset import Dataset +from datasets.arrow_writer import ArrowWriter +from datasets.builder import DatasetBuilder +from datasets.config import METADATA_CONFIGS_FIELD +from datasets.data_files import DataFilesDict +from datasets.dataset_dict import DatasetDict, IterableDatasetDict +from datasets.download.download_config import DownloadConfig +from datasets.exceptions import DatasetNotFoundError +from datasets.features import Features, Value +from datasets.iterable_dataset import IterableDataset +from datasets.load import ( + CachedDatasetModuleFactory, + CachedMetricModuleFactory, + GithubMetricModuleFactory, + HubDatasetModuleFactoryWithoutScript, + HubDatasetModuleFactoryWithScript, + LocalDatasetModuleFactoryWithoutScript, + LocalDatasetModuleFactoryWithScript, + LocalMetricModuleFactory, + PackagedDatasetModuleFactory, + infer_module_for_data_files_list, + infer_module_for_data_files_list_in_archives, + load_dataset_builder, +) +from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig +from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig +from datasets.utils.logging import INFO, get_logger + +from .utils import ( + OfflineSimulationMode, + assert_arrow_memory_doesnt_increase, + assert_arrow_memory_increases, + offline, + require_pil, + require_sndfile, + set_current_working_directory_to_temp_dir, +) + + +DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" + +DATASET_LOADING_SCRIPT_CODE = """ +import os + +import datasets +from datasets import DatasetInfo, Features, Split, SplitGenerator, Value + + +class __DummyDataset1__(datasets.GeneratorBasedBuilder): + + def _info(self) -> DatasetInfo: + return DatasetInfo(features=Features({"text": Value("string")})) + + def _split_generators(self, dl_manager): + return [ + SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}), + SplitGenerator(Split.TEST, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "test.txt")}), + ] + + def _generate_examples(self, filepath, **kwargs): + with open(filepath, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + yield i, {"text": line.strip()} +""" + +SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script +SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files +SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories +SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file outside of the train/test directories +SAMPLE_DATASET_IDENTIFIER5 = "hf-internal-testing/imagefolder_with_metadata_no_splits" # imagefolder with a metadata file and no default split names in data files +SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy" +SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy" 
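+# note: the two "_dummy" identifiers above are expected not to resolve on the Hub; tests below use them to trigger DatasetNotFoundError (or ConnectionError when offline)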
+SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata" +SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata" +SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" +SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT = ( + "hf-internal-testing/audiofolder_two_configs_in_metadata_with_default" +) + + +METRIC_LOADING_SCRIPT_NAME = "__dummy_metric1__" + +METRIC_LOADING_SCRIPT_CODE = """ +import datasets +from datasets import MetricInfo, Features, Value + + +class __DummyMetric1__(datasets.Metric): + + def _info(self): + return MetricInfo(features=Features({"predictions": Value("int64"), "references": Value("int64")})) + + def _compute(self, predictions, references): + return {"__dummy_metric1__": sum(int(p == r) for p, r in zip(predictions, references))} +""" + + +@pytest.fixture +def data_dir(tmp_path): + data_dir = tmp_path / "data_dir" + data_dir.mkdir() + with open(data_dir / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(data_dir / "test.txt", "w") as f: + f.write("bar\n" * 10) + return str(data_dir) + + +@pytest.fixture +def data_dir_with_arrow(tmp_path): + data_dir = tmp_path / "data_dir" + data_dir.mkdir() + output_train = os.path.join(data_dir, "train.arrow") + with ArrowWriter(path=output_train) as writer: + writer.write_table(pa.Table.from_pydict({"col_1": ["foo"] * 10})) + num_examples, num_bytes = writer.finalize() + assert num_examples == 10 + assert num_bytes > 0 + output_test = os.path.join(data_dir, "test.arrow") + with ArrowWriter(path=output_test) as writer: + writer.write_table(pa.Table.from_pydict({"col_1": ["bar"] * 10})) + num_examples, num_bytes = writer.finalize() + assert num_examples == 10 + assert num_bytes > 0 + return str(data_dir) + + +@pytest.fixture +def data_dir_with_metadata(tmp_path): + data_dir = tmp_path / "data_dir_with_metadata" + data_dir.mkdir() + with open(data_dir / "train.jpg", "wb") as f: + f.write(b"train_image_bytes") + with open(data_dir / "test.jpg", "wb") as f: + f.write(b"test_image_bytes") + with open(data_dir / "metadata.jsonl", "w") as f: + f.write( + """\ + {"file_name": "train.jpg", "caption": "Cool train image"} + {"file_name": "test.jpg", "caption": "Cool test image"} + """ + ) + return str(data_dir) + + +@pytest.fixture +def data_dir_with_single_config_in_metadata(tmp_path): + data_dir = tmp_path / "data_dir_with_one_default_config_in_metadata" + + cats_data_dir = data_dir / "cats" + cats_data_dir.mkdir(parents=True) + dogs_data_dir = data_dir / "dogs" + dogs_data_dir.mkdir(parents=True) + + with open(cats_data_dir / "cat.jpg", "wb") as f: + f.write(b"this_is_a_cat_image_bytes") + with open(dogs_data_dir / "dog.jpg", "wb") as f: + f.write(b"this_is_a_dog_image_bytes") + with open(data_dir / "README.md", "w") as f: + f.write( + f"""\ +--- +{METADATA_CONFIGS_FIELD}: + - config_name: custom + drop_labels: true +--- + """ + ) + return str(data_dir) + + +@pytest.fixture +def data_dir_with_two_config_in_metadata(tmp_path): + data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" + cats_data_dir = data_dir / "cats" + cats_data_dir.mkdir(parents=True) + dogs_data_dir = data_dir / "dogs" + dogs_data_dir.mkdir(parents=True) + + with open(cats_data_dir / "cat.jpg", "wb") as f: + f.write(b"this_is_a_cat_image_bytes") + with open(dogs_data_dir / "dog.jpg", "wb") as f: + f.write(b"this_is_a_dog_image_bytes") + + with open(data_dir / "README.md", "w") as f: + f.write( + f"""\ +--- 
+{METADATA_CONFIGS_FIELD}: + - config_name: "v1" + drop_labels: true + default: true + - config_name: "v2" + drop_labels: false +--- + """ + ) + return str(data_dir) + + +@pytest.fixture +def data_dir_with_data_dir_configs_in_metadata(tmp_path): + data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" + cats_data_dir = data_dir / "cats" + cats_data_dir.mkdir(parents=True) + dogs_data_dir = data_dir / "dogs" + dogs_data_dir.mkdir(parents=True) + + with open(cats_data_dir / "cat.jpg", "wb") as f: + f.write(b"this_is_a_cat_image_bytes") + with open(dogs_data_dir / "dog.jpg", "wb") as f: + f.write(b"this_is_a_dog_image_bytes") + + with open(data_dir / "README.md", "w") as f: + f.write( + f"""\ +--- +{METADATA_CONFIGS_FIELD}: + - config_name: "v1" + data_dir: cats + - config_name: "v2" + data_dir: dogs +--- + """ + ) + return str(data_dir) + + +@pytest.fixture +def sub_data_dirs(tmp_path): + data_dir2 = tmp_path / "data_dir2" + relative_subdir1 = "subdir1" + sub_data_dir1 = data_dir2 / relative_subdir1 + sub_data_dir1.mkdir(parents=True) + with open(sub_data_dir1 / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(sub_data_dir1 / "test.txt", "w") as f: + f.write("bar\n" * 10) + + relative_subdir2 = "subdir2" + sub_data_dir2 = data_dir2 / relative_subdir2 + sub_data_dir2.mkdir(parents=True) + with open(sub_data_dir2 / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(sub_data_dir2 / "test.txt", "w") as f: + f.write("bar\n" * 10) + + return str(data_dir2), relative_subdir1 + + +@pytest.fixture +def complex_data_dir(tmp_path): + data_dir = tmp_path / "complex_data_dir" + data_dir.mkdir() + (data_dir / "data").mkdir() + with open(data_dir / "data" / "train.txt", "w") as f: + f.write("foo\n" * 10) + with open(data_dir / "data" / "test.txt", "w") as f: + f.write("bar\n" * 10) + with open(data_dir / "README.md", "w") as f: + f.write("This is a readme") + with open(data_dir / ".dummy", "w") as f: + f.write("this is a dummy file that is not a data file") + return str(data_dir) + + +@pytest.fixture +def dataset_loading_script_dir(tmp_path): + script_name = DATASET_LOADING_SCRIPT_NAME + script_dir = tmp_path / script_name + script_dir.mkdir() + script_path = script_dir / f"{script_name}.py" + with open(script_path, "w") as f: + f.write(DATASET_LOADING_SCRIPT_CODE) + return str(script_dir) + + +@pytest.fixture +def dataset_loading_script_dir_readonly(tmp_path): + script_name = DATASET_LOADING_SCRIPT_NAME + script_dir = tmp_path / "readonly" / script_name + script_dir.mkdir(parents=True) + script_path = script_dir / f"{script_name}.py" + with open(script_path, "w") as f: + f.write(DATASET_LOADING_SCRIPT_CODE) + dataset_loading_script_dir = str(script_dir) + # Make this directory readonly + os.chmod(dataset_loading_script_dir, 0o555) + os.chmod(os.path.join(dataset_loading_script_dir, f"{script_name}.py"), 0o555) + return dataset_loading_script_dir + + +@pytest.fixture +def metric_loading_script_dir(tmp_path): + script_name = METRIC_LOADING_SCRIPT_NAME + script_dir = tmp_path / script_name + script_dir.mkdir() + script_path = script_dir / f"{script_name}.py" + with open(script_path, "w") as f: + f.write(METRIC_LOADING_SCRIPT_CODE) + return str(script_dir) + + +@pytest.mark.parametrize( + "data_files, expected_module, expected_builder_kwargs", + [ + (["train.csv"], "csv", {}), + (["train.tsv"], "csv", {"sep": "\t"}), + (["train.json"], "json", {}), + (["train.jsonl"], "json", {}), + (["train.parquet"], "parquet", {}), + (["train.arrow"], "arrow", {}), + (["train.txt"], "text", {}), + (["uppercase.TXT"], "text", {}), + (["unsupported.ext"], None, {}), + ([""], None, {}), + ], +) +def test_infer_module_for_data_files(data_files, expected_module, expected_builder_kwargs): + module, 
builder_kwargs = infer_module_for_data_files_list(data_files) + assert module == expected_module + assert builder_kwargs == expected_builder_kwargs + + +@pytest.mark.parametrize( + "data_file, expected_module", + [ + ("zip_csv_path", "csv"), + ("zip_csv_with_dir_path", "csv"), + ("zip_uppercase_csv_path", "csv"), + ("zip_unsupported_ext_path", None), + ], +) +def test_infer_module_for_data_files_in_archives( + data_file, expected_module, zip_csv_path, zip_csv_with_dir_path, zip_uppercase_csv_path, zip_unsupported_ext_path +): + data_file_paths = { + "zip_csv_path": zip_csv_path, + "zip_csv_with_dir_path": zip_csv_with_dir_path, + "zip_uppercase_csv_path": zip_uppercase_csv_path, + "zip_unsupported_ext_path": zip_unsupported_ext_path, + } + data_files = [str(data_file_paths[data_file])] + inferred_module, _ = infer_module_for_data_files_list_in_archives(data_files) + assert inferred_module == expected_module + + +class ModuleFactoryTest(TestCase): + @pytest.fixture(autouse=True) + def inject_fixtures( + self, + jsonl_path, + data_dir, + data_dir_with_metadata, + data_dir_with_single_config_in_metadata, + data_dir_with_two_config_in_metadata, + sub_data_dirs, + dataset_loading_script_dir, + metric_loading_script_dir, + ): + self._jsonl_path = jsonl_path + self._data_dir = data_dir + self._data_dir_with_metadata = data_dir_with_metadata + self._data_dir_with_single_config_in_metadata = data_dir_with_single_config_in_metadata + self._data_dir_with_two_config_in_metadata = data_dir_with_two_config_in_metadata + self._data_dir2 = sub_data_dirs[0] + self._sub_data_dir = sub_data_dirs[1] + self._dataset_loading_script_dir = dataset_loading_script_dir + self._metric_loading_script_dir = metric_loading_script_dir + + def setUp(self): + self.hf_modules_cache = tempfile.mkdtemp() + self.cache_dir = tempfile.mkdtemp() + self.download_config = DownloadConfig(cache_dir=self.cache_dir) + self.dynamic_modules_path = datasets.load.init_dynamic_modules( + name="test_datasets_modules_" + os.path.basename(self.hf_modules_cache), + hf_modules_cache=self.hf_modules_cache, + ) + + def test_HubDatasetModuleFactoryWithScript_with_github_dataset(self): + # "wmt_t2t" has additional imports (internal) + factory = HubDatasetModuleFactoryWithScript( + "wmt_t2t", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + + def test_GithubMetricModuleFactory_with_internal_import(self): + # "squad_v2" requires additional imports (internal) + factory = GithubMetricModuleFactory( + "squad_v2", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + @pytest.mark.filterwarnings("ignore:GithubMetricModuleFactory is deprecated:FutureWarning") + def test_GithubMetricModuleFactory_with_external_import(self): + # "bleu" requires additional imports (external from github) + factory = GithubMetricModuleFactory( + "bleu", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + def test_LocalMetricModuleFactory(self): + path = 
os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py") + factory = LocalMetricModuleFactory( + path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + def test_LocalDatasetModuleFactoryWithScript(self): + path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") + factory = LocalDatasetModuleFactoryWithScript( + path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) + + def test_LocalDatasetModuleFactoryWithoutScript(self): + factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) + + def test_LocalDatasetModuleFactoryWithoutScript_with_data_dir(self): + factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir2, data_dir=self._sub_data_dir) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]["train"]) == 1 + and len(module_factory_result.builder_kwargs["data_files"]["test"]) == 1 + ) + assert all( + self._sub_data_dir in Path(data_file).parts + for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + + module_factory_result.builder_kwargs["data_files"]["test"] + ) + + def test_LocalDatasetModuleFactoryWithoutScript_with_metadata(self): + factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir_with_metadata) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 + and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["test"] + ) + + def test_LocalDatasetModuleFactoryWithoutScript_with_single_config_in_metadata(self): + factory = LocalDatasetModuleFactoryWithoutScript( + self._data_dir_with_single_config_in_metadata, + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs + assert module_metadata_configs is not None + assert len(module_metadata_configs) == 1 + assert next(iter(module_metadata_configs)) == "custom" + assert "drop_labels" in next(iter(module_metadata_configs.values())) + assert next(iter(module_metadata_configs.values()))["drop_labels"] is True + + module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs + assert module_builder_configs is not None + assert 
len(module_builder_configs) == 1 + assert isinstance(module_builder_configs[0], ImageFolderConfig) + assert module_builder_configs[0].name == "custom" + assert module_builder_configs[0].data_files is not None + assert isinstance(module_builder_configs[0].data_files, DataFilesDict) + assert len(module_builder_configs[0].data_files) == 1 # one train split + assert len(module_builder_configs[0].data_files["train"]) == 2 # two files + assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata + + # config named "default" is automatically considered to be a default config + assert module_factory_result.builder_configs_parameters.default_config_name is None + + # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly + assert "drop_labels" not in module_factory_result.builder_kwargs + + def test_LocalDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self): + factory = LocalDatasetModuleFactoryWithoutScript( + self._data_dir_with_two_config_in_metadata, + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs + assert module_metadata_configs is not None + assert len(module_metadata_configs) == 2 + assert list(module_metadata_configs) == ["v1", "v2"] + assert "drop_labels" in module_metadata_configs["v1"] + assert module_metadata_configs["v1"]["drop_labels"] is True + assert "drop_labels" in module_metadata_configs["v2"] + assert module_metadata_configs["v2"]["drop_labels"] is False + + module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs + assert module_builder_configs is not None + assert len(module_builder_configs) == 2 + module_builder_config_v1, module_builder_config_v2 = module_builder_configs + assert module_builder_config_v1.name == "v1" + assert module_builder_config_v2.name == "v2" + assert isinstance(module_builder_config_v1, ImageFolderConfig) + assert isinstance(module_builder_config_v2, ImageFolderConfig) + assert isinstance(module_builder_config_v1.data_files, DataFilesDict) + assert isinstance(module_builder_config_v2.data_files, DataFilesDict) + assert sorted(module_builder_config_v1.data_files) == ["train"] + assert len(module_builder_config_v1.data_files["train"]) == 2 + assert sorted(module_builder_config_v2.data_files) == ["train"] + assert len(module_builder_config_v2.data_files["train"]) == 2 + assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata + assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata + + assert ( + module_factory_result.builder_configs_parameters.default_config_name == "v1" + ) # it's marked as a default one in yaml + + # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly + assert "drop_labels" not in module_factory_result.builder_kwargs + + def test_PackagedDatasetModuleFactory(self): + factory = PackagedDatasetModuleFactory( + "json", data_files=self._jsonl_path, download_config=self.download_config + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + def test_PackagedDatasetModuleFactory_with_data_dir(self): + factory = PackagedDatasetModuleFactory("json", data_dir=self._data_dir, download_config=self.download_config) + module_factory_result = factory.get_module() 
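+ # given only data_dir, the packaged "json" factory is expected to resolve per-split data_files (train/test) from the file names in that directory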
+ assert importlib.import_module(module_factory_result.module_path) is not None + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 + and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 + ) + assert Path(module_factory_result.builder_kwargs["data_files"]["train"][0]).parent.samefile(self._data_dir) + assert Path(module_factory_result.builder_kwargs["data_files"]["test"][0]).parent.samefile(self._data_dir) + + def test_PackagedDatasetModuleFactory_with_data_dir_and_metadata(self): + factory = PackagedDatasetModuleFactory( + "imagefolder", data_dir=self._data_dir_with_metadata, download_config=self.download_config + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 + and len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 + ) + assert Path(module_factory_result.builder_kwargs["data_files"]["train"][0]).parent.samefile( + self._data_dir_with_metadata + ) + assert Path(module_factory_result.builder_kwargs["data_files"]["test"][0]).parent.samefile( + self._data_dir_with_metadata + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["test"] + ) + + @pytest.mark.integration + def test_HubDatasetModuleFactoryWithoutScript(self): + factory = HubDatasetModuleFactoryWithoutScript( + SAMPLE_DATASET_IDENTIFIER2, download_config=self.download_config + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + + @pytest.mark.integration + def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self): + data_dir = "data2" + factory = HubDatasetModuleFactoryWithoutScript( + SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]["train"]) == 1 + and len(module_factory_result.builder_kwargs["data_files"]["test"]) == 1 + ) + assert all( + data_dir in Path(data_file).parts + for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + + module_factory_result.builder_kwargs["data_files"]["test"] + ) + + @pytest.mark.integration + def test_HubDatasetModuleFactoryWithoutScript_with_metadata(self): + factory = HubDatasetModuleFactoryWithoutScript( + SAMPLE_DATASET_IDENTIFIER4, download_config=self.download_config + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 + and 
len(module_factory_result.builder_kwargs["data_files"]["test"]) > 0 + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["test"] + ) + + factory = HubDatasetModuleFactoryWithoutScript( + SAMPLE_DATASET_IDENTIFIER5, download_config=self.download_config + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + assert ( + module_factory_result.builder_kwargs["data_files"] is not None + and len(module_factory_result.builder_kwargs["data_files"]) == 1 + and len(module_factory_result.builder_kwargs["data_files"]["train"]) > 0 + ) + assert any( + Path(data_file).name == "metadata.jsonl" + for data_file in module_factory_result.builder_kwargs["data_files"]["train"] + ) + + @pytest.mark.integration + def test_HubDatasetModuleFactoryWithoutScript_with_one_default_config_in_metadata(self): + factory = HubDatasetModuleFactoryWithoutScript( + SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, + download_config=self.download_config, + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + + module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs + assert module_metadata_configs is not None + assert len(module_metadata_configs) == 1 + assert next(iter(module_metadata_configs)) == "custom" + assert "drop_labels" in next(iter(module_metadata_configs.values())) + assert next(iter(module_metadata_configs.values()))["drop_labels"] is True + + module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs + assert module_builder_configs is not None + assert len(module_builder_configs) == 1 + assert isinstance(module_builder_configs[0], AudioFolderConfig) + assert module_builder_configs[0].name == "custom" + assert module_builder_configs[0].data_files is not None + assert isinstance(module_builder_configs[0].data_files, DataFilesDict) + assert sorted(module_builder_configs[0].data_files) == ["test", "train"] + assert len(module_builder_configs[0].data_files["train"]) == 3 + assert len(module_builder_configs[0].data_files["test"]) == 3 + assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata + + # config named "default" is automatically considered to be a default config + assert module_factory_result.builder_configs_parameters.default_config_name is None + + # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly + assert "drop_labels" not in module_factory_result.builder_kwargs + + @pytest.mark.integration + def test_HubDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self): + datasets_names = [SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT] + for dataset_name in datasets_names: + factory = HubDatasetModuleFactoryWithoutScript(dataset_name, download_config=self.download_config) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + module_metadata_configs = 
module_factory_result.builder_configs_parameters.metadata_configs + assert module_metadata_configs is not None + assert len(module_metadata_configs) == 2 + assert list(module_metadata_configs) == ["v1", "v2"] + assert "drop_labels" in module_metadata_configs["v1"] + assert module_metadata_configs["v1"]["drop_labels"] is True + assert "drop_labels" in module_metadata_configs["v2"] + assert module_metadata_configs["v2"]["drop_labels"] is False + + module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs + assert module_builder_configs is not None + assert len(module_builder_configs) == 2 + module_builder_config_v1, module_builder_config_v2 = module_builder_configs + assert module_builder_config_v1.name == "v1" + assert module_builder_config_v2.name == "v2" + assert isinstance(module_builder_config_v1, AudioFolderConfig) + assert isinstance(module_builder_config_v2, AudioFolderConfig) + assert isinstance(module_builder_config_v1.data_files, DataFilesDict) + assert isinstance(module_builder_config_v2.data_files, DataFilesDict) + assert sorted(module_builder_config_v1.data_files) == ["test", "train"] + assert len(module_builder_config_v1.data_files["train"]) == 3 + assert len(module_builder_config_v1.data_files["test"]) == 3 + assert sorted(module_builder_config_v2.data_files) == ["test", "train"] + assert len(module_builder_config_v2.data_files["train"]) == 2 + assert len(module_builder_config_v2.data_files["test"]) == 1 + assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata + assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata + # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly + assert "drop_labels" not in module_factory_result.builder_kwargs + + if dataset_name == SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT: + assert module_factory_result.builder_configs_parameters.default_config_name == "v1" + else: + assert module_factory_result.builder_configs_parameters.default_config_name is None + + @pytest.mark.integration + def test_HubDatasetModuleFactoryWithScript(self): + factory = HubDatasetModuleFactoryWithScript( + SAMPLE_DATASET_IDENTIFIER, + download_config=self.download_config, + dynamic_modules_path=self.dynamic_modules_path, + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) + + def test_CachedDatasetModuleFactory(self): + path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") + factory = LocalDatasetModuleFactoryWithScript( + path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + for offline_mode in OfflineSimulationMode: + with offline(offline_mode): + factory = CachedDatasetModuleFactory( + DATASET_LOADING_SCRIPT_NAME, + dynamic_modules_path=self.dynamic_modules_path, + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + @pytest.mark.filterwarnings("ignore:LocalMetricModuleFactory is deprecated:FutureWarning") + @pytest.mark.filterwarnings("ignore:CachedMetricModuleFactory is deprecated:FutureWarning") + def test_CachedMetricModuleFactory(self): + path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py") + factory = 
LocalMetricModuleFactory( + path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path + ) + module_factory_result = factory.get_module() + for offline_mode in OfflineSimulationMode: + with offline(offline_mode): + factory = CachedMetricModuleFactory( + METRIC_LOADING_SCRIPT_NAME, + dynamic_modules_path=self.dynamic_modules_path, + ) + module_factory_result = factory.get_module() + assert importlib.import_module(module_factory_result.module_path) is not None + + +@pytest.mark.parametrize( + "factory_class", + [ + CachedDatasetModuleFactory, + CachedMetricModuleFactory, + GithubMetricModuleFactory, + HubDatasetModuleFactoryWithoutScript, + HubDatasetModuleFactoryWithScript, + LocalDatasetModuleFactoryWithoutScript, + LocalDatasetModuleFactoryWithScript, + LocalMetricModuleFactory, + PackagedDatasetModuleFactory, + ], +) +def test_module_factories(factory_class): + name = "dummy_name" + factory = factory_class(name) + assert factory.name == name + + +@pytest.mark.integration +class LoadTest(TestCase): + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog): + self._caplog = caplog + + def setUp(self): + self.hf_modules_cache = tempfile.mkdtemp() + self.dynamic_modules_path = datasets.load.init_dynamic_modules( + name="test_datasets_modules2", hf_modules_cache=self.hf_modules_cache + ) + + def tearDown(self): + shutil.rmtree(self.hf_modules_cache) + + def _dummy_module_dir(self, modules_dir, dummy_module_name, dummy_code): + assert dummy_module_name.startswith("__") + module_dir = os.path.join(modules_dir, dummy_module_name) + os.makedirs(module_dir, exist_ok=True) + module_path = os.path.join(module_dir, dummy_module_name + ".py") + with open(module_path, "w") as f: + f.write(dummy_code) + return module_dir + + def test_dataset_module_factory(self): + with tempfile.TemporaryDirectory() as tmp_dir: + # prepare module from directory path + dummy_code = "MY_DUMMY_VARIABLE = 'hello there'" + module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code) + dataset_module = datasets.load.dataset_module_factory( + module_dir, dynamic_modules_path=self.dynamic_modules_path + ) + dummy_module = importlib.import_module(dataset_module.module_path) + self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "hello there") + self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest()) + # prepare module from file path + check resolved_file_path + dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'" + module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code) + module_path = os.path.join(module_dir, "__dummy_module_name1__.py") + dataset_module = datasets.load.dataset_module_factory( + module_path, dynamic_modules_path=self.dynamic_modules_path + ) + dummy_module = importlib.import_module(dataset_module.module_path) + self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "general kenobi") + self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest()) + # missing module + for offline_simulation_mode in list(OfflineSimulationMode): + with offline(offline_simulation_mode): + with self.assertRaises( + (DatasetNotFoundError, ConnectionError, requests.exceptions.ConnectionError) + ): + datasets.load.dataset_module_factory( + "__missing_dummy_module_name__", dynamic_modules_path=self.dynamic_modules_path + ) + + def test_offline_dataset_module_factory(self): + with tempfile.TemporaryDirectory() as tmp_dir: + dummy_code = "MY_DUMMY_VARIABLE = 'hello there'" + module_dir = 
self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code) + dataset_module_1 = datasets.load.dataset_module_factory( + module_dir, dynamic_modules_path=self.dynamic_modules_path + ) + time.sleep(0.1) # make sure there's a difference in the OS update time of the python file + dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'" + module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code) + dataset_module_2 = datasets.load.dataset_module_factory( + module_dir, dynamic_modules_path=self.dynamic_modules_path + ) + for offline_simulation_mode in list(OfflineSimulationMode): + with offline(offline_simulation_mode): + self._caplog.clear() + # allow provide the module name without an explicit path to remote or local actual file + dataset_module_3 = datasets.load.dataset_module_factory( + "__dummy_module_name2__", dynamic_modules_path=self.dynamic_modules_path + ) + # it loads the most recent version of the module + self.assertEqual(dataset_module_2.module_path, dataset_module_3.module_path) + self.assertNotEqual(dataset_module_1.module_path, dataset_module_3.module_path) + self.assertIn("Using the latest cached version of the module", self._caplog.text) + + def test_load_dataset_from_hub(self): + with self.assertRaises(DatasetNotFoundError) as context: + datasets.load_dataset("_dummy") + self.assertIn( + "Dataset '_dummy' doesn't exist on the Hub", + str(context.exception), + ) + with self.assertRaises(DatasetNotFoundError) as context: + datasets.load_dataset("_dummy", revision="0.0.0") + self.assertIn( + "Dataset '_dummy' doesn't exist on the Hub", + str(context.exception), + ) + self.assertIn( + "at revision '0.0.0'", + str(context.exception), + ) + for offline_simulation_mode in list(OfflineSimulationMode): + with offline(offline_simulation_mode): + with self.assertRaises(ConnectionError) as context: + datasets.load_dataset("_dummy") + if offline_simulation_mode != OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: + self.assertIn( + "Couldn't reach '_dummy' on the Hub", + str(context.exception), + ) + + def test_load_dataset_namespace(self): + with self.assertRaises(DatasetNotFoundError) as context: + datasets.load_dataset("hf-internal-testing/_dummy") + self.assertIn( + "hf-internal-testing/_dummy", + str(context.exception), + ) + for offline_simulation_mode in list(OfflineSimulationMode): + with offline(offline_simulation_mode): + with self.assertRaises(ConnectionError) as context: + datasets.load_dataset("hf-internal-testing/_dummy") + self.assertIn("hf-internal-testing/_dummy", str(context.exception), msg=offline_simulation_mode) + + +@pytest.mark.integration +def test_load_dataset_builder_with_metadata(): + builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) + assert isinstance(builder, ImageFolder) + assert builder.config.name == "default" + assert builder.config.data_files is not None + assert builder.config.drop_metadata is None + builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, "non-existing-config") + assert isinstance(builder, ImageFolder) + assert builder.config.name == "non-existing-config" + + +@pytest.mark.integration +def test_load_dataset_builder_config_kwargs_passed_as_arguments(): + builder_default = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) + builder_custom = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) + assert builder_custom.config.drop_metadata != builder_default.config.drop_metadata + assert builder_custom.config.drop_metadata is True + + 
+@pytest.mark.integration +def test_load_dataset_builder_with_two_configs_in_metadata(): + builder = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") + assert isinstance(builder, AudioFolder) + assert builder.config.name == "v1" + assert builder.config.data_files is not None + with pytest.raises(ValueError): + datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) + with pytest.raises(ValueError): + datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config") + + +@pytest.mark.parametrize("serializer", [pickle, dill]) +def test_load_dataset_builder_with_metadata_configs_pickable(serializer): + builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) + builder_unpickled = serializer.loads(serializer.dumps(builder)) + assert builder.BUILDER_CONFIGS == builder_unpickled.BUILDER_CONFIGS + assert list(builder_unpickled.builder_configs) == ["custom"] + assert isinstance(builder_unpickled.builder_configs["custom"], AudioFolderConfig) + + builder2 = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") + builder2_unpickled = serializer.loads(serializer.dumps(builder2)) + assert builder2.BUILDER_CONFIGS == builder2_unpickled.BUILDER_CONFIGS != builder_unpickled.BUILDER_CONFIGS + assert list(builder2_unpickled.builder_configs) == ["v1", "v2"] + assert isinstance(builder2_unpickled.builder_configs["v1"], AudioFolderConfig) + assert isinstance(builder2_unpickled.builder_configs["v2"], AudioFolderConfig) + + +def test_load_dataset_builder_for_absolute_script_dir(dataset_loading_script_dir, data_dir): + builder = datasets.load_dataset_builder(dataset_loading_script_dir, data_dir=data_dir) + assert isinstance(builder, DatasetBuilder) + assert builder.name == DATASET_LOADING_SCRIPT_NAME + assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME + assert builder.info.features == Features({"text": Value("string")}) + + +def test_load_dataset_builder_for_relative_script_dir(dataset_loading_script_dir, data_dir): + with set_current_working_directory_to_temp_dir(): + relative_script_dir = DATASET_LOADING_SCRIPT_NAME + shutil.copytree(dataset_loading_script_dir, relative_script_dir) + builder = datasets.load_dataset_builder(relative_script_dir, data_dir=data_dir) + assert isinstance(builder, DatasetBuilder) + assert builder.name == DATASET_LOADING_SCRIPT_NAME + assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME + assert builder.info.features == Features({"text": Value("string")}) + + +def test_load_dataset_builder_for_script_path(dataset_loading_script_dir, data_dir): + builder = datasets.load_dataset_builder( + os.path.join(dataset_loading_script_dir, DATASET_LOADING_SCRIPT_NAME + ".py"), data_dir=data_dir + ) + assert isinstance(builder, DatasetBuilder) + assert builder.name == DATASET_LOADING_SCRIPT_NAME + assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME + assert builder.info.features == Features({"text": Value("string")}) + + +def test_load_dataset_builder_for_absolute_data_dir(complex_data_dir): + builder = datasets.load_dataset_builder(complex_data_dir) + assert isinstance(builder, DatasetBuilder) + assert builder.name == "text" + assert builder.dataset_name == Path(complex_data_dir).name + assert builder.config.name == "default" + assert isinstance(builder.config.data_files, DataFilesDict) + assert len(builder.config.data_files["train"]) > 0 + assert len(builder.config.data_files["test"]) > 0 + + +def test_load_dataset_builder_for_relative_data_dir(complex_data_dir): + 
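# the fixture dir is copied under a temporary cwd first, so that load_dataset_builder can be exercised with a purely relative path +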
with set_current_working_directory_to_temp_dir(): + relative_data_dir = "relative_data_dir" + shutil.copytree(complex_data_dir, relative_data_dir) + builder = datasets.load_dataset_builder(relative_data_dir) + assert isinstance(builder, DatasetBuilder) + assert builder.name == "text" + assert builder.dataset_name == relative_data_dir + assert builder.config.name == "default" + assert isinstance(builder.config.data_files, DataFilesDict) + assert len(builder.config.data_files["train"]) > 0 + assert len(builder.config.data_files["test"]) > 0 + + +@pytest.mark.integration +def test_load_dataset_builder_for_community_dataset_with_script(): + builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER) + assert isinstance(builder, DatasetBuilder) + assert builder.name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] + assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] + assert builder.config.name == "default" + assert builder.info.features == Features({"text": Value("string")}) + namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")] + assert builder._relative_data_dir().startswith(namespace) + assert SAMPLE_DATASET_IDENTIFIER.replace("/", "--") in builder.__module__ + + +@pytest.mark.integration +def test_load_dataset_builder_for_community_dataset_without_script(): + builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER2) + assert isinstance(builder, DatasetBuilder) + assert builder.name == "text" + assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER2.split("/")[-1] + assert builder.config.name == "default" + assert isinstance(builder.config.data_files, DataFilesDict) + assert len(builder.config.data_files["train"]) > 0 + assert len(builder.config.data_files["test"]) > 0 + + +def test_load_dataset_builder_fail(): + with pytest.raises(DatasetNotFoundError): + datasets.load_dataset_builder("blabla") + + +@pytest.mark.parametrize("keep_in_memory", [False, True]) +def test_load_dataset_local(dataset_loading_script_dir, data_dir, keep_in_memory, caplog): + with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=keep_in_memory) + assert isinstance(dataset, DatasetDict) + assert all(isinstance(d, Dataset) for d in dataset.values()) + assert len(dataset) == 2 + assert isinstance(next(iter(dataset["train"])), dict) + for offline_simulation_mode in list(OfflineSimulationMode): + with offline(offline_simulation_mode): + caplog.clear() + # Load dataset from cache + dataset = datasets.load_dataset(DATASET_LOADING_SCRIPT_NAME, data_dir=data_dir) + assert len(dataset) == 2 + assert "Using the latest cached version of the module" in caplog.text + with pytest.raises(DatasetNotFoundError) as exc_info: + datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) + assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value) + + +def test_load_dataset_streaming(dataset_loading_script_dir, data_dir): + dataset = load_dataset(dataset_loading_script_dir, streaming=True, data_dir=data_dir) + assert isinstance(dataset, IterableDatasetDict) + assert all(isinstance(d, IterableDataset) for d in dataset.values()) + assert len(dataset) == 2 + assert isinstance(next(iter(dataset["train"])), dict) + + +def test_load_dataset_streaming_gz_json(jsonl_gz_path): + data_files = jsonl_gz_path + ds = load_dataset("json", split="train", data_files=data_files, streaming=True) + assert isinstance(ds, 
IterableDataset) + ds_item = next(iter(ds)) + assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} + + +@pytest.mark.integration +@pytest.mark.parametrize( + "path", ["sample.jsonl", "sample.jsonl.gz", "sample.tar", "sample.jsonl.xz", "sample.zip", "sample.jsonl.zst"] +) +def test_load_dataset_streaming_compressed_files(path): + repo_id = "hf-internal-testing/compressed_files" + data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path}" + if data_files[-3:] in ("zip", "tar"): # we need to glob "*" inside archives + data_files = data_files[-3:] + "://*::" + data_files + return # TODO(QL, albert): re-add support for streaming ZIP and TAR archives + ds = load_dataset("json", split="train", data_files=data_files, streaming=True) + assert isinstance(ds, IterableDataset) + ds_item = next(iter(ds)) + assert ds_item == { + "tokens": ["Ministeri", "de", "Justícia", "d'Espanya"], + "ner_tags": [1, 2, 2, 2], + "langs": ["ca", "ca", "ca", "ca"], + "spans": ["PER: Ministeri de Justícia d'Espanya"], + } + + +@pytest.mark.parametrize("path_extension", ["csv", "csv.bz2"]) +@pytest.mark.parametrize("streaming", [False, True]) +def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv_path): + paths = {"csv": csv_path, "csv.bz2": bz2_csv_path} + data_files = str(paths[path_extension]) + features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) + ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) + assert isinstance(ds, IterableDataset if streaming else Dataset) + ds_item = next(iter(ds)) + assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} + + +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"]) +def test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path): + data_file_paths = { + "zip_csv_path": zip_csv_path, + "zip_csv_with_dir_path": zip_csv_with_dir_path, + "csv_path": csv_path, + } + data_files = str(data_file_paths[data_file]) + expected_size = 8 if data_file.startswith("zip") else 4 + features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) + ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) + if streaming: + ds_item_counter = 0 + for ds_item in ds: + if ds_item_counter == 0: + assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} + ds_item_counter += 1 + assert ds_item_counter == expected_size + else: + assert ds.shape[0] == expected_size + ds_item = next(iter(ds)) + assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} + + +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize("data_file", ["zip_jsonl_path", "zip_jsonl_with_dir_path", "jsonl_path"]) +def test_load_dataset_zip_jsonl(data_file, streaming, zip_jsonl_path, zip_jsonl_with_dir_path, jsonl_path): + data_file_paths = { + "zip_jsonl_path": zip_jsonl_path, + "zip_jsonl_with_dir_path": zip_jsonl_with_dir_path, + "jsonl_path": jsonl_path, + } + data_files = str(data_file_paths[data_file]) + expected_size = 8 if data_file.startswith("zip") else 4 + features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) + ds = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming) + if streaming: + ds_item_counter = 0 + for ds_item in ds: + if ds_item_counter 
== 0: + assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} + ds_item_counter += 1 + assert ds_item_counter == expected_size + else: + assert ds.shape[0] == expected_size + ds_item = next(iter(ds)) + assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} + + +@pytest.mark.parametrize("streaming", [False, True]) +@pytest.mark.parametrize("data_file", ["zip_text_path", "zip_text_with_dir_path", "text_path"]) +def test_load_dataset_zip_text(data_file, streaming, zip_text_path, zip_text_with_dir_path, text_path): + data_file_paths = { + "zip_text_path": zip_text_path, + "zip_text_with_dir_path": zip_text_with_dir_path, + "text_path": text_path, + } + data_files = str(data_file_paths[data_file]) + expected_size = 8 if data_file.startswith("zip") else 4 + ds = load_dataset("text", split="train", data_files=data_files, streaming=streaming) + if streaming: + ds_item_counter = 0 + for ds_item in ds: + if ds_item_counter == 0: + assert ds_item == {"text": "0"} + ds_item_counter += 1 + assert ds_item_counter == expected_size + else: + assert ds.shape[0] == expected_size + ds_item = next(iter(ds)) + assert ds_item == {"text": "0"} + + +@pytest.mark.parametrize("streaming", [False, True]) +def test_load_dataset_arrow(streaming, data_dir_with_arrow): + ds = load_dataset("arrow", split="train", data_dir=data_dir_with_arrow, streaming=streaming) + expected_size = 10 + if streaming: + ds_item_counter = 0 + for ds_item in ds: + if ds_item_counter == 0: + assert ds_item == {"col_1": "foo"} + ds_item_counter += 1 + assert ds_item_counter == 10 + else: + assert ds.num_rows == 10 + assert ds.shape[0] == expected_size + ds_item = next(iter(ds)) + assert ds_item == {"col_1": "foo"} + + +def test_load_dataset_text_with_unicode_new_lines(text_path_with_unicode_new_lines): + data_files = str(text_path_with_unicode_new_lines) + ds = load_dataset("text", split="train", data_files=data_files) + assert ds.num_rows == 3 + + +def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_extension): + data_files = str(text_dir_with_unsupported_extension) + ds = load_dataset("text", split="train", data_files=data_files) + assert ds.num_rows == 4 + + +@pytest.mark.integration +def test_loading_from_the_datasets_hub(): + with tempfile.TemporaryDirectory() as tmp_dir: + dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir) + assert len(dataset["train"]) == 2 + assert len(dataset["validation"]) == 3 + del dataset + + +@pytest.mark.integration +def test_loading_from_the_datasets_hub_with_token(): + true_request = requests.Session().request + + def assert_auth(method, url, *args, headers, **kwargs): + assert headers["authorization"] == "Bearer foo" + return true_request(method, url, *args, headers=headers, **kwargs) + + with patch("requests.Session.request") as mock_request: + mock_request.side_effect = assert_auth + with tempfile.TemporaryDirectory() as tmp_dir: + with offline(): + with pytest.raises((ConnectionError, requests.exceptions.ConnectionError)): + load_dataset(SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER, cache_dir=tmp_dir, token="foo") + mock_request.assert_called() + + +@pytest.mark.integration +def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data): + ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token) + assert next(iter(ds)) is not None + + +@pytest.mark.integration +def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data): + builder = 
load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token) + assert isinstance(builder, DatasetBuilder) + + +@pytest.mark.integration +def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data): + ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token) + assert next(iter(ds)) is not None + + +@pytest.mark.integration +def test_load_dataset_config_kwargs_passed_as_arguments(): + ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4) + ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) + assert list(ds_default["train"].features) == ["image", "caption"] + assert list(ds_custom["train"].features) == ["image"] + + +@require_sndfile +@pytest.mark.integration +def test_load_hub_dataset_without_script_with_single_config_in_metadata(): + # load the same dataset but with no configurations (=with default parameters) + ds = load_dataset(SAMPLE_DATASET_NO_CONFIGS_IN_METADATA) + assert list(ds["train"].features) == ["audio", "label"] # assert label feature is here as expected by default + assert len(ds["train"]) == 5 and len(ds["test"]) == 4 + + ds2 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) # single config -> no need to specify it + assert list(ds2["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed + assert len(ds2["train"]) == 3 and len(ds2["test"]) == 3 + + ds3 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "custom") + assert list(ds3["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed + assert len(ds3["train"]) == 3 and len(ds3["test"]) == 3 + + with pytest.raises(ValueError): + # no config named "default" + _ = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "default") + + +@require_sndfile +@pytest.mark.integration +def test_load_hub_dataset_without_script_with_two_config_in_metadata(): + ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") + assert list(ds["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed + assert len(ds["train"]) == 3 and len(ds["test"]) == 3 + + ds2 = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2") + assert list(ds2["train"].features) == [ + "audio", + "label", + ] # assert param `drop_labels=False` from metadata is passed + assert len(ds2["train"]) == 2 and len(ds2["test"]) == 1 + + with pytest.raises(ValueError): + # config is required but not specified + _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) + + with pytest.raises(ValueError): + # no config named "default" + _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "default") + + ds_with_default = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT) + # it's a dataset with the same data but "v1" config is marked as a default one + assert list(ds_with_default["train"].features) == list(ds["train"].features) + assert len(ds_with_default["train"]) == len(ds["train"]) and len(ds_with_default["test"]) == len(ds["test"]) + + +@require_sndfile +@pytest.mark.integration +def test_load_hub_dataset_without_script_with_metadata_config_in_parallel(): + # assert it doesn't fail (pickling of dynamically created class works) + ds = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, num_proc=2) + assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed + assert len(ds["train"]) == 3 and len(ds["test"]) == 3 + + ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1", num_proc=2) + 
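+ # same check with a named config: the dynamically created builder class must also pickle for num_proc=2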
assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed + assert len(ds["train"]) == 3 and len(ds["test"]) == 3 + + ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2", num_proc=2) + assert "label" in ds["train"].features + assert len(ds["train"]) == 2 and len(ds["test"]) == 1 + + +@require_pil +@pytest.mark.integration +@pytest.mark.parametrize("streaming", [True]) +def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming): + ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token) + assert isinstance(ds, IterableDataset if streaming else Dataset) + ds_items = list(ds) + assert len(ds_items) == 2 + + +def test_load_dataset_then_move_then_reload(dataset_loading_script_dir, data_dir, tmp_path, caplog): + cache_dir1 = tmp_path / "cache1" + cache_dir2 = tmp_path / "cache2" + dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1) + fingerprint1 = dataset._fingerprint + del dataset + os.rename(cache_dir1, cache_dir2) + caplog.clear() + with caplog.at_level(INFO, logger=get_logger().name): + dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir2) + assert "Found cached dataset" in caplog.text + assert dataset._fingerprint == fingerprint1, "for the caching mechanism to work, fingerprint should stay the same" + dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="test", cache_dir=cache_dir2) + assert dataset._fingerprint != fingerprint1 + + +def test_load_dataset_readonly(dataset_loading_script_dir, dataset_loading_script_dir_readonly, data_dir, tmp_path): + cache_dir1 = tmp_path / "cache1" + cache_dir2 = tmp_path / "cache2" + dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1) + fingerprint1 = dataset._fingerprint + del dataset + # Load readonly dataset and check that the fingerprint is the same. + dataset = load_dataset(dataset_loading_script_dir_readonly, data_dir=data_dir, split="train", cache_dir=cache_dir2) + assert dataset._fingerprint == fingerprint1, "Cannot load a dataset in a readonly folder." 
+ + +@pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 50, 500]) +def test_load_dataset_local_with_default_in_memory( + max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, monkeypatch +): + current_dataset_size = 148 + if max_in_memory_dataset_size == "default": + max_in_memory_dataset_size = 0 # default + else: + monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) + if max_in_memory_dataset_size: + expected_in_memory = current_dataset_size < max_in_memory_dataset_size + else: + expected_in_memory = False + + with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): + dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir) + assert (dataset["train"].dataset_size < max_in_memory_dataset_size) is expected_in_memory + + +@pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 100, 1000]) +def test_load_from_disk_with_default_in_memory( + max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, tmp_path, monkeypatch +): + current_dataset_size = 512 # arrow file size = 512, in-memory dataset size = 148 + if max_in_memory_dataset_size == "default": + max_in_memory_dataset_size = 0 # default + else: + monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) + if max_in_memory_dataset_size: + expected_in_memory = current_dataset_size < max_in_memory_dataset_size + else: + expected_in_memory = False + + dset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=True) + dataset_path = os.path.join(tmp_path, "saved_dataset") + dset.save_to_disk(dataset_path) + + with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): + _ = load_from_disk(dataset_path) + + +@pytest.mark.integration +def test_remote_data_files(): + repo_id = "hf-internal-testing/raw_jsonl" + filename = "wikiann-bn-validation.jsonl" + data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{filename}" + ds = load_dataset("json", split="train", data_files=data_files, streaming=True) + assert isinstance(ds, IterableDataset) + ds_item = next(iter(ds)) + assert ds_item.keys() == {"langs", "ner_tags", "spans", "tokens"} + + +@pytest.mark.parametrize("deleted", [False, True]) +def test_load_dataset_deletes_extracted_files(deleted, jsonl_gz_path, tmp_path): + data_files = jsonl_gz_path + cache_dir = tmp_path / "cache" + if deleted: + download_config = DownloadConfig(delete_extracted=True, cache_dir=cache_dir / "downloads") + ds = load_dataset( + "json", split="train", data_files=data_files, cache_dir=cache_dir, download_config=download_config + ) + else: # default + ds = load_dataset("json", split="train", data_files=data_files, cache_dir=cache_dir) + assert ds[0] == {"col_1": "0", "col_2": 0, "col_3": 0.0} + assert ( + [path for path in (cache_dir / "downloads" / "extracted").iterdir() if path.suffix != ".lock"] == [] + ) is deleted + + +def distributed_load_dataset(args): + data_name, tmp_dir, datafiles = args + dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles) + return dataset + + +def test_load_dataset_distributed(tmp_path, csv_path): + num_workers = 5 + args = "csv", str(tmp_path), csv_path + with Pool(processes=num_workers) as pool: # start num_workers processes + datasets = pool.map(distributed_load_dataset, [args] * num_workers) + assert len(datasets) == num_workers + assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets) + assert 
len(datasets[0].cache_files) > 0 + assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets) + + +def test_load_dataset_with_storage_options(mockfs): + with mockfs.open("data.txt", "w") as f: + f.write("Hello there\n") + f.write("General Kenobi !") + data_files = {"train": ["mock://data.txt"]} + ds = load_dataset("text", data_files=data_files, storage_options=mockfs.storage_options) + assert list(ds["train"]) == [{"text": "Hello there"}, {"text": "General Kenobi !"}] + + +@require_pil +def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file): + import PIL.Image + + filename = os.path.basename(image_file) + with mockfs.open(filename, "wb") as fout: + with open(image_file, "rb") as fin: + fout.write(fin.read()) + data_files = {"train": ["mock://" + filename]} + ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options) + assert len(ds["train"]) == 1 + assert isinstance(ds["train"][0]["image"], PIL.Image.Image) + + +def test_load_dataset_without_script_with_zip(zip_csv_path): + path = str(zip_csv_path.parent) + ds = load_dataset(path) + assert list(ds.keys()) == ["train"] + assert ds["train"].column_names == ["col_1", "col_2", "col_3"] + assert ds["train"].num_rows == 8 + assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0} diff --git a/testbed/huggingface__datasets/tests/test_parallel.py b/testbed/huggingface__datasets/tests/test_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..8a2a3554ccdb87536c3f57db7d13c4f2bc5daca9 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_parallel.py @@ -0,0 +1,51 @@ +import pytest + +from datasets.parallel import ParallelBackendConfig, parallel_backend +from datasets.utils.py_utils import map_nested + +from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows + + +def add_one(i): # picklable for multiprocessing + return i + 1 + + +@require_dill_gt_0_3_2 +@require_joblibspark +@require_not_windows +def test_parallel_backend_input(): + with parallel_backend("spark"): + assert ParallelBackendConfig.backend_name == "spark" + + lst = [1, 2, 3] + with pytest.raises(ValueError): + with parallel_backend("unsupported backend"): + map_nested(add_one, lst, num_proc=2) + + with pytest.raises(ValueError): + with parallel_backend("unsupported backend"): + map_nested(add_one, lst, num_proc=-1) + + +@require_dill_gt_0_3_2 +@require_joblibspark +@require_not_windows +@pytest.mark.parametrize("num_proc", [2, -1]) +def test_parallel_backend_map_nested(num_proc): + s1 = [1, 2] + s2 = {"a": 1, "b": 2} + s3 = {"a": [1, 2], "b": [3, 4]} + s4 = {"a": {"1": 1}, "b": 2} + s5 = {"a": 1, "b": 2, "c": 3, "d": 4} + expected_map_nested_s1 = [2, 3] + expected_map_nested_s2 = {"a": 2, "b": 3} + expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]} + expected_map_nested_s4 = {"a": {"1": 2}, "b": 3} + expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5} + + with parallel_backend("spark"): + assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1 + assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2 + assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3 + assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4 + assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5 diff --git a/testbed/huggingface__datasets/tests/test_patching.py b/testbed/huggingface__datasets/tests/test_patching.py new file mode 100644 index 
0000000000000000000000000000000000000000..42c592648f84986ccebb696d9254ff004d7ae10b --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_patching.py @@ -0,0 +1,151 @@ +from datasets.utils.patching import _PatchedModuleObj, patch_submodule + +from . import _test_patching + + +def test_patch_submodule(): + import os as original_os + from os import path as original_path + from os import rename as original_rename + from os.path import dirname as original_dirname + from os.path import join as original_join + + assert _test_patching.os is original_os + assert _test_patching.path is original_path + assert _test_patching.join is original_join + + assert _test_patching.renamed_os is original_os + assert _test_patching.renamed_path is original_path + assert _test_patching.renamed_join is original_join + + mock = "__test_patch_submodule_mock__" + with patch_submodule(_test_patching, "os.path.join", mock): + # Every way to access os.path.join must be patched, and the rest must stay untouched + + # check os.path.join + assert isinstance(_test_patching.os, _PatchedModuleObj) + assert isinstance(_test_patching.os.path, _PatchedModuleObj) + assert _test_patching.os.path.join is mock + + # check path.join + assert isinstance(_test_patching.path, _PatchedModuleObj) + assert _test_patching.path.join is mock + + # check join + assert _test_patching.join is mock + + # check that the other attributes are untouched + assert _test_patching.os.rename is original_rename + assert _test_patching.path.dirname is original_dirname + assert _test_patching.os.path.dirname is original_dirname + + # Even renamed modules or objects must be patched + + # check renamed_os.path.join + assert isinstance(_test_patching.renamed_os, _PatchedModuleObj) + assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj) + assert _test_patching.renamed_os.path.join is mock + + # check renamed_path.join + assert isinstance(_test_patching.renamed_path, _PatchedModuleObj) + assert _test_patching.renamed_path.join is mock + + # check renamed_join + assert _test_patching.renamed_join is mock + + # check that the other attributes are untouched + assert _test_patching.renamed_os.rename is original_rename + assert _test_patching.renamed_path.dirname is original_dirname + assert _test_patching.renamed_os.path.dirname is original_dirname + + # check that everything is back to normal when the patch is over + + assert _test_patching.os is original_os + assert _test_patching.path is original_path + assert _test_patching.join is original_join + + assert _test_patching.renamed_os is original_os + assert _test_patching.renamed_path is original_path + assert _test_patching.renamed_join is original_join + + +def test_patch_submodule_builtin(): + assert _test_patching.open is open + + mock = "__test_patch_submodule_builtin_mock__" + # _test_patching has "open" in its globals + assert _test_patching.open is open + with patch_submodule(_test_patching, "open", mock): + assert _test_patching.open is mock + + # check that everything is back to normal when the patch is over + + assert _test_patching.open is open + + +def test_patch_submodule_missing(): + # pandas.read_csv is not present in _test_patching + mock = "__test_patch_submodule_missing_mock__" + with patch_submodule(_test_patching, "pandas.read_csv", mock): + pass + + +def test_patch_submodule_missing_builtin(): + # builtins should always be mocked even if they're not in the globals + # in case they're loaded at some point + mock = "__test_patch_submodule_missing_builtin_mock__" + # 
_test_patching doesn't have "len" in its globals + assert getattr(_test_patching, "len", None) is None + with patch_submodule(_test_patching, "len", mock): + assert _test_patching.len is mock + assert _test_patching.len is len + + +def test_patch_submodule_start_and_stop(): + mock = "__test_patch_submodule_start_and_stop_mock__" + patch = patch_submodule(_test_patching, "open", mock) + assert _test_patching.open is open + patch.start() + assert _test_patching.open is mock + patch.stop() + assert _test_patching.open is open + + +def test_patch_submodule_successive(): + from os import rename as original_rename + from os.path import dirname as original_dirname + from os.path import join as original_join + + mock_join = "__test_patch_submodule_successive_join__" + mock_dirname = "__test_patch_submodule_successive_dirname__" + mock_rename = "__test_patch_submodule_successive_rename__" + assert _test_patching.os.path.join is original_join + assert _test_patching.os.path.dirname is original_dirname + assert _test_patching.os.rename is original_rename + + with patch_submodule(_test_patching, "os.path.join", mock_join): + with patch_submodule(_test_patching, "os.rename", mock_rename): + with patch_submodule(_test_patching, "os.path.dirname", mock_dirname): + assert _test_patching.os.path.join is mock_join + assert _test_patching.os.path.dirname is mock_dirname + assert _test_patching.os.rename is mock_rename + + # try another order + with patch_submodule(_test_patching, "os.rename", mock_rename): + with patch_submodule(_test_patching, "os.path.join", mock_join): + with patch_submodule(_test_patching, "os.path.dirname", mock_dirname): + assert _test_patching.os.path.join is mock_join + assert _test_patching.os.path.dirname is mock_dirname + assert _test_patching.os.rename is mock_rename + + assert _test_patching.os.path.join is original_join + assert _test_patching.os.path.dirname is original_dirname + assert _test_patching.os.rename is original_rename + + +def test_patch_submodule_doesnt_exist(): + mock = "__test_patch_submodule_doesnt_exist_mock__" + with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock): + pass + with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock): + pass diff --git a/testbed/huggingface__datasets/tests/test_sharding_utils.py b/testbed/huggingface__datasets/tests/test_sharding_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..51c83cb478c2fae6846fde77ccf174c279320700 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_sharding_utils.py @@ -0,0 +1,54 @@ +import pytest + +from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs + + +@pytest.mark.parametrize( + "kwargs, expected", + [ + ({"num_shards": 0, "max_num_jobs": 1}, []), + ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]), + ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]), + ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]), + ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]), + ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]), + ], +) +def test_distribute_shards(kwargs, expected): + out = _distribute_shards(**kwargs) + assert out == expected + + +@pytest.mark.parametrize( + "gen_kwargs, max_num_jobs, expected", + [ + ({"foo": 0}, 10, [{"foo": 0}]), + ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]), + ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, 
{"shards": [1]}, {"shards": [2]}, {"shards": [3]}]), + ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]), + ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]), + ], +) +def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected): + out = _split_gen_kwargs(gen_kwargs, max_num_jobs) + assert out == expected + + +@pytest.mark.parametrize( + "gen_kwargs, expected", + [ + ({"foo": 0}, 1), + ({"shards": [0]}, 1), + ({"shards": [0, 1, 2, 3]}, 4), + ({"shards": [0, 1, 2, 3], "foo": 0}, 4), + ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4), + ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError), + ], +) +def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected): + if expected is RuntimeError: + with pytest.raises(expected): + _number_of_shards_in_gen_kwargs(gen_kwargs) + else: + out = _number_of_shards_in_gen_kwargs(gen_kwargs) + assert out == expected diff --git a/testbed/huggingface__datasets/tests/test_streaming_download_manager.py b/testbed/huggingface__datasets/tests/test_streaming_download_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..44e73eee73cf3d26c34914c58898187d20fa21ba --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_streaming_download_manager.py @@ -0,0 +1,939 @@ +import json +import os +import re +from pathlib import Path + +import pytest +from fsspec.registry import _registry as _fsspec_registry +from fsspec.spec import AbstractBufferedFile, AbstractFileSystem + +from datasets.download.download_config import DownloadConfig +from datasets.download.streaming_download_manager import ( + StreamingDownloadManager, + _get_extraction_protocol, + xbasename, + xexists, + xgetsize, + xglob, + xisdir, + xisfile, + xjoin, + xlistdir, + xnumpy_load, + xopen, + xPath, + xrelpath, + xsplit, + xsplitext, + xwalk, +) +from datasets.filesystems import COMPRESSION_FILESYSTEMS +from datasets.utils.hub import hf_hub_url + +from .utils import require_lz4, require_zstandard, slow + + +TEST_URL = "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/raw/main/some_text.txt" +TEST_URL_CONTENT = "foo\nbar\nfoobar" + +TEST_GG_DRIVE_FILENAME = "train.tsv" +TEST_GG_DRIVE_URL = "https://drive.google.com/uc?export=download&id=17bOgBDc3hRCoPZ89EYtKDzK-yXAWat94" +TEST_GG_DRIVE_GZIPPED_URL = "https://drive.google.com/uc?export=download&id=1Bt4Garpf0QLiwkJhHJzXaVa0I0H5Qhwz" +TEST_GG_DRIVE_ZIPPED_URL = "https://drive.google.com/uc?export=download&id=1k92sUfpHxKq8PXWRr7Y5aNHXwOCNUmqh" +TEST_GG_DRIVE_CONTENT = """\ +pokemon_name, type +Charmander, fire +Squirtle, water +Bulbasaur, grass""" + + +class DummyTestFS(AbstractFileSystem): + protocol = "mock" + _file_class = AbstractBufferedFile + _fs_contents = ( + {"name": "top_level", "type": "directory"}, + {"name": "top_level/second_level", "type": "directory"}, + {"name": "top_level/second_level/date=2019-10-01", "type": "directory"}, + { + "name": "top_level/second_level/date=2019-10-01/a.parquet", + "type": "file", + "size": 100, + }, + { + "name": "top_level/second_level/date=2019-10-01/b.parquet", + "type": "file", + "size": 100, + }, + {"name": "top_level/second_level/date=2019-10-02", "type": "directory"}, + { + "name": "top_level/second_level/date=2019-10-02/a.parquet", + "type": "file", + "size": 100, + }, + {"name": "top_level/second_level/date=2019-10-04", "type": "directory"}, + { + "name": "top_level/second_level/date=2019-10-04/a.parquet", + "type": "file", + "size": 100, + }, + {"name": "misc", "type": "directory"}, + {"name": "misc/foo.txt", "type": 
"file", "size": 100}, + {"name": "glob_test", "type": "directory", "size": 0}, + {"name": "glob_test/hat", "type": "directory", "size": 0}, + {"name": "glob_test/hat/^foo.txt", "type": "file", "size": 100}, + {"name": "glob_test/dollar", "type": "directory", "size": 0}, + {"name": "glob_test/dollar/$foo.txt", "type": "file", "size": 100}, + {"name": "glob_test/lbrace", "type": "directory", "size": 0}, + {"name": "glob_test/lbrace/{foo.txt", "type": "file", "size": 100}, + {"name": "glob_test/rbrace", "type": "directory", "size": 0}, + {"name": "glob_test/rbrace/}foo.txt", "type": "file", "size": 100}, + ) + + def __getitem__(self, name): + for item in self._fs_contents: + if item["name"] == name: + return item + raise IndexError(f"{name} not found!") + + def ls(self, path, detail=True, refresh=True, **kwargs): + if kwargs.pop("strip_proto", True): + path = self._strip_protocol(path) + + files = not refresh and self._ls_from_cache(path) + if not files: + files = [file for file in self._fs_contents if path == self._parent(file["name"])] + files.sort(key=lambda file: file["name"]) + self.dircache[path.rstrip("/")] = files + + if detail: + return files + return [file["name"] for file in files] + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + return self._file_class( + self, + path, + mode, + block_size, + autocommit, + cache_options=cache_options, + **kwargs, + ) + + +@pytest.fixture +def mock_fsspec(): + _fsspec_registry["mock"] = DummyTestFS + yield + del _fsspec_registry["mock"] + + +def _readd_double_slash_removed_by_path(path_as_posix: str) -> str: + """Path(...) on an url path like zip://file.txt::http://host.com/data.zip + converts the :// to :/ + This function readds the :// + + It handles cases like: + + - https://host.com/data.zip + - C://data.zip + - zip://file.txt::https://host.com/data.zip + - zip://file.txt::/Users/username/data.zip + - zip://file.txt::C://data.zip + + Args: + path_as_posix (str): output of Path(...).as_posix() + + Returns: + str: the url path with :// instead of :/ + """ + return re.sub("([A-z]:/)([A-z:])", r"\g<1>/\g<2>", path_as_posix) + + +@pytest.mark.parametrize( + "input_path, paths_to_join, expected_path", + [ + ( + "https://host.com/archive.zip", + ("file.txt",), + "https://host.com/archive.zip/file.txt", + ), + ( + "zip://::https://host.com/archive.zip", + ("file.txt",), + "zip://file.txt::https://host.com/archive.zip", + ), + ( + "zip://folder::https://host.com/archive.zip", + ("file.txt",), + "zip://folder/file.txt::https://host.com/archive.zip", + ), + ( + ".", + ("file.txt",), + os.path.join(".", "file.txt"), + ), + ( + str(Path().resolve()), + ("file.txt",), + str((Path().resolve() / "file.txt")), + ), + ], +) +def test_xjoin(input_path, paths_to_join, expected_path): + output_path = xjoin(input_path, *paths_to_join) + assert output_path == expected_path + output_path = xPath(input_path).joinpath(*paths_to_join) + assert output_path == xPath(expected_path) + + +@pytest.mark.parametrize( + "input_path, expected_path", + [ + (str(Path(__file__).resolve()), str(Path(__file__).resolve().parent)), + ("https://host.com/archive.zip", "https://host.com"), + ( + "zip://file.txt::https://host.com/archive.zip", + "zip://::https://host.com/archive.zip", + ), + ( + "zip://folder/file.txt::https://host.com/archive.zip", + "zip://folder::https://host.com/archive.zip", + ), + ], +) +def test_xdirname(input_path, expected_path): + from datasets.download.streaming_download_manager import 
xdirname + + output_path = xdirname(input_path) + output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) + assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) + + +@pytest.mark.parametrize( + "input_path, exists", + [ + ("tmp_path/file.txt", True), + ("tmp_path/file_that_doesnt_exist.txt", False), + ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), + ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), + ], +) +def test_xexists(input_path, exists, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + (tmp_path / "file.txt").touch() + assert xexists(input_path) is exists + + +@pytest.mark.integration +def test_xexists_private(hf_private_dataset_repo_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") + download_config = DownloadConfig(token=hf_token) + assert xexists(root_url + "data/text_data.txt", download_config=download_config) + assert not xexists(root_url + "file_that_doesnt_exist.txt", download_config=download_config) + + +@pytest.mark.parametrize( + "input_path, expected_head_and_tail", + [ + ( + str(Path(__file__).resolve()), + (str(Path(__file__).resolve().parent), str(Path(__file__).resolve().name)), + ), + ("https://host.com/archive.zip", ("https://host.com", "archive.zip")), + ("zip://file.txt::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "file.txt")), + ("zip://folder::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "folder")), + ("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")), + ], +) +def test_xsplit(input_path, expected_head_and_tail): + output_path, tail = xsplit(input_path) + expected_path, expected_tail = expected_head_and_tail + output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) + expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) + assert output_path == expected_path + assert tail == expected_tail + + +@pytest.mark.parametrize( + "input_path, expected_path_and_ext", + [ + ( + str(Path(__file__).resolve()), + (str(Path(__file__).resolve().with_suffix("")), str(Path(__file__).resolve().suffix)), + ), + ("https://host.com/archive.zip", ("https://host.com/archive", ".zip")), + ("zip://file.txt::https://host.com/archive.zip", ("zip://file::https://host.com/archive.zip", ".txt")), + ("zip://folder::https://host.com/archive.zip", ("zip://folder::https://host.com/archive.zip", "")), + ("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")), + ], +) +def test_xsplitext(input_path, expected_path_and_ext): + output_path, ext = xsplitext(input_path) + expected_path, expected_ext = expected_path_and_ext + output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) + expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) + assert output_path == expected_path + assert ext == expected_ext + + +def test_xopen_local(text_path): + with xopen(text_path, "r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: + assert list(f) == list(expected_file) + with xPath(text_path).open("r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: + assert list(f) == list(expected_file) + + +@pytest.mark.integration +def test_xopen_remote(): + with xopen(TEST_URL, "r", encoding="utf-8") as f: 
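+ # TEST_URL_CONTENT has no trailing newline, so list(f) matches splitlines(keepends=True) exactly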
+ assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) + with xPath(TEST_URL).open("r", encoding="utf-8") as f: + assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) + + +@pytest.mark.parametrize( + "input_path, expected_paths", + [ + ("tmp_path", ["file1.txt", "file2.txt"]), + ("mock://", ["glob_test", "misc", "top_level"]), + ("mock://top_level", ["second_level"]), + ("mock://top_level/second_level/date=2019-10-01", ["a.parquet", "b.parquet"]), + ], +) +def test_xlistdir(input_path, expected_paths, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + for file in ["file1.txt", "file2.txt"]: + (tmp_path / file).touch() + output_paths = sorted(xlistdir(input_path)) + assert output_paths == expected_paths + + +@pytest.mark.integration +def test_xlistdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") + download_config = DownloadConfig(token=hf_token) + assert len(xlistdir("zip://::" + root_url, download_config=download_config)) == 1 + assert len(xlistdir("zip://main_dir::" + root_url, download_config=download_config)) == 2 + with pytest.raises(FileNotFoundError): + xlistdir("zip://qwertyuiop::" + root_url, download_config=download_config) + with pytest.raises(FileNotFoundError): + xlistdir(root_url, download_config=download_config) + + +@pytest.mark.parametrize( + "input_path, isdir", + [ + ("tmp_path", True), + ("tmp_path/file.txt", False), + ("mock://", True), + ("mock://top_level", True), + ("mock://dir_that_doesnt_exist", False), + ], +) +def test_xisdir(input_path, isdir, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + (tmp_path / "file.txt").touch() + assert xisdir(input_path) == isdir + + +@pytest.mark.integration +def test_xisdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") + download_config = DownloadConfig(token=hf_token) + assert xisdir("zip://::" + root_url, download_config=download_config) is True + assert xisdir("zip://main_dir::" + root_url, download_config=download_config) is True + assert xisdir("zip://qwertyuiop::" + root_url, download_config=download_config) is False + assert xisdir(root_url, download_config=download_config) is False + + +@pytest.mark.parametrize( + "input_path, isfile", + [ + ("tmp_path/file.txt", True), + ("tmp_path/file_that_doesnt_exist.txt", False), + ("mock://", False), + ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), + ], +) +def test_xisfile(input_path, isfile, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + (tmp_path / "file.txt").touch() + assert xisfile(input_path) == isfile + + +@pytest.mark.integration +def test_xisfile_private(hf_private_dataset_repo_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") + download_config = DownloadConfig(token=hf_token) + assert xisfile(root_url + "data/text_data.txt", download_config=download_config) is True + assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False + + +@pytest.mark.parametrize( + "input_path, size", + [ + ("tmp_path/file.txt", 100), + ("mock://", 0), + ("mock://top_level/second_level/date=2019-10-01/a.parquet", 100), + ], +) +def 
test_xgetsize(input_path, size, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + (tmp_path / "file.txt").touch() + (tmp_path / "file.txt").write_bytes(b"x" * 100) + assert xgetsize(input_path) == size + + +@pytest.mark.integration +def test_xgetsize_private(hf_private_dataset_repo_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_txt_data, "") + download_config = DownloadConfig(token=hf_token) + assert xgetsize(root_url + "data/text_data.txt", download_config=download_config) == 39 + with pytest.raises(FileNotFoundError): + xgetsize(root_url + "qwertyuiop", download_config=download_config) + + +@pytest.mark.parametrize( + "input_path, expected_paths", + [ + ("tmp_path/*.txt", ["file1.txt", "file2.txt"]), + ("mock://*", ["mock://glob_test", "mock://misc", "mock://top_level"]), + ("mock://top_*", ["mock://top_level"]), + ( + "mock://top_level/second_level/date=2019-10-0[1-4]", + [ + "mock://top_level/second_level/date=2019-10-01", + "mock://top_level/second_level/date=2019-10-02", + "mock://top_level/second_level/date=2019-10-04", + ], + ), + ( + "mock://top_level/second_level/date=2019-10-0[1-4]/*", + [ + "mock://top_level/second_level/date=2019-10-01/a.parquet", + "mock://top_level/second_level/date=2019-10-01/b.parquet", + "mock://top_level/second_level/date=2019-10-02/a.parquet", + "mock://top_level/second_level/date=2019-10-04/a.parquet", + ], + ), + ], +) +def test_xglob(input_path, expected_paths, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + expected_paths = [str(tmp_path / file) for file in expected_paths] + for file in ["file1.txt", "file2.txt", "README.md"]: + (tmp_path / file).touch() + output_paths = sorted(xglob(input_path)) + assert output_paths == expected_paths + + +@pytest.mark.integration +def test_xglob_private(hf_private_dataset_repo_zipped_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") + download_config = DownloadConfig(token=hf_token) + assert len(xglob("zip://**::" + root_url, download_config=download_config)) == 3 + assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0 + + +@pytest.mark.parametrize( + "input_path, expected_outputs", + [ + ("tmp_path", [("", [], ["file1.txt", "file2.txt", "README.md"])]), + ( + "mock://top_level/second_level", + [ + ("mock://top_level/second_level", ["date=2019-10-01", "date=2019-10-02", "date=2019-10-04"], []), + ("mock://top_level/second_level/date=2019-10-01", [], ["a.parquet", "b.parquet"]), + ("mock://top_level/second_level/date=2019-10-02", [], ["a.parquet"]), + ("mock://top_level/second_level/date=2019-10-04", [], ["a.parquet"]), + ], + ), + ], +) +def test_xwalk(input_path, expected_outputs, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + expected_outputs = sorted( + [ + (str(tmp_path / dirpath).rstrip("/"), sorted(dirnames), sorted(filenames)) + for dirpath, dirnames, filenames in expected_outputs + ] + ) + for file in ["file1.txt", "file2.txt", "README.md"]: + (tmp_path / file).touch() + outputs = sorted(xwalk(input_path)) + outputs = [(dirpath, sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in outputs] + assert outputs == expected_outputs + + +@pytest.mark.integration +def 
test_xwalk_private(hf_private_dataset_repo_zipped_txt_data, hf_token): + root_url = hf_hub_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") + download_config = DownloadConfig(token=hf_token) + assert len(list(xwalk("zip://::" + root_url, download_config=download_config))) == 2 + assert len(list(xwalk("zip://main_dir::" + root_url, download_config=download_config))) == 1 + assert len(list(xwalk("zip://qwertyuiop::" + root_url, download_config=download_config))) == 0 + + +@pytest.mark.parametrize( + "input_path, start_path, expected_path", + [ + ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1", "dir2/file.txt".replace("/", os.path.sep)), + ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1/dir2".replace("/", os.path.sep), "file.txt"), + ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "file.txt"), + ( + "zip://folder/file.txt::https://host.com/archive.zip", + "zip://::https://host.com/archive.zip", + "folder/file.txt", + ), + ( + "zip://folder/file.txt::https://host.com/archive.zip", + "zip://folder::https://host.com/archive.zip", + "file.txt", + ), + ], +) +def test_xrelpath(input_path, start_path, expected_path): + output_path = xrelpath(input_path, start=start_path) + assert output_path == expected_path + + +class TestxPath: + @pytest.mark.parametrize( + "input_path", + [ + "https://host.com/archive.zip", + "zip://file.txt::https://host.com/archive.zip", + "zip://dir/file.txt::https://host.com/archive.zip", + "file.txt", + str(Path().resolve() / "file.txt"), + ], + ) + def test_xpath_str(self, input_path): + assert str(xPath(input_path)) == input_path + + @pytest.mark.parametrize( + "input_path, expected_path", + [ + ("https://host.com/archive.zip", "https://host.com/archive.zip"), + ("zip://file.txt::https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip"), + ("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip"), + ("file.txt", "file.txt"), + (str(Path().resolve() / "file.txt"), (Path().resolve() / "file.txt").as_posix()), + ], + ) + def test_xpath_as_posix(self, input_path, expected_path): + assert xPath(input_path).as_posix() == expected_path + + @pytest.mark.parametrize( + "input_path, exists", + [ + ("tmp_path/file.txt", True), + ("tmp_path/file_that_doesnt_exist.txt", False), + ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), + ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), + ], + ) + def test_xpath_exists(self, input_path, exists, tmp_path, mock_fsspec): + if input_path.startswith("tmp_path"): + input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) + (tmp_path / "file.txt").touch() + assert xexists(input_path) is exists + + @pytest.mark.parametrize( + "input_path, pattern, expected_paths", + [ + ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), + ("mock://", "*", ["mock://glob_test", "mock://misc", "mock://top_level"]), + ("mock://", "top_*", ["mock://top_level"]), + ( + "mock://top_level/second_level", + "date=2019-10-0[1-4]", + [ + "mock://top_level/second_level/date=2019-10-01", + "mock://top_level/second_level/date=2019-10-02", + "mock://top_level/second_level/date=2019-10-04", + ], + ), + ( + "mock://top_level/second_level", + "date=2019-10-0[1-4]/*", + [ + "mock://top_level/second_level/date=2019-10-01/a.parquet", + "mock://top_level/second_level/date=2019-10-01/b.parquet", + "mock://top_level/second_level/date=2019-10-02/a.parquet", + 
"mock://top_level/second_level/date=2019-10-04/a.parquet", + ], + ), + ], + ) + def test_xpath_glob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec): + if input_path == "tmp_path": + input_path = tmp_path + expected_paths = [tmp_path / file for file in expected_paths] + for file in ["file1.txt", "file2.txt", "README.md"]: + (tmp_path / file).touch() + else: + expected_paths = [Path(file) for file in expected_paths] + output_paths = sorted(xPath(input_path).glob(pattern)) + assert output_paths == expected_paths + + @pytest.mark.parametrize( + "input_path, pattern, expected_paths", + [ + ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), + ( + "mock://", + "date=2019-10-0[1-4]", + [ + "mock://top_level/second_level/date=2019-10-01", + "mock://top_level/second_level/date=2019-10-02", + "mock://top_level/second_level/date=2019-10-04", + ], + ), + ( + "mock://top_level", + "date=2019-10-0[1-4]", + [ + "mock://top_level/second_level/date=2019-10-01", + "mock://top_level/second_level/date=2019-10-02", + "mock://top_level/second_level/date=2019-10-04", + ], + ), + ( + "mock://", + "date=2019-10-0[1-4]/*", + [ + "mock://top_level/second_level/date=2019-10-01/a.parquet", + "mock://top_level/second_level/date=2019-10-01/b.parquet", + "mock://top_level/second_level/date=2019-10-02/a.parquet", + "mock://top_level/second_level/date=2019-10-04/a.parquet", + ], + ), + ( + "mock://top_level", + "date=2019-10-0[1-4]/*", + [ + "mock://top_level/second_level/date=2019-10-01/a.parquet", + "mock://top_level/second_level/date=2019-10-01/b.parquet", + "mock://top_level/second_level/date=2019-10-02/a.parquet", + "mock://top_level/second_level/date=2019-10-04/a.parquet", + ], + ), + ], + ) + def test_xpath_rglob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec): + if input_path == "tmp_path": + input_path = tmp_path + dir_path = tmp_path / "dir" + dir_path.mkdir() + expected_paths = [dir_path / file for file in expected_paths] + for file in ["file1.txt", "file2.txt", "README.md"]: + (dir_path / file).touch() + else: + expected_paths = [Path(file) for file in expected_paths] + output_paths = sorted(xPath(input_path).rglob(pattern)) + assert output_paths == expected_paths + + @pytest.mark.parametrize( + "input_path, expected_path", + [ + ("https://host.com/archive.zip", "https://host.com"), + ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip"), + ("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir::https://host.com/archive.zip"), + ("file.txt", ""), + (str(Path().resolve() / "file.txt"), str(Path().resolve())), + ], + ) + def test_xpath_parent(self, input_path, expected_path): + assert xPath(input_path).parent == xPath(expected_path) + + @pytest.mark.parametrize( + "input_path, expected", + [ + ("https://host.com/archive.zip", "archive.zip"), + ("zip://file.txt::https://host.com/archive.zip", "file.txt"), + ("zip://dir/file.txt::https://host.com/archive.zip", "file.txt"), + ("file.txt", "file.txt"), + (str(Path().resolve() / "file.txt"), "file.txt"), + ], + ) + def test_xpath_name(self, input_path, expected): + assert xPath(input_path).name == expected + + @pytest.mark.parametrize( + "input_path, expected", + [ + ("https://host.com/archive.zip", "archive"), + ("zip://file.txt::https://host.com/archive.zip", "file"), + ("zip://dir/file.txt::https://host.com/archive.zip", "file"), + ("file.txt", "file"), + (str(Path().resolve() / "file.txt"), "file"), + ], + ) + def test_xpath_stem(self, input_path, expected): + assert 
xPath(input_path).stem == expected + + @pytest.mark.parametrize( + "input_path, expected", + [ + ("https://host.com/archive.zip", ".zip"), + ("zip://file.txt::https://host.com/archive.zip", ".txt"), + ("zip://dir/file.txt::https://host.com/archive.zip", ".txt"), + ("file.txt", ".txt"), + (str(Path().resolve() / "file.txt"), ".txt"), + ], + ) + def test_xpath_suffix(self, input_path, expected): + assert xPath(input_path).suffix == expected + + @pytest.mark.parametrize( + "input_path, suffix, expected", + [ + ("https://host.com/archive.zip", ".ann", "https://host.com/archive.ann"), + ("zip://file.txt::https://host.com/archive.zip", ".ann", "zip://file.ann::https://host.com/archive.zip"), + ( + "zip://dir/file.txt::https://host.com/archive.zip", + ".ann", + "zip://dir/file.ann::https://host.com/archive.zip", + ), + ("file.txt", ".ann", "file.ann"), + (str(Path().resolve() / "file.txt"), ".ann", str(Path().resolve() / "file.ann")), + ], + ) + def test_xpath_with_suffix(self, input_path, suffix, expected): + assert xPath(input_path).with_suffix(suffix) == xPath(expected) + + +@pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"]) +def test_streaming_dl_manager_download_dummy_path(urlpath): + dl_manager = StreamingDownloadManager() + assert dl_manager.download(urlpath) == urlpath + + +def test_streaming_dl_manager_download(text_path): + dl_manager = StreamingDownloadManager() + out = dl_manager.download(text_path) + assert out == text_path + with xopen(out, encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: + assert f.read() == expected_file.read() + + +@pytest.mark.parametrize("urlpath", [r"C:\\foo\bar.txt", "/foo/bar.txt", "https://f.oo/bar.txt"]) +def test_streaming_dl_manager_download_and_extract_no_extraction(urlpath): + dl_manager = StreamingDownloadManager() + assert dl_manager.download_and_extract(urlpath) == urlpath + + +def test_streaming_dl_manager_extract(text_gz_path, text_path): + dl_manager = StreamingDownloadManager() + output_path = dl_manager.extract(text_gz_path) + path = os.path.basename(text_gz_path) + path = path[: path.rindex(".")] + assert output_path == f"gzip://{path}::{text_gz_path}" + fsspec_open_file = xopen(output_path, encoding="utf-8") + with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file: + assert f.read() == expected_file.read() + + +def test_streaming_dl_manager_download_and_extract_with_extraction(text_gz_path, text_path): + dl_manager = StreamingDownloadManager() + output_path = dl_manager.download_and_extract(text_gz_path) + path = os.path.basename(text_gz_path) + path = path[: path.rindex(".")] + assert output_path == f"gzip://{path}::{text_gz_path}" + fsspec_open_file = xopen(output_path, encoding="utf-8") + with fsspec_open_file as f, open(text_path, encoding="utf-8") as expected_file: + assert f.read() == expected_file.read() + + +@pytest.mark.parametrize( + "input_path, filename, expected_path", + [("https://domain.org/archive.zip", "filename.jsonl", "zip://filename.jsonl::https://domain.org/archive.zip")], +) +def test_streaming_dl_manager_download_and_extract_with_join(input_path, filename, expected_path): + dl_manager = StreamingDownloadManager() + extracted_path = dl_manager.download_and_extract(input_path) + output_path = xjoin(extracted_path, filename) + assert output_path == expected_path + + +@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) +def test_streaming_dl_manager_extract_all_supported_single_file_compression_types( + 
compression_fs_class, gz_file, xz_file, zstd_file, bz2_file, lz4_file, text_file +): + input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} + input_path = input_paths[compression_fs_class.protocol] + if input_path is None: + reason = f"for '{compression_fs_class.protocol}' compression protocol, " + if compression_fs_class.protocol == "lz4": + reason += require_lz4.kwargs["reason"] + elif compression_fs_class.protocol == "zstd": + reason += require_zstandard.kwargs["reason"] + pytest.skip(reason) + dl_manager = StreamingDownloadManager() + output_path = dl_manager.extract(input_path) + path = os.path.basename(input_path) + path = path[: path.rindex(".")] + assert output_path == f"{compression_fs_class.protocol}://{path}::{input_path}" + fsspec_open_file = xopen(output_path, encoding="utf-8") + with fsspec_open_file as f, open(text_file, encoding="utf-8") as expected_file: + assert f.read() == expected_file.read() + + +@pytest.mark.parametrize( + "urlpath, expected_protocol", + [ + ("zip://train-00000.json.gz::https://foo.bar/data.zip", "gzip"), + ("https://foo.bar/train.json.gz?dl=1", "gzip"), + ("http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip", "zip"), + ("https://github.com/user/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true", "zip"), + ("https://github.com/user/repo/blob/master/data/morph_train.tsv?raw=true", None), + ("https://repo.org/bitstream/handle/20.500.12185/346/annotated_corpus.zip?sequence=3&isAllowed=y", "zip"), + ("https://zenodo.org/record/2787612/files/SICK.zip?download=1", "zip"), + ], +) +def test_streaming_dl_manager_get_extraction_protocol(urlpath, expected_protocol): + assert _get_extraction_protocol(urlpath) == expected_protocol + + +@pytest.mark.parametrize( + "urlpath, expected_protocol", + [ + (TEST_GG_DRIVE_GZIPPED_URL, "gzip"), + (TEST_GG_DRIVE_ZIPPED_URL, "zip"), + ], +) +@slow # otherwise it spams Google Drive and the CI gets banned +def test_streaming_dl_manager_get_extraction_protocol_gg_drive(urlpath, expected_protocol): + assert _get_extraction_protocol(urlpath) == expected_protocol + + +@pytest.mark.parametrize( + "urlpath", + [ + "zip://train-00000.tar.gz::https://foo.bar/data.zip", + "https://foo.bar/train.tar.gz", + "https://foo.bar/train.tgz", + "https://foo.bar/train.tar", + ], +) +def test_streaming_dl_manager_extract_throws(urlpath): + with pytest.raises(NotImplementedError): + _ = StreamingDownloadManager().extract(urlpath) + + +@slow # otherwise it spams Google Drive and the CI gets banned +@pytest.mark.integration +def test_streaming_gg_drive(): + with xopen(TEST_GG_DRIVE_URL) as f: + assert f.read() == TEST_GG_DRIVE_CONTENT + + +@slow # otherwise it spams Google Drive and the CI gets banned +@pytest.mark.integration +def test_streaming_gg_drive_no_extract(): + urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_URL) + with xopen(urlpath) as f: + assert f.read() == TEST_GG_DRIVE_CONTENT + + +@slow # otherwise it spams Google Drive and the CI gets banned +@pytest.mark.integration +def test_streaming_gg_drive_gzipped(): + urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_GZIPPED_URL) + with xopen(urlpath) as f: + assert f.read() == TEST_GG_DRIVE_CONTENT + + +@slow # otherwise it spams Google Drive and the CI gets banned +@pytest.mark.integration +def test_streaming_gg_drive_zipped(): + urlpath = StreamingDownloadManager().download_and_extract(TEST_GG_DRIVE_ZIPPED_URL) + all_files = list(xglob(xjoin(urlpath, "*"))) + assert 
len(all_files) == 1 + assert xbasename(all_files[0]) == TEST_GG_DRIVE_FILENAME + with xopen(all_files[0]) as f: + assert f.read() == TEST_GG_DRIVE_CONTENT + + +def _test_jsonl(path, file): + assert path.endswith(".jsonl") + for num_items, line in enumerate(file, start=1): + item = json.loads(line.decode("utf-8")) + assert item.keys() == {"col_1", "col_2", "col_3"} + assert num_items == 4 + + +@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"]) +def test_iter_archive_path(archive_jsonl, request): + archive_jsonl_path = request.getfixturevalue(archive_jsonl) + dl_manager = StreamingDownloadManager() + archive_iterable = dl_manager.iter_archive(archive_jsonl_path) + num_jsonl = 0 + for num_jsonl, (path, file) in enumerate(archive_iterable, start=1): + _test_jsonl(path, file) + assert num_jsonl == 2 + # do it twice to make sure it's reset correctly + num_jsonl = 0 + for num_jsonl, (path, file) in enumerate(archive_iterable, start=1): + _test_jsonl(path, file) + assert num_jsonl == 2 + + +@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"]) +def test_iter_archive_file(archive_nested_jsonl, request): + archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl) + dl_manager = StreamingDownloadManager() + files_iterable = dl_manager.iter_archive(archive_nested_jsonl_path) + num_tar, num_jsonl = 0, 0 + for num_tar, (path, file) in enumerate(files_iterable, start=1): + for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): + _test_jsonl(subpath, subfile) + assert num_tar == 1 + assert num_jsonl == 2 + # do it twice to make sure it's reset correctly + num_tar, num_jsonl = 0, 0 + for num_tar, (path, file) in enumerate(files_iterable, start=1): + for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): + _test_jsonl(subpath, subfile) + assert num_tar == 1 + assert num_jsonl == 2 + + +def test_iter_files(data_dir_with_hidden_files): + dl_manager = StreamingDownloadManager() + for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1): + assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt") + assert num_file == 2 + + +def test_xnumpy_load(tmp_path): + import numpy as np + + expected_x = np.arange(10) + npy_path = tmp_path / "data-x.npy" + np.save(npy_path, expected_x) + x = xnumpy_load(npy_path) + assert np.array_equal(x, expected_x) + + npz_path = tmp_path / "data.npz" + np.savez(npz_path, x=expected_x) + with xnumpy_load(npz_path) as f: + x = f["x"] + assert np.array_equal(x, expected_x) diff --git a/testbed/huggingface__datasets/tests/test_tqdm.py b/testbed/huggingface__datasets/tests/test_tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..e6ddb86de1d12714f11bf2d9b90a2e7ca4303f53 --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_tqdm.py @@ -0,0 +1,116 @@ +import unittest +from unittest.mock import patch + +import pytest +from pytest import CaptureFixture + +from datasets.utils import ( + are_progress_bars_disabled, + disable_progress_bars, + enable_progress_bars, + tqdm, +) + + +class TestTqdmUtils(unittest.TestCase): + @pytest.fixture(autouse=True) + def capsys(self, capsys: CaptureFixture) -> None: + """Workaround to make capsys work in unittest framework. + + Capsys is a convenient pytest fixture to capture stdout. + See https://waylonwalker.com/pytest-capsys/. + + Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790. 
+ """ + self.capsys = capsys + + def setUp(self) -> None: + """Get verbosity to set it back after the tests.""" + self._previous_are_progress_bars_disabled = are_progress_bars_disabled() + return super().setUp() + + def tearDown(self) -> None: + """Set back progress bars verbosity as before testing.""" + if self._previous_are_progress_bars_disabled: + disable_progress_bars() + else: + enable_progress_bars() + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) + def test_tqdm_helpers(self) -> None: + """Test helpers to enable/disable progress bars.""" + disable_progress_bars() + self.assertTrue(are_progress_bars_disabled()) + + enable_progress_bars() + self.assertFalse(are_progress_bars_disabled()) + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True) + def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None: + """ + Test helpers cannot enable/disable progress bars when + `HF_DATASETS_DISABLE_PROGRESS_BARS` is set. + """ + disable_progress_bars() + self.assertTrue(are_progress_bars_disabled()) + + with self.assertWarns(UserWarning): + enable_progress_bars() + self.assertTrue(are_progress_bars_disabled()) # Still disabled ! + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False) + def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None: + """ + Test helpers cannot enable/disable progress bars when + `HF_DATASETS_DISABLE_PROGRESS_BARS` is set. + """ + enable_progress_bars() + self.assertFalse(are_progress_bars_disabled()) + + with self.assertWarns(UserWarning): + disable_progress_bars() + self.assertFalse(are_progress_bars_disabled()) # Still enabled ! + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) + def test_tqdm_disabled(self) -> None: + """Test TQDM not outputting anything when globally disabled.""" + disable_progress_bars() + for _ in tqdm(range(10)): + pass + + captured = self.capsys.readouterr() + self.assertEqual(captured.out, "") + self.assertEqual(captured.err, "") + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) + def test_tqdm_disabled_cannot_be_forced(self) -> None: + """Test TQDM cannot be forced when globally disabled.""" + disable_progress_bars() + for _ in tqdm(range(10), disable=False): + pass + + captured = self.capsys.readouterr() + self.assertEqual(captured.out, "") + self.assertEqual(captured.err, "") + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) + def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None: + """Test TQDM can still be locally disabled even when globally enabled.""" + enable_progress_bars() + for _ in tqdm(range(10), disable=True): + pass + + captured = self.capsys.readouterr() + self.assertEqual(captured.out, "") + self.assertEqual(captured.err, "") + + @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) + def test_tqdm_enabled(self) -> None: + """Test TQDM work normally when globally enabled.""" + enable_progress_bars() + for _ in tqdm(range(10)): + pass + + captured = self.capsys.readouterr() + self.assertEqual(captured.out, "") + self.assertIn("10/10", captured.err) # tqdm log diff --git a/testbed/huggingface__datasets/tests/test_version.py b/testbed/huggingface__datasets/tests/test_version.py new file mode 100644 index 0000000000000000000000000000000000000000..977be5b7add0cc08ccbe0dabb9a9e7a2d1201a7b --- /dev/null +++ b/testbed/huggingface__datasets/tests/test_version.py @@ -0,0 +1,23 @@ +import pytest + +from datasets.utils.version import Version + + 
+@pytest.mark.parametrize( + "other, expected_equality", + [ + (Version("1.0.0"), True), + ("1.0.0", True), + (Version("2.0.0"), False), + ("2.0.0", False), + ("1", False), + ("a", False), + (1, False), + (None, False), + ], +) +def test_version_equality_and_hash(other, expected_equality): + version = Version("1.0.0") + assert (version == other) is expected_equality + assert (version != other) is not expected_equality + assert (hash(version) == hash(other)) is expected_equality diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-qualities-3.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-qualities-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24537ca9b1e5187b37136b19898ab370dec315d7 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/3/api-v1-json-data-qualities-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09ef19cfad25c5de487ddbaef3c4d068ca3063777730a288dfd6f5096a0c6f46 +size 1407 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-list-data_name-emotions-limit-2-data_version-3.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-list-data_name-emotions-limit-2-data_version-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..ed4efacefc3e856c3ad56407f2d195c65c61e4bb --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-list-data_name-emotions-limit-2-data_version-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19e6b2a2a8fec5403c146642a4dc2e077d66a3a1ac87e8239bd1dd31c928ab9c +size 315 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-qualities-40589.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-qualities-40589.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..49d394f0c458c02f7d9781445ef870cf8f747e0e --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/api-v1-json-data-qualities-40589.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f7973193eb35d19e99d1d8bca3c7f3a8b8d0410508af34ad571aee8ec5ab05 +size 913 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/data-v1-download-4644182.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/data-v1-download-4644182.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..3c0efffc333c6ef0622ed3d3e3c95d3e319fc05f --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40589/data-v1-download-4644182.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c4226550827ceff3509c67179c473e14385cee206536362e57c5e0dfc7751bf +size 4344 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-40675.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-40675.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b376ef7c9d32dd344e0fff0be5a30ae1e6dda779 --- /dev/null +++ 
b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-40675.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a787772d60fbfcc21a0e96fd81906f03542e0b942d19dcc95dae47498953a4fd +size 323 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-features-40675.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-features-40675.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..d74f6d6f085d991634610476015839faf034ff2d --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-features-40675.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d48d9679789d6baf7d0d3c346e3576d7589b663c3640942f9c1dba76e355faaa +size 307 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-data_version-1-status-deactivated.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-data_version-1-status-deactivated.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..336782317369c6fdf4d987c6fd3fdee3309a50e1 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-data_version-1-status-deactivated.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21ed1ecc5d874956e951a9361f251afb2165adda92798c89ca5e2f97ae80dd8f +size 317 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-data_version-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-data_version-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..577840cd46f47e22c75975d855fe21c9b997ee22 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-data_version-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad0a4a5477605380f8819ce840dbb928a3d084267c512f6cb50d5be2f7c76bc2 +size 85 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-status-active-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..de6ccfccc5f28d446f34b7ffd7fcf83688cb00cf --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-list-data_name-glass2-limit-2-status-active-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:141ba630e039ea44bbaef92a288e2d964fc3aa2ef805a9723b4aac738a26a627 +size 88 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-qualities-40675.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-qualities-40675.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..ed6cf27efa78d576427119132581c3a3fb3b76b3 --- /dev/null +++ 
b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/api-v1-json-data-qualities-40675.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88fcdc3a6fed5697f36dc262f69bfffb814767ce336ff28a21def3aac937b08c +size 886 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/data-v1-download-4965250.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/data-v1-download-4965250.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..d1d26798a46116abdc22f357615f381a19bccf99 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40675/data-v1-download-4965250.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:543d0887312f43d9f65a7e1d08be78a2436369f632d7382b4134cebb525a48a3 +size 3000 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-40945.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-40945.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..06446ec67eeede9b6d48f044d8ae402fe11bb90e --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-40945.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02882c6b02c4e068ef2b16f37f33ae3d5e9dd17ca29d01662c6924e16427eb5d +size 437 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-qualities-40945.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-qualities-40945.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..adb1b0a58ae958ab00a906b0287f416a4ab48ace --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/api-v1-json-data-qualities-40945.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c7e5a46554ab6a8121832dc0cd9f7a60f5034cef1a5a7d61346bbd912516b54 +size 1042 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/data-v1-download-16826755.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/data-v1-download-16826755.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..5d8d5ae4fd5692b26e281928f6a1baad008f2008 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40945/data-v1-download-16826755.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:516e961f519876e5f89b339a0364a08dd64160ac3a4d76d5ec62955bfd6d6ce5 +size 32243 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-40966.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2b93281d0ded598bd03b1160a1b8a86df61b485c --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36c63c3ac8c9db59910acbf4c772cd53040ccd0eac0b0452611dd7ad8da50474 +size 1660 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-features-40966.json.gz 
b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-features-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..138ffc718b067282922ebeb107b22b8c3af08477 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-features-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8adac8e2f8cbcbfa9677acdd4927a961430465d2c99401832160be455cfaced8 +size 3690 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-list-data_name-miceprotein-limit-2-data_version-4.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-list-data_name-miceprotein-limit-2-data_version-4.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..7e6491106a294f38733d8dfd6475c1afe42b8848 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-list-data_name-miceprotein-limit-2-data_version-4.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c203b4627175cebbf527d81917a499911af915f6f2f46ee7248428a948d603 +size 325 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-qualities-40966.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-qualities-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..1d119ce6ec907e4689015911b16bcbfe8552b4e8 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/api-v1-json-data-qualities-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dee83987fffa8ec20e23b3cabc00d42beb7a469af6bd803909998c1687fa634 +size 934 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/data-v1-download-17928620.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/data-v1-download-17928620.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..c82d051bccb1b232214b31c73114f4f78749d810 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/40966/data-v1-download-17928620.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c5fd93ffec7deb63a940fd698534dd7ebb7db349fc183930041cbf17e60e2cc +size 6471 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-42585.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-42585.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2d2b568ee4692ad61c20ab051723efceb318b816 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-42585.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ccbf138e0663895f9cf511136bc6395c153f6238af2eacb6a367e86e15d1a71 +size 1492 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-features-42585.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-features-42585.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..9564b2e437ee328b195f6289af99be51032c64d0 --- /dev/null +++ 
b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-features-42585.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0985045a454c8186b4e690ebefb6cea1ef7c13292c98d50abda470a0ff3ad425 +size 312 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-qualities-42585.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-qualities-42585.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a54754b666113a517f58ff509416f461e92636e4 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/api-v1-json-data-qualities-42585.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3736e7feb7ad30c68675c2c4e48a9fb262e80308c9083b100ddd0339da1fc282 +size 348 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/data-v1-download-21854866.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/data-v1-download-21854866.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..2ad2b8b4fd397ee8d61b44fb77b26076f643335d --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/42585/data-v1-download-21854866.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8d00c6690576a9ec39e1cb77054e13296be0fdebab0fb35a64a0e8627b6e6f3 +size 4519 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-561.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..92ba4281fe86b5273792d24afaabb04eef03199d --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d38fdd601b67bb9c6d16152f53ddf166a0cfcfef4fa86438e899bfe449226c +size 1798 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-features-561.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-features-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b2fce3413fd38f4c4f80ef7d6b198b4ac740a90a --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-features-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:407424fb79cc30b8e9ff90900b3bf29244ac7f3797f278b5be602843f959b4ee +size 425 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-list-data_name-cpu-limit-2-data_version-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-list-data_name-cpu-limit-2-data_version-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..52ae92392967d187709107d1c1bc9709c085b519 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-list-data_name-cpu-limit-2-data_version-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0703b0ae20b9ff75087dc601640ee58f1c2ad6768858ea21a245151da9ba8e4c +size 301 diff --git 
a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-list-data_name-cpu-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-list-data_name-cpu-limit-2-status-active-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..6bde2de0c6047726f26476a514d27a0d03c7d4b5 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-list-data_name-cpu-limit-2-status-active-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70d4596ad879547863109da8675c2b789d07df66b526d7ebcbce9616c4c9b94c +size 347 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-qualities-561.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-qualities-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..911f6823bb1bf0d9de5120e23e902e9a0a39a2bc --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/api-v1-json-data-qualities-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8743b2d93d2c62a82fb47e1fbc002b97e25adcfb5bf1fcb26b58ad0bed15bd48 +size 1074 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/data-v1-download-52739.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/data-v1-download-52739.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..225208c948bd5270b3911828bead9d2fd3af3fbb --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/561/data-v1-download-52739.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e96142b5e00dfec2617b0c22d7192b340ae2c28ec3ffc3a894c5be746b970a59 +size 3303 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-61.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-61.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a7ff82cef2a309d55bcae99900bdd51b6bbc675e --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-61.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5c7e79aa41ef580838fb9fc1906280f076c47be1741fddd5004ddb500eb57fe +size 898 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-features-61.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-features-61.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..466d7fab3f54e053ae4abc1044c671ac525accc0 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-features-61.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33cbd6ae945ba04969370ab35604e9363c87256393493382b5118a89d59386d6 +size 268 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-list-data_name-iris-limit-2-data_version-1.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-list-data_name-iris-limit-2-data_version-1.json.gz new file mode 100644 index 
0000000000000000000000000000000000000000..76bb2da49d2e31a888153004b5177dc2a0c2f46c --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-list-data_name-iris-limit-2-data_version-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bce20aae7fd903796d96d5b3a3677b7058fbc5f3fe0996ee9d491e4ee23d132 +size 293 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-list-data_name-iris-limit-2-status-active-.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-list-data_name-iris-limit-2-status-active-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c628aa1d9076067123d34c4c392a3a215dae524b --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-list-data_name-iris-limit-2-status-active-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9f4b9317997df63ed8d2bb073a3906344c0e0be017fd384eaec36ced8b94bae +size 330 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-qualities-61.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-qualities-61.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..285c038aae89afa2eb0c334cdf28a9d0f6e2cb32 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/api-v1-json-data-qualities-61.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:424cd47c12a51c7bb8d8169fac80fb5601f152bd78468b241d4b115bf7d22f20 +size 1121 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/data-v1-download-61.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/data-v1-download-61.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..25bb1bc7760d28c156677d8d257421b3805299c1 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/61/data-v1-download-61.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe4736924606638984e573235191025d419c545d31dc8874c96b72f5ec5db73 +size 2342 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-62.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-62.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..da14f86aac08c072962c2eecf6fe18cf319c5718 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-62.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef3551ad47d48023c5a1f1cf077047a9a4b95544bb91d4a86097f8b574f8d07 +size 656 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-features-62.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-features-62.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..22da3f227189e339ed4d2b3861866ded65d999a6 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-features-62.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:489b177126cb7f335cb220709233b946d3a0ad71d38bba6d48b79187146e585a +size 817 diff 
--git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-qualities-62.json.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-qualities-62.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c8c1985e0bf13abce1abad45a5d872ccbcd44478 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/api-v1-json-data-qualities-62.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278a52a52d569f07d14c6a7877b104762c77daac429fb1fd9817a0378d6ec634 +size 805 diff --git a/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/data-v1-download-52352.arff.gz b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/data-v1-download-52352.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3ce4b7991c2223af6097adb8d1f553088d1ece0 --- /dev/null +++ b/testbed/scikit-learn__scikit-learn/sklearn/datasets/tests/data/openml/62/data-v1-download-52352.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb5830c82112f62a400c82ac1f1b5eb61c29c0a7cc72ba56d2aeff0fae8a60f9 +size 1625 diff --git a/testbed/scrapy__scrapy/artwork/qlassik.zip b/testbed/scrapy__scrapy/artwork/qlassik.zip new file mode 100644 index 0000000000000000000000000000000000000000..88994ab56335b7e5c853c180d6f0e9d165caa7e1 --- /dev/null +++ b/testbed/scrapy__scrapy/artwork/qlassik.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c3595ba6d6c96562ac1a1ff82ce9e37a163117d20c8d10c0b0daa73e1dc6c97 +size 120204 diff --git a/testbed/scrapy__scrapy/artwork/scrapy-logo.jpg b/testbed/scrapy__scrapy/artwork/scrapy-logo.jpg new file mode 100644 index 0000000000000000000000000000000000000000..19b8fadc54f0f344c9865af0f9176707cc6752d3 --- /dev/null +++ b/testbed/scrapy__scrapy/artwork/scrapy-logo.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80cc684fae26de76fa69615e952f52b2956d2068083eaf5dfd9d06c73c30c33e +size 23398 diff --git a/testbed/scrapy__scrapy/docs/topics/_images/inspector_01.png b/testbed/scrapy__scrapy/docs/topics/_images/inspector_01.png new file mode 100644 index 0000000000000000000000000000000000000000..1b39e3d51b806db219535ec4b7c68d0cc78c8d41 --- /dev/null +++ b/testbed/scrapy__scrapy/docs/topics/_images/inspector_01.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3017f80ca066ab5c5715da2f19c89a5c1fdd38bf133a6205078c08d9a850d27e +size 53922 diff --git a/testbed/scrapy__scrapy/docs/topics/_images/network_01.png b/testbed/scrapy__scrapy/docs/topics/_images/network_01.png new file mode 100644 index 0000000000000000000000000000000000000000..0c622f5f5808090387ccd0e7d9647517d925f75b --- /dev/null +++ b/testbed/scrapy__scrapy/docs/topics/_images/network_01.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:257ffb5ce0a15c875fd6b54bc91f9c2554eae0b3b82b0d7faf43dc29368dd399 +size 10720 diff --git a/testbed/scrapy__scrapy/docs/topics/_images/network_02.png b/testbed/scrapy__scrapy/docs/topics/_images/network_02.png new file mode 100644 index 0000000000000000000000000000000000000000..9df1822f99d7510d307490048a72148131adbeb9 --- /dev/null +++ b/testbed/scrapy__scrapy/docs/topics/_images/network_02.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a1eadc8e8ffcecace896cd03f984e95b5ac1b5c93b19d10c3fe9a3808c208a6 +size 82702 diff --git 
a/testbed/scrapy__scrapy/docs/topics/_images/network_03.png b/testbed/scrapy__scrapy/docs/topics/_images/network_03.png new file mode 100644 index 0000000000000000000000000000000000000000..e943a6357b8b469c4304d312e94edf65ab4832d0 --- /dev/null +++ b/testbed/scrapy__scrapy/docs/topics/_images/network_03.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce7eb6c16e47b5d685dcffe95ce2979cadc92ecbe18bd8a1e720223dbf200af9 +size 45506 diff --git a/testbed/scrapy__scrapy/docs/topics/_images/scrapy_architecture.png b/testbed/scrapy__scrapy/docs/topics/_images/scrapy_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..eaf6054dc7a4bbdf2b0dfcb357ea440619e5c8c8 --- /dev/null +++ b/testbed/scrapy__scrapy/docs/topics/_images/scrapy_architecture.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cce134beef0ecd888b3dc3e5d9fd3f3bf0b89cb895aded4852fb267891beacb4 +size 92558 diff --git a/testbed/scrapy__scrapy/docs/topics/_images/scrapy_architecture_02.png b/testbed/scrapy__scrapy/docs/topics/_images/scrapy_architecture_02.png new file mode 100644 index 0000000000000000000000000000000000000000..2ed91588973174a5b54da6fecb4794b29b39ad5c --- /dev/null +++ b/testbed/scrapy__scrapy/docs/topics/_images/scrapy_architecture_02.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baff04b0ee7235ce4e63b7f1a66d7203c69b88a2eb8aa0a170b3460012a60b09 +size 53978 diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.tar b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.tar new file mode 100644 index 0000000000000000000000000000000000000000..b076118a9421a47611df1b94ddd8997adc892905 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.tar differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.xml.bz2 b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.xml.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..d252d428bb5c7853fa2c01e09d2d773c158e95d2 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.xml.bz2 differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.xml.gz b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.xml.gz new file mode 100644 index 0000000000000000000000000000000000000000..8ad2edc61d6eb2428363ef3a2b8969c962798a88 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.xml.gz differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.zip b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.zip new file mode 100644 index 0000000000000000000000000000000000000000..3517def7e534710cc3b7b5a9bab677b87d3c0466 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/feed-sample1.zip differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-br.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-br.bin new file mode 100644 index 0000000000000000000000000000000000000000..a1df8ae43303b90895c37c076efbd8f93c8a8002 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-br.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-gzip.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-gzip.bin new file mode 100644 index 
0000000000000000000000000000000000000000..36fd796e6d6f4ef0ff1b50d7f15b1f536a74f486 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-gzip.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-rawdeflate.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-rawdeflate.bin new file mode 100644 index 0000000000000000000000000000000000000000..12852be09bf343a1357f662e529b2371060687a7 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-rawdeflate.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zlibdeflate.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zlibdeflate.bin new file mode 100644 index 0000000000000000000000000000000000000000..2c1841dfc39130e6ca3782e70dcdfd4cbb191d2b Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zlibdeflate.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-static-content-size.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-static-content-size.bin new file mode 100644 index 0000000000000000000000000000000000000000..ceda1dad86f768917a4da1d48b3dea0c570d1a23 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-static-content-size.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-static-no-content-size.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-static-no-content-size.bin new file mode 100644 index 0000000000000000000000000000000000000000..66298f8bc4e0c1bb2629828dbdcba47165ef5115 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-static-no-content-size.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-streaming-no-content-size.bin b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-streaming-no-content-size.bin new file mode 100644 index 0000000000000000000000000000000000000000..2670078f9723162caa35ec5d65407c5731195166 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/html-zstd-streaming-no-content-size.bin differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/truncated-crc-error-short.gz b/testbed/scrapy__scrapy/tests/sample_data/compressed/truncated-crc-error-short.gz new file mode 100644 index 0000000000000000000000000000000000000000..8ff6385c530f5f59914d1405c5f7f32d677c4865 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/truncated-crc-error-short.gz differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/truncated-crc-error.gz b/testbed/scrapy__scrapy/tests/sample_data/compressed/truncated-crc-error.gz new file mode 100644 index 0000000000000000000000000000000000000000..47c266852ee23f0fbc6bdb8746c5460d48fd9257 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/truncated-crc-error.gz differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/compressed/unexpected-eof.gz b/testbed/scrapy__scrapy/tests/sample_data/compressed/unexpected-eof.gz new file mode 100644 index 0000000000000000000000000000000000000000..7a4cb85a24cef60901ad78370b7f3624b83026cd Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/compressed/unexpected-eof.gz differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/python-logo-master-v3-TM-flattened.png 
b/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/python-logo-master-v3-TM-flattened.png new file mode 100644 index 0000000000000000000000000000000000000000..beab4f4f8d43168b807e1bcd2548e3d9031e6d45 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/python-logo-master-v3-TM-flattened.png differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/python-powered-h-50x65.png b/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/python-powered-h-50x65.png new file mode 100644 index 0000000000000000000000000000000000000000..3cb13713d717db96fa98e61d4ffaa44f9fd50422 Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/python-powered-h-50x65.png differ diff --git a/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/scrapy.png b/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/scrapy.png new file mode 100644 index 0000000000000000000000000000000000000000..94f1c0af10ff3dd64ac775d6331af078add7643f Binary files /dev/null and b/testbed/scrapy__scrapy/tests/sample_data/test_site/files/images/scrapy.png differ diff --git a/testbed/scverse__scanpy/docs/_static/img/Scanpy_Logo_RGB.png b/testbed/scverse__scanpy/docs/_static/img/Scanpy_Logo_RGB.png new file mode 100644 index 0000000000000000000000000000000000000000..82364c22bb402b98db8e3f071903bd2c7965777b --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/Scanpy_Logo_RGB.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c17218c638180f52314d85c62df6ed6cd7b091be7c05f041055c2e90dcb07a96 +size 63218 diff --git a/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_attachment-tab.png b/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_attachment-tab.png new file mode 100644 index 0000000000000000000000000000000000000000..2cd5da52637d4aaa95c49f4b851661517adc3888 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_attachment-tab.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:551a9a8996b734b5cc22a8425eaae83a7d567c9a7e6cac757fa895fd5fc39b66 +size 219512 diff --git a/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_select-test.png b/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_select-test.png new file mode 100644 index 0000000000000000000000000000000000000000..de91602a65130188aabe99124643b44812a94247 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_select-test.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd782b649e892a58bc4b5d5b16a67c6e4ceddf3f53aa6c964a3dbf3583c16620 +size 100919 diff --git a/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_tests-tab.png b/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_tests-tab.png new file mode 100644 index 0000000000000000000000000000000000000000..5d064adbd95e0e59353e59403a0070af6d1e2144 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/ci_plot-view_tests-tab.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c1488d98a464e0e03a2f633aefa0169900abaf42dfc4cc9cc7b896ead045951 +size 241201 diff --git a/testbed/scverse__scanpy/docs/_static/img/spatial-basic-analysis.png b/testbed/scverse__scanpy/docs/_static/img/spatial-basic-analysis.png new file mode 100644 index 0000000000000000000000000000000000000000..35c8f69e178f34cfbe24a42d83ff6ece18e1c284 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/spatial-basic-analysis.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8de1b35c8113e356b05bed7d792c46ce3515a687811c808ebf2cad0a4ca3007f +size 540395 diff --git a/testbed/scverse__scanpy/docs/_static/img/stacked_violin_dotplot_matrixplot.png b/testbed/scverse__scanpy/docs/_static/img/stacked_violin_dotplot_matrixplot.png new file mode 100644 index 0000000000000000000000000000000000000000..d38dd59a31ce7d869c8591f7d7c7bc39c47b9245 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/stacked_violin_dotplot_matrixplot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bba6edd7bd021e448e4bd2fce023b9afd2f4bea562bf7c38f0afc53d309da0e +size 61708 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170430_krumsiek11/timeseries.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170430_krumsiek11/timeseries.png new file mode 100644 index 0000000000000000000000000000000000000000..17f8f81109102d49a49c3c7e825d7d46e70607da --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170430_krumsiek11/timeseries.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db14dd09f93392465c1bb1c442b4f2e5af0d7d1a5840883036b105f68ff89d90 +size 198469 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/NKG7.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/NKG7.png new file mode 100644 index 0000000000000000000000000000000000000000..3afa5757830ad16a640e74c1ba0b5572668ef017 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/NKG7.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c38f56ec244cc504361f211e81b1f4de8c670e474836d195c2e581ecfb90309 +size 53008 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/cell_types.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/cell_types.png new file mode 100644 index 0000000000000000000000000000000000000000..94eff25edf4022077308ed608abbd077cc8280b4 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/cell_types.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d350beabbc72cd8d39a95d1412cacf6750eb8e802290130db0426b23c0e5cb35 +size 216130 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/filter_genes_dispersion.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/filter_genes_dispersion.png new file mode 100644 index 0000000000000000000000000000000000000000..8dc9fbef7684456f4c3b828f020f944487761a81 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/filter_genes_dispersion.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aeb34364f4c6594b5c678e720159575cad9f9603270bc94a4d91c1b11a4a962 +size 59760 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/louvain.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/louvain.png new file mode 100644 index 0000000000000000000000000000000000000000..0629cc4c3bea6b5a8a262b73764be784d42b51be --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/louvain.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca5ef8f823de4bc217d9832671659ade83eeb6ac15c4cd5881b11d7eac7b06da +size 108431 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/violin.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/violin.png new file mode 100644 index 
0000000000000000000000000000000000000000..199bb32903126e4d747b4b73cc7abe86e66c6571 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170505_seurat/violin.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:868b6cca151bc877c52196c3f5dc22e392ba31f0b4a0151ec94c9491c555cf43 +size 84683 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/170522_visualizing_one_million_cells/tsne_1.3M.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/170522_visualizing_one_million_cells/tsne_1.3M.png new file mode 100644 index 0000000000000000000000000000000000000000..50da8764571f259c468ba300d4a55b7dc7670c97 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/170522_visualizing_one_million_cells/tsne_1.3M.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:751ff8c16a6a8b5e9112e644c0d81e981121892c8ca6949e0705f8115f873c7c +size 207532 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/paga_paul15.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/paga_paul15.png new file mode 100644 index 0000000000000000000000000000000000000000..19e8ae6ba55d2daface5e6bea35b2a34d1051682 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/paga_paul15.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9630046bdabaeba97d17e92d1346b5e6a93865db3e5bbd7e1607232866d9d4ed +size 175602 diff --git a/testbed/scverse__scanpy/docs/_static/img/tutorials/paga_planaria.png b/testbed/scverse__scanpy/docs/_static/img/tutorials/paga_planaria.png new file mode 100644 index 0000000000000000000000000000000000000000..ed9df3d2bb62c17afcb8a2c4be41a29c78398495 --- /dev/null +++ b/testbed/scverse__scanpy/docs/_static/img/tutorials/paga_planaria.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:208132d5565f539494829d3a74207d49a187efabc05ad579519538e2eebb18f0 +size 330000 diff --git a/testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad b/testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad new file mode 100644 index 0000000000000000000000000000000000000000..0a9ccaa2c515771de8dfa0e1b489f137fd4df1a8 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/datasets/10x_pbmc68k_reduced.h5ad @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e71d41e737c941559b7c57c9243bdb3d2c889c2adfdf00e3422ac6b46783676f +size 1772368 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 new file mode 100644 index 0000000000000000000000000000000000000000..95e594898e8b87d25936b042a6723a7a4243213c --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8edf7cb4a39cde7fc478dcdb6423b143e378d5357a20f39964699e3aebc545fb +size 917850 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/1.0 b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/1.0 new file mode 100644 index 0000000000000000000000000000000000000000..8340df38c9def156295cfc7a3217f70aa8f7bfa3 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/1.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d2d388e50983005f127e2cf6fe79ebc5e1f29e8e05091e35a50f9ce827251f2 +size 918726 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/2.0 
b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/2.0 new file mode 100644 index 0000000000000000000000000000000000000000..302a5b807f298d8ea73858aa0bad9a7751675567 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/2.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a9690644fb3dab99bc3c1ca1182db99949331f18356445041eeacdf5abd8ebb +size 922150 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/3.0 b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/3.0 new file mode 100644 index 0000000000000000000000000000000000000000..233d88e34b4429b905b14a7e5533bdd4e6eeff77 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/3.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24d33379801776b47df6ff3dcbfceccecf7814b29955dc7e6ce241201731d5b6 +size 928651 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 new file mode 100644 index 0000000000000000000000000000000000000000..b64401ba15b2c35b94ac41202b48532151dca997 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x-10k-subset.zarr/X/4.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac2e56bb103cb83cc553ccbbe9d23abf39fcbe15c1fb32d309bf6fbcbe42409 +size 913135 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/1.2.0/filtered_gene_bc_matrices_h5.h5 b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/1.2.0/filtered_gene_bc_matrices_h5.h5 new file mode 100644 index 0000000000000000000000000000000000000000..50f4904d6ed077e2eef67beb914757ff560cf124 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/1.2.0/filtered_gene_bc_matrices_h5.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21578c04234b1686b4d5e48d69439fcc57710fce8ed0c6c88063708ad71845e9 +size 26745 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/1.2.0/multiple_genomes.h5 b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/1.2.0/multiple_genomes.h5 new file mode 100644 index 0000000000000000000000000000000000000000..f52891d525fe4693722da157d5dac4f5b4327481 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/1.2.0/multiple_genomes.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c760c89995dcb55be219e689353f42d36593c68082d0a2277eed7879261ae45 +size 54138 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix.h5 b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix.h5 new file mode 100644 index 0000000000000000000000000000000000000000..08c57fabfcd6d6a31d2c16c245fb7490c01780da --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d55cc5f32ebb8b70746d546baa300448949f69efd4ee5022bbd9330af3cd4212 +size 100207 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/barcodes.tsv.gz b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/barcodes.tsv.gz new file mode 100644 index 0000000000000000000000000000000000000000..db865d81c8cdbd73205ecaefe6cc204c4b018f6c --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/barcodes.tsv.gz @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e3490ae2c4cbc0b8b4001cefa47e0d6efa23e59aa57e760aee593bd66605e90a +size 6423 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/features.tsv.gz b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/features.tsv.gz new file mode 100644 index 0000000000000000000000000000000000000000..20e4d6a2c0b56a8b9f3285f2957ec8355d07aa11 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/features.tsv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5ee587a04a1744a2922018e4c92626489908ad48870433c2095184542482fcd +size 4774 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/matrix.mtx.gz b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/matrix.mtx.gz new file mode 100644 index 0000000000000000000000000000000000000000..c1fe25d8f97cb54ad59dadc40050b30a77d466d6 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/10x_data/3.0.0/filtered_feature_bc_matrix/matrix.mtx.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d40c3b07c32089c3b310d0914670b9513407bfad18d6529f20046fe4bbd4eaf +size 73946 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/objs_t_test.pkl b/testbed/scverse__scanpy/scanpy/tests/_data/objs_t_test.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d022ceb35fe339e0255ccca05c04229bdf33b6f7 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/objs_t_test.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b06b17b9c8f6f05f5954f72d525a3636cc50faaa307cbe559a7f28d2c71fb758 +size 8815 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/objs_wilcoxon.pkl b/testbed/scverse__scanpy/scanpy/tests/_data/objs_wilcoxon.pkl new file mode 100644 index 0000000000000000000000000000000000000000..59c2f6bfbb8e49578d97491499c5a857706aa0a6 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/objs_wilcoxon.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96e2e0d48907e7054a689bacfd4f650471bbe2f010dccc6312cf55ee50b334fd +size 8815 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/score_genes_reference_paul2015.pkl b/testbed/scverse__scanpy/scanpy/tests/_data/score_genes_reference_paul2015.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f09cc47e05007ee9d70af3e6117ee236bb760983 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/score_genes_reference_paul2015.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ebeccb62ee580ad8d21129ed493398e79cd5a273405dbae857abd00d2ddc1ed +size 21991 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/filtered_feature_bc_matrix.h5 b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/filtered_feature_bc_matrix.h5 new file mode 100644 index 0000000000000000000000000000000000000000..c9ba78e6711ac31b3d68722d2a3b5f4a56095d1d --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/filtered_feature_bc_matrix.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b6cb386df8327ea7aec1d3ea87c73bac766dbf9b03003d70154c024c61f855a +size 35962 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/spatial/tissue_hires_image.png b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/spatial/tissue_hires_image.png new 
file mode 100644 index 0000000000000000000000000000000000000000..83e569eeb4f883adad6fd320ea251ac5c46aa40a --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/spatial/tissue_hires_image.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2647a9d1ac01e4269b669cc183dd0b4d0ea5c69c2de97b6c02d19d53002cc01e +size 20014 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/spatial/tissue_lowres_image.png b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/spatial/tissue_lowres_image.png new file mode 100644 index 0000000000000000000000000000000000000000..1da88c5e2f3c4203b70282737f8259e18c8feba7 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/1.0.0/spatial/tissue_lowres_image.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7069e504911a86c16aac826194fd28a3a0883d05e7ccb84b72781635c676891d +size 518124 diff --git a/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/2.1.0/raw_probe_bc_matrix.h5 b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/2.1.0/raw_probe_bc_matrix.h5 new file mode 100644 index 0000000000000000000000000000000000000000..7b346a127fd937b92b6cfeb4d7053dba7db586d3 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_data/visium_data/2.1.0/raw_probe_bc_matrix.h5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f03fcc014dfada6bc77f0ed0f212e3227d51368fa340d857b1d4569c14dd3b4a +size 127196 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/3dprojection/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/3dprojection/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..063f186f117491c0a931e77b03a42129b48033e4 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/3dprojection/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a67ce308c535dd337809bbec8c768555b94eb5bd7f8777aa0f8d1d496e0e07e +size 11732 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/binary_pca/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/binary_pca/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..5de22f7d7141fb3c6b5b0d8df79a2e49e753b3c5 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/binary_pca/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ffc0fcfb8dbea0a37b275c95b391650ec035ee2cc6cbee00ecf1830d0a2e8ba +size 3584 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/clustermap/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/clustermap/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..438f0955b8d951e021b4dc4574eaab3d28de06ac --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/clustermap/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a8b3798e0e213c4385b949928695033fd207f5a52e02b945a1c3b373ee4755 +size 25439 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/clustermap_withcolor/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/clustermap_withcolor/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..efcd529822363ad405ffc474bb2fc1339dacdc99 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/clustermap_withcolor/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9546a3c1abe2d7bf23ba961e7d1b5edd6d4f68c65216e34a67129370f4c4ceba +size 25362 diff --git 
a/testbed/scverse__scanpy/scanpy/tests/_images/correlation/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/correlation/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..a4a46e283d9672fbd1800a2f83bd2fa8d1c39cc2 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/correlation/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:708dcefbe3499eab9cdee2fd26eebe30192f21274352cc5df39ff2c58e6f4209 +size 12292 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dendrogram/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dendrogram/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..ff29a6118ca7e165ed6733c5e3995eb654605bea --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dendrogram/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e84d0a94fa30d5627c341397ddbd0b8ace94d320e46817fc78496586a04c3f9 +size 3417 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..ae69ea17b4c2de1b00cae516a6966378f1e9fb27 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f56f118b3909d351014cccf1005e20fd5a3122efe16b6c61a2ba6b59a8985fb +size 13522 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot2/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot2/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..3af54d5771b14de50355cbb5f3ebddd71b10018d --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot2/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b72c6e5d2c8776c74cb2c3586bf5cc6a6253bbf776dd8060ae3242be23ab1454 +size 19477 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot3/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot3/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..71e28b159cc02a96268f0f004f5b9a0539554e8b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot3/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ef4a31ad5387e807c0db1f29ef86745149f589589981308007acdb3b7cb04d +size 14963 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_dict/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_dict/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..670809824a5f5c2ba2083ba4102c1affe90d0e5f --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_dict/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca62a6d9b05ca672965454e4137319ac92172c5784cc7bbb00998d3f0d188b74 +size 14613 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_gene_symbols/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_gene_symbols/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..bb80681b0e4b3aced271f5e94fb4c4258dd31a86 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_gene_symbols/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f562080d14b3dd2a7658a70a364ab3096dcc32bab32274ec73e5f94e2025a2f1 +size 13135 diff --git 
a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_groupby_index/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_groupby_index/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..5ec4c9183e4a1225b4bc4a4823a2d3985b4fde07 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_groupby_index/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d93b8f594af9f80c8c99787247c0144eab227d76e854922071f6bc2c0b675762 +size 16720 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_groupby_list_catorder/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_groupby_list_catorder/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..c875ac3f5b4e05e9a732d330eadfdfdb1c884743 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_groupby_list_catorder/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acfdb4ee848570dbff053db04d825a22b455d726223605e22130b16c2b4c2f54 +size 14663 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_std_scale_group/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_std_scale_group/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..bd412296ce4589f65188f4aeb059063851e0bc87 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_std_scale_group/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2df20ac24ca9772ad894670454f746537e1c84a8e6ea4f5bee298a62662809dc +size 12161 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_std_scale_var/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_std_scale_var/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..e21ba36f3a56faf1f513b7900d4baf70bf731a3d --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/dotplot_std_scale_var/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d316e25aace95e67cebac9b56db5fc842c9a4984ad1d58a79c7d33e5dfb3982 +size 28361 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..7ce057fc32a036d7664ff6e34cfce0956b6ccfc8 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db4e90b4012ac1ce5084e3bbceaae1ac17be905ce128034b5ad1e82e8d47f153 +size 21451 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..8d98f69e7f686d7b8da80fa59c158d2de3f08a2e --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e856f249f8d60d558acc984a29d10085dea4a12eb2cf15de7b9ef6aa9d5de401 +size 22738 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..9b51c5c88e28eeb38973db73f86d106204729cfe --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c18ba0c9d28cc6b06665918239a130870e6c80e6a61d6e135ae8a20542d1f32 +size 21537 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..66742b1b0c61980b0c89450a6ad86e1114eb93a5 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10b1c3034339bd8c8c525ab0a829fb39a5c993a882f68851a97acf14dc7ac9f7 +size 23397 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..2e8e1f266d8a770b63a9155a1fbb53cbba20a5c7 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4698cce1df7930af858ddbd45aeb0c2731277eed213247832a115504915227cd +size 22087 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..3de275b502e4d45ceb743d2155573108275976eb --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.False-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63aed2ca9f5c6f52053c4b5d9d3063750009a4c7319fa6d44c22f3ab6b5427df +size 25297 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..7ce057fc32a036d7664ff6e34cfce0956b6ccfc8 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db4e90b4012ac1ce5084e3bbceaae1ac17be905ce128034b5ad1e82e8d47f153 +size 21451 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..8d98f69e7f686d7b8da80fa59c158d2de3f08a2e --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e856f249f8d60d558acc984a29d10085dea4a12eb2cf15de7b9ef6aa9d5de401 +size 22738 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..21eb6119b78fb15335067988c0290ff6c6c356ca --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b4c73c9d718594a156af282c7ebabb4bbe99d3d8478d33f7acd0a973aaedbca +size 21692 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..214d67186bc6c5f5f22cef26b1f6e3424a574bc3 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:69265346e0d16a708182261cac2802455737c1f27ade995de72bb263caf5f1fd +size 23553 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..aee67af489895f41b6594435cffa82f9c522eed9 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee468ca4af78f3e7f10dc3ca9baf8fa1fa0dd03fcb8bf8ec633c5e5658a1b6a1 +size 22347 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..b734e4897d902b297a64d26d2c8a53167b16f5ee --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.black_tup-na_in_legend.True-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4e2216240690ba55e64343423c1b9cf583620b4377759a36876cb52496e1cd7 +size 26113 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..65094b72807b6b7f8c7711dfed0dda772f6a6e09 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e12d81000ff91f60c01bbb52e26df1f6e9e1d4ed87da9aab200bc3342af8f635 +size 18224 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..78b881969a84319ff9ec37f28f116a205977f3bb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9180db2592e3bd0b84fa9755e943fdbde3a144caafe9b075520a4310d33cca3c +size 21809 diff --git 
a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..11544175048f371bfc546aaa2e37f68867d080ce --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72dbbbe88bae9e395f9ba63d08d840da595c1af1dd87d7a786608cfc8cca1881 +size 18433 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..bcf4bd1a12cf1e8edfa36c0b7c071936381abcc9 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b940ebf08992956c02b108e562084e6b334c490b79af91e9cb352546241fdfac +size 22536 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..8fbf63cbcd4e7ad0663ceacd79882f133ee20b2c --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:551588b877f266753493a731ee5097f61ba6391d140cbee3ec2df7c852584e38 +size 18860 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..6384918475f9f1c02a71b80e4aebc6bd61b8e7c7 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.False-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51ec1892500dd2e6aac1678f38cd3d627d37f65e9769dc7fc665e8c830165869 +size 24424 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.off-groups.3]/expected.png 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..65094b72807b6b7f8c7711dfed0dda772f6a6e09 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e12d81000ff91f60c01bbb52e26df1f6e9e1d4ed87da9aab200bc3342af8f635 +size 18224 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..78b881969a84319ff9ec37f28f116a205977f3bb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9180db2592e3bd0b84fa9755e943fdbde3a144caafe9b075520a4310d33cca3c +size 21809 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..39077845b11100af7198b5127b6010617b47695b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baee6f6a07c1c707b2cc9899671789b839e149bece1062e28b6010ccde96882d +size 18623 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..78dac3aa1f5928284cd158096d2bca9d96b7d134 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3777917825c825f0bec9b6f1acfcd029d68a4bc26dad592f9112b3059f5d084 +size 22696 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_right-groups.3]/expected.png new file mode 100644 index 
0000000000000000000000000000000000000000..017afdb8555565de5fd03804fece55244b4c6bfa --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd9e1bd0c837024b0d62f7edaa6c84b0ea67831b4f54d455831ee8025b7d992d +size 19128 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..edec4934004c193127d750a77687e770a7317e0b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[pca-na_color.default-na_in_legend.True-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fa4789075d8d8a018f7d133fc573a094bf233342f27ed44ed13a40ca6f9f335 +size 25074 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..2db80e728291e66074261c0ceb4956073585f920 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9274cdcd17745250897efcf6accbba1961a4568b2c52941210e02edb51d61d6 +size 37697 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..68af13a4c9423692b30729f73c21bb44bf4faacb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4796ad2b0b373b0307fe249d4a3a330b3f65ff8e70586350a29ca6c0411c95ef +size 38267 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..84907bdd087bb1b4bb5ebee56f7ed6dbd78caf26 --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9863d10439fabffb2fb1a2f2d61bbc399acc552a76a1ed14028874dc2318f35 +size 37877 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..d100ac068b7f0c037ce62a43f6023a5492759fcf --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3458d480b76a78330a0bb6fe27eb5749b4d45c9e562afc691b1a6367263586c2 +size 38916 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..9ea2bc396cdf58f4df04242171d8b320e5ed8167 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:139780ebd3ff1951a624e7e9be4a3e962158d9ed2d96a1937d440b757298b9d9 +size 38376 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..5661855b3b390d95c93071fe4d7b3aafa05b4abb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.False-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3afce28e81217b645745bffc7696dfd4795756eb5fba2119dd8d262b5acf690a +size 40796 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..2db80e728291e66074261c0ceb4956073585f920 --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9274cdcd17745250897efcf6accbba1961a4568b2c52941210e02edb51d61d6 +size 37697 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..68af13a4c9423692b30729f73c21bb44bf4faacb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4796ad2b0b373b0307fe249d4a3a330b3f65ff8e70586350a29ca6c0411c95ef +size 38267 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..0ee778ab721eabe5120475f0a9eaf95ea6b2991d --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f892640c4297278da4f0917992cf42c9e73ca994ca5591a62fa11a130e66033 +size 37964 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..233ab8764b88f7f337c9e8ded59c796e293ba7aa --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb025828b56b07c14e4b35b5a0692f3bfd1c89cdc84fb32a5d1c8765acd13a9d +size 39007 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..dc326325acc70b78aa8d157ff3099b4597116e3f --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61053586eb7daa747ad5146f5c578d33c4ba0d44c315716eda50eeb0224c8919 +size 38573 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..16ce38fd5a796f616fe1d9f1d9e6d82362d8653b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.black_tup-na_in_legend.True-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30051ded31a6a5ff8603338cf9f591d144b4c007a7622927e008dde625d0d574 +size 41683 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..66ed0908c5516b928b7b241d192ff660f48a43d3 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f2075a62553cda57fa971185ae174a59af2743414a6d3a023f43b47783304f4 +size 20440 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..cc31ab15213e18b3b0af3d0a608baa84324207f9 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b716e5197b493d4aea2f2c0f3cb3d593c417633e56fd856ef2c28ec5d7f2ff50 +size 35678 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..bfb06ad368dd34ef68f2597e29a7d1a039476a29 --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bffee99ffd8ade5036e063b0fbbc0470b4be370800abfb810371346930f7171e +size 20668 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..65e022576ad68ab070bd511c293d793af27a33a3 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6998f7b1c8cb49ab4ca21d924dfda039d252e7323d9af268a8587e26395f82d3 +size 36463 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..4c76165ef350ed19584ab174f4756636853f3860 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b518a44e8c333d2c970574df9606f92e76b1606458f6f7ab3e6951ace82c629 +size 21022 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..c8f0e9b1c841d905d13f2368cfbd7fa0b9f6b246 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.False-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d046cfa3f46d1dd88db7ba2d180df4f57a87d994a9f73f3f04431311ab492fde +size 38205 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.off-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.off-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..66ed0908c5516b928b7b241d192ff660f48a43d3 --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.off-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f2075a62553cda57fa971185ae174a59af2743414a6d3a023f43b47783304f4 +size 20440 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.off-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.off-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..cc31ab15213e18b3b0af3d0a608baa84324207f9 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.off-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b716e5197b493d4aea2f2c0f3cb3d593c417633e56fd856ef2c28ec5d7f2ff50 +size 35678 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_data-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_data-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..90c12dbbf5f710265e169dfc735d9d6bdc2b512a --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_data-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1457a3339b6f25c35c1b6a1def640dd125a4064fd8d4c8930ef8832b8ec59260 +size 21004 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_data-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_data-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..9be310509de449e21b4881e987d3c25071c38919 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_data-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bffd400321483c839feb15b85cc09a8f1f9c6f072259d828d07afa5bd84a3a4c +size 36583 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_right-groups.3]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_right-groups.3]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..5ff33cd7f7823d0ac969e2fed6e17a080b118385 --- /dev/null +++ 
b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_right-groups.3]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e9a5383e57dc1f183c0a05ea5dc7ecfa232aa5b0df48f30f22fb15428f7b664 +size 21241 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_right-groups.all]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_right-groups.all]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..4593827e9cf321ecc1e6c35a131752f25c91dd09 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_categorical[spatial-na_color.default-na_in_legend.True-legend.on_right-groups.all]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eabb4cc160e440272c9d6e0bbb131cb5ae60bb82b8898c28b248e81262c22083 +size 39040 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.default]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..a8c4ba020e776a1a99dc4d5035141a82bafdd6e8 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.default]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65a8ac760d9f939686a0b0075cda7ae37816fa213070d35634107c4efd422f04 +size 25472 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.norm]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..7fe42b3e7e59632d9fbca5ef0e4a301c5869346b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.norm]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b1c6e102b2004bb6794348e3431a2ef5e2ef504c844703a225392b6509b502a +size 23781 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.numbers]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..197c7a7d0ca963d0be41f186f88ec0887185cebb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.numbers]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9dd96fe1a7c376b68634adc6bdc2821026eab573ba4472c8286009f6fba0d251 +size 23781 diff 
--git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.percentile]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..12666840c52cf64dfb44434c0cde245a1f2e9d62 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.percentile]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7efac42225737ebb4c0a2a6b5475eee585d2a9dcc8149b747a37ef6c6467b0f +size 25776 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.vcenter]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..207bfbe45f933ea5283749900d653259d7b903b5 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.off-vbounds.vcenter]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:730675a843fbe4042cb72097b9ef64b84870b851639970426bfadaba541d1fea +size 24608 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.default]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..a8c4ba020e776a1a99dc4d5035141a82bafdd6e8 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.default]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65a8ac760d9f939686a0b0075cda7ae37816fa213070d35634107c4efd422f04 +size 25472 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.norm]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..7fe42b3e7e59632d9fbca5ef0e4a301c5869346b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.norm]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b1c6e102b2004bb6794348e3431a2ef5e2ef504c844703a225392b6509b502a +size 23781 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.numbers]/expected.png new file mode 100644 index 
0000000000000000000000000000000000000000..197c7a7d0ca963d0be41f186f88ec0887185cebb --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.numbers]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9dd96fe1a7c376b68634adc6bdc2821026eab573ba4472c8286009f6fba0d251 +size 23781 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.percentile]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..12666840c52cf64dfb44434c0cde245a1f2e9d62 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.percentile]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7efac42225737ebb4c0a2a6b5475eee585d2a9dcc8149b747a37ef6c6467b0f +size 25776 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.vcenter]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..207bfbe45f933ea5283749900d653259d7b903b5 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_data-vbounds.vcenter]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:730675a843fbe4042cb72097b9ef64b84870b851639970426bfadaba541d1fea +size 24608 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.default]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..a8c4ba020e776a1a99dc4d5035141a82bafdd6e8 --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.default]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65a8ac760d9f939686a0b0075cda7ae37816fa213070d35634107c4efd422f04 +size 25472 diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.norm]/expected.png new file mode 100644 index 0000000000000000000000000000000000000000..7fe42b3e7e59632d9fbca5ef0e4a301c5869346b --- /dev/null +++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.norm]/expected.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
+size 23781
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..197c7a7d0ca963d0be41f186f88ec0887185cebb
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dd96fe1a7c376b68634adc6bdc2821026eab573ba4472c8286009f6fba0d251
+size 23781
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..12666840c52cf64dfb44434c0cde245a1f2e9d62
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7efac42225737ebb4c0a2a6b5475eee585d2a9dcc8149b747a37ef6c6467b0f
+size 25776
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..207bfbe45f933ea5283749900d653259d7b903b5
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.black_tup-legend.on_right-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:730675a843fbe4042cb72097b9ef64b84870b851639970426bfadaba541d1fea
+size 24608
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..f499eff3dd64d1837f7725b83e27199d2eeedcc9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebe2cafc5f1dfea8953d41f685866c80f522967df34f5e9d99a2cdcae8e6f05e
+size 24760
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa171ba1aa8507b7b46d7641b3cc5bc926e92f0a
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:838518a4bc9637ae9663b1d2eb77eb4e3757020fa8cc17ae3ee432e4cd6c7ba5
+size 23194
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..839b8e531f2d1f4e6f0d3b93405e643d46a952d4
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e22a1e8508b72f2a8e19cda31ad04b2893637cf0d791bf0d2a1a19139f4f34e4
+size 23196
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..12a6c989387fa9fcb869d14b3ec66a3e13099dc9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b2e227f8a99305c656fb8a289bb7a9e116444b4acdaa7bf0b0d43ca1e011746
+size 25115
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e1323f5f34e59c330a0f427912753f91b911ecc
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.off-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2fa3f11ba84a7603fdcd16886031fcea64e138ac38a8f201b39119052d5cfbe
+size 23970
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..f499eff3dd64d1837f7725b83e27199d2eeedcc9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebe2cafc5f1dfea8953d41f685866c80f522967df34f5e9d99a2cdcae8e6f05e
+size 24760
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa171ba1aa8507b7b46d7641b3cc5bc926e92f0a
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:838518a4bc9637ae9663b1d2eb77eb4e3757020fa8cc17ae3ee432e4cd6c7ba5
+size 23194
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..839b8e531f2d1f4e6f0d3b93405e643d46a952d4
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e22a1e8508b72f2a8e19cda31ad04b2893637cf0d791bf0d2a1a19139f4f34e4
+size 23196
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..12a6c989387fa9fcb869d14b3ec66a3e13099dc9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b2e227f8a99305c656fb8a289bb7a9e116444b4acdaa7bf0b0d43ca1e011746
+size 25115
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e1323f5f34e59c330a0f427912753f91b911ecc
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_data-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2fa3f11ba84a7603fdcd16886031fcea64e138ac38a8f201b39119052d5cfbe
+size 23970
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..f499eff3dd64d1837f7725b83e27199d2eeedcc9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebe2cafc5f1dfea8953d41f685866c80f522967df34f5e9d99a2cdcae8e6f05e
+size 24760
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa171ba1aa8507b7b46d7641b3cc5bc926e92f0a
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:838518a4bc9637ae9663b1d2eb77eb4e3757020fa8cc17ae3ee432e4cd6c7ba5
+size 23194
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..839b8e531f2d1f4e6f0d3b93405e643d46a952d4
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e22a1e8508b72f2a8e19cda31ad04b2893637cf0d791bf0d2a1a19139f4f34e4
+size 23196
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..12a6c989387fa9fcb869d14b3ec66a3e13099dc9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b2e227f8a99305c656fb8a289bb7a9e116444b4acdaa7bf0b0d43ca1e011746
+size 25115
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e1323f5f34e59c330a0f427912753f91b911ecc
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[pca-na_color.default-legend.on_right-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2fa3f11ba84a7603fdcd16886031fcea64e138ac38a8f201b39119052d5cfbe
+size 23970
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0dc952b40da4ce4c9ea2811fd31af1e4c41c553
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e277e5ab03b962d6d79382789e6c05979d9d971d5292dc24f58ae055a9d29f3
+size 40161
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..8bf9d79c92c229c094201dc6c1029d66a477acff
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ee04999018aa81fd6da7d11ba2f4870082f6342dd4c7ba2a196b502f80921af
+size 39676
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..038a0fa38a20e66ea684af45a5909d02fc4ee2d1
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1708e0445821af614dd38bb2ba1955470ae7e76b34595235a96289787f59ce7
+size 39676
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb2bb8a40a6c58fc175a32bf442e57e638d25673
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:389ac5c36d0af5208d3a8f27072aa2d91115178467ed57dd6f9339f1572fc0b5
+size 40601
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc038eaf22f4f63c4306633a66abedea6a7023db
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.off-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c4bbe44d15a8b079701798295b706933f51b98ebff685416acdab0cecd62e82
+size 40153
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0dc952b40da4ce4c9ea2811fd31af1e4c41c553
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e277e5ab03b962d6d79382789e6c05979d9d971d5292dc24f58ae055a9d29f3
+size 40161
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..8bf9d79c92c229c094201dc6c1029d66a477acff
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ee04999018aa81fd6da7d11ba2f4870082f6342dd4c7ba2a196b502f80921af
+size 39676
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..038a0fa38a20e66ea684af45a5909d02fc4ee2d1
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1708e0445821af614dd38bb2ba1955470ae7e76b34595235a96289787f59ce7
+size 39676
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb2bb8a40a6c58fc175a32bf442e57e638d25673
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:389ac5c36d0af5208d3a8f27072aa2d91115178467ed57dd6f9339f1572fc0b5
+size 40601
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc038eaf22f4f63c4306633a66abedea6a7023db
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_data-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c4bbe44d15a8b079701798295b706933f51b98ebff685416acdab0cecd62e82
+size 40153
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0dc952b40da4ce4c9ea2811fd31af1e4c41c553
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e277e5ab03b962d6d79382789e6c05979d9d971d5292dc24f58ae055a9d29f3
+size 40161
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..8bf9d79c92c229c094201dc6c1029d66a477acff
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ee04999018aa81fd6da7d11ba2f4870082f6342dd4c7ba2a196b502f80921af
+size 39676
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..038a0fa38a20e66ea684af45a5909d02fc4ee2d1
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1708e0445821af614dd38bb2ba1955470ae7e76b34595235a96289787f59ce7
+size 39676
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb2bb8a40a6c58fc175a32bf442e57e638d25673
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:389ac5c36d0af5208d3a8f27072aa2d91115178467ed57dd6f9339f1572fc0b5
+size 40601
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc038eaf22f4f63c4306633a66abedea6a7023db
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.black_tup-legend.on_right-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c4bbe44d15a8b079701798295b706933f51b98ebff685416acdab0cecd62e82
+size 40153
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..2234b78d88e7379003106fb288b95a97004cd520
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ec97fa0d8d3ec38677cbdd24f513a1e6d26d59459fd342d8a7aed5f77b4b781
+size 36158
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..0489e1dfdd5fffcc37956e35061631b4b630986f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04fca94fc4ad62ffeb04a0f4ffe422919fd103784280b9827477c1ce9d39f6b8
+size 35595
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..44ef49f913b0be751c1fe3a4ca0997b017477af9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:321cd6276f4299698fafc97fa4c0ba72778f9af3b9ec78dcecca091e9e9181d9
+size 35595
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bac32f4aeb02457e8c076e8de8993d05e78a5481
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:584884afa0d64ad3c4a5f4e119aed4ce6d4d89526d926730f41de1a77598b466
+size 36544
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..53abdbfdd1f56153aa5591303cff9162fe34fc34
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.off-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:939a60963938f933ded69ddbdcc837cbd9b12ffbe241071b156a36e17a3f7fe2
+size 36119
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..2234b78d88e7379003106fb288b95a97004cd520
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ec97fa0d8d3ec38677cbdd24f513a1e6d26d59459fd342d8a7aed5f77b4b781
+size 36158
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..0489e1dfdd5fffcc37956e35061631b4b630986f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04fca94fc4ad62ffeb04a0f4ffe422919fd103784280b9827477c1ce9d39f6b8
+size 35595
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..44ef49f913b0be751c1fe3a4ca0997b017477af9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:321cd6276f4299698fafc97fa4c0ba72778f9af3b9ec78dcecca091e9e9181d9
+size 35595
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bac32f4aeb02457e8c076e8de8993d05e78a5481
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:584884afa0d64ad3c4a5f4e119aed4ce6d4d89526d926730f41de1a77598b466
+size 36544
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..53abdbfdd1f56153aa5591303cff9162fe34fc34
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_data-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:939a60963938f933ded69ddbdcc837cbd9b12ffbe241071b156a36e17a3f7fe2
+size 36119
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.default]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.default]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..2234b78d88e7379003106fb288b95a97004cd520
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.default]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ec97fa0d8d3ec38677cbdd24f513a1e6d26d59459fd342d8a7aed5f77b4b781
+size 36158
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.norm]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.norm]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..0489e1dfdd5fffcc37956e35061631b4b630986f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.norm]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04fca94fc4ad62ffeb04a0f4ffe422919fd103784280b9827477c1ce9d39f6b8
+size 35595
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.numbers]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.numbers]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..44ef49f913b0be751c1fe3a4ca0997b017477af9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.numbers]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:321cd6276f4299698fafc97fa4c0ba72778f9af3b9ec78dcecca091e9e9181d9
+size 35595
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.percentile]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.percentile]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bac32f4aeb02457e8c076e8de8993d05e78a5481
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.percentile]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:584884afa0d64ad3c4a5f4e119aed4ce6d4d89526d926730f41de1a77598b466
+size 36544
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.vcenter]/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.vcenter]/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..53abdbfdd1f56153aa5591303cff9162fe34fc34
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding-missing-values/test_missing_values_continuous[spatial-na_color.default-legend.on_right-vbounds.vcenter]/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:939a60963938f933ded69ddbdcc837cbd9b12ffbe241071b156a36e17a3f7fe2
+size 36119
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding_groups_size/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding_groups_size/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..f549f42d2715cf3a39b6b5d5e304d63e025fcda6
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding_groups_size/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2138ee89c724653a19bbf1e103da501e9df43b4db64edd014018ca1439b8a95a
+size 12076
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/embedding_outline_vmin_vmax/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/embedding_outline_vmin_vmax/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..a7e86bd38fcb980a65b840ccee54eec7273e6270
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/embedding_outline_vmin_vmax/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a77dc94f2e3447a1909f4f9b0c117bab09c1265a1b1f83291bbada8807561902
+size 106635
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..6d53c8d93029dd9f16a56aa752bfe18d354cdadf
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96a2688cf882b9e212eba1dd3fd29893c16dd5115a58600cbf45155d841e1340
+size 12528
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap2/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap2/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f407192573fdf5135727782533062abc35e6124
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap2/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6164892e6e6b64c6037eb0c76fae76b60c414e73302ad9c08d2d9c1d3b2c0a0
+size 11787
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_gene_symbols/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_gene_symbols/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..839052fdfe5a2aa6d5cf14b956176f6778a0ac19
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_gene_symbols/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29cf12fcb85acac3f248e2f202ae0bd39be82d51aa51e4b987ed8509e96f22c7
+size 13105
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_small_alignment/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_small_alignment/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb61e2a8ff6a7d5305873b40c36dbbfaaaaabcf7
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_small_alignment/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a16a1d920dc2624d25207dd7c4fe05a08d344a7bac408c16fb2623999f25ec6
+size 3201
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_small_swap_alignment/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_small_swap_alignment/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..4282a7088bbc3b4e317cfd0b11d217bbed734208
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_small_swap_alignment/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b660a8ad72b9973578225f4a752235011078f5ae05bc4307ac9eeaed31fa87c
+size 3119
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_std_scale_obs/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_std_scale_obs/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bc194f554ae0605c98c3059dfd26daa0e9997356
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_std_scale_obs/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03b1c8f5751b05ca195e7c672504aeffe329313939c13975c30fbb294bdd86ed
+size 12125
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_std_scale_var/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_std_scale_var/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..eba1b6a76098be7bc4e77e1d1e51ec971e728a86
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_std_scale_var/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:384401dadd068f5d12da2639693c165c260f18b74af63e44c237cce17a51bb05
+size 13032
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_swap_axes/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_swap_axes/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..430ab7c9f266c2134dfbf3dfa98e6d78c54e12b7
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_swap_axes/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:373f04b7ae3a1fbd7bb75ebc9e2e98ec62e23c29c298caedb1c8adbcc300fc56
+size 12245
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_var_as_dict/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_var_as_dict/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7a19c793f17672abb7e1f92fbf235e0c089db92
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/heatmap_var_as_dict/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec3ee59d907759f4149aa7f45479af8ddc9d9824f6aa896b7a5bf3211a905c96
+size 6008
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..062c7df60de44a8af23c84d0a5913a97562480db
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57c6f1720be257cd553d39a6afd8b69579a8972eb6ccd1ae5cd4eac5128d9252
+size 5623
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot2/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot2/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa764e8a44497c6c4157f4ddf5b6968453c6cf0e
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot2/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfd479c12b03220723f91be66d9e9c55c0c30a27994bbf2372ebbfbd7e99e40a
+size 7482
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_gene_symbols/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_gene_symbols/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..775e3a007a2fb3e3d0c93ebb5ccaffd8c76205c7
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_gene_symbols/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bcd02dbdb3894a02ef7827fd185c9e946bb451e6bfae310d88c6933de5fdcfc
+size 5091
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_std_scale_group/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_std_scale_group/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..99e8616d7c60809699f57190fbe403c830e5f311
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_std_scale_group/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fe809f319550524b0eb2f06f9353bd32d9a89dd11ee79909867d672aa8e6dfc
+size 6888
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_std_scale_var_dict/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_std_scale_var_dict/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff5b43016544a26b2e0fefdc5589c43ee1afee6f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_std_scale_var_dict/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2548d3cccb47b236cf496a29bbff6b0f54f834818c803668e9b175bbd3fb412b
+size 7423
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_swap_axes/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_swap_axes/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..744a14fee07d5a8977e77ff9fb8e6a6c359c0332
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_swap_axes/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae2951399baaeddba2d9e451c1871cb75516b70a22734b8541aa051e90b99632
+size 5854
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_with_totals/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_with_totals/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..ef16bf8c5705188a80d24053eee514b1b4e0cb5b
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/matrixplot_with_totals/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0e2b4df940a152aad01d7e361b09798a8b022ffe34911854c4900ac2480d8a7
+size 10062
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/multipanel/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/multipanel/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e531bd621ad0b9ffb9caa6c8a365b61ddb79ab8
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/multipanel/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:653caa0019c50df4614bf81908f4f313c771a9bd0e0d47bc7d8374a216e040a2
+size 48784
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/multipanel_vcenter/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/multipanel_vcenter/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bd0c93d48b8966cedf37671454f6fad4bf69ccc0
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/multipanel_vcenter/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f030bb956c75dfdb28ad9b08a0753a559b7167a5843e56986def2110b1a69361
+size 52657
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/multiple_plots/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/multiple_plots/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..4c231ae49fb8749d59cebe1c90c5e55209921074
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/multiple_plots/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5d020f74a2b4151f836a7112b94e950671420b2962716d2b2397438cd015c81
+size 35895
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/no_colorbar/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/no_colorbar/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..a6a549a8f8a4274d0f1881f15a38204d2d50482f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/no_colorbar/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04904293d1f3d3cc1905a7b652bd8e6b0a201c3c46ed6913120729094f79d735
+size 22956
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..0d058fcecd8a7dff8fd31f9ca5f8a88a068b8baf
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4661b6f4b65d83078350b5e47d644266e9c08d5beb3cf79100b57d5f901f5388
+size 12083
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga_compare_pbmc3k/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga_compare_pbmc3k/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..27d6ba3b32267aedb72dee614c9751a16269b6ed
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga_compare_pbmc3k/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ba48beac03814633f0cb6fa5c744dc3f5d20bf0d5e9d77b5db8dd916375bb0a
+size 36004
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga_compare_pca/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga_compare_pca/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5c7658e81849b5e245ea051865ed8d39b7adb41
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga_compare_pca/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec113981cb2216ad722b53f668f151e3ad6c4a023e5bf56faf8bbb7f35b0f8bf
+size 32520
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga_continuous/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga_continuous/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb6dc1b255a5bf260a2e5454104a0376da69c9e7
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga_continuous/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61c5cc7e6dd88acb30c8380a1f42a6265115cb2ebb8bf9fef5c605bfb1217de3
+size 14428
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga_continuous_multiple/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga_continuous_multiple/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..3ac3bf7869b30446303a4229af56a849c3992810
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga_continuous_multiple/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d88ccb0e9c11c0f891cab80fa69ce07c834bdfb07fdeeb77cf3178257f07b1d2
+size 23487
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga_path/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga_path/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..06abbf18de0c7be2406f2bdcafaa89b6cfccf307
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga_path/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5e938912458e6be42b15379d73c1fe51facdf3d1521f52bbf2bf33de4fc217d
+size 5421
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/paga_pie/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/paga_pie/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..671d105bc266dc59d02c6241f5c46e0e1f22a3d2
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/paga_pie/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd8a9d923f3d9ad9086d36d670ecbb960e9c8eb13d5b78243182a895b6bb5200
+size 19565
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/pca/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/pca/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..151bc27b63f23a77a3770ab503f2d6a07eb06792
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/pca/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae548889bab12cd7d0d782039402fbc910c9a3ae97ad4d81fd723c2dae1a0157
+size 23147
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/pca_loadings/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/pca_loadings/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..295680e7b46a3084641476534539de3ea02a447f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/pca_loadings/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b39450097b9865fce9e8aec1d4be5ce49359d68d0a28266230d83a03374c044
+size 18431
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/pca_loadings_10_points/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/pca_loadings_10_points/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..b6769f0e5aa1233bf58e60b886818e99b26b55a5
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/pca_loadings_10_points/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db8824ce1d43df13b8b2171cb0aa590beaf7c7fafd9f249e104422cb34721bdc
+size 10955
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/pca_mask/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/pca_mask/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..fbd9ba480f017ccb17608cb452eb44e9ec8f23d9
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/pca_mask/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ab2a32c24b51a4e0ca9c64cd558dd65422a776924e8e68c35c1591797e51c3e
+size 55095
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/pca_one_marker_multiple_colors/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/pca_one_marker_multiple_colors/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f6fcd6ca8764e8b6bc6ef299cad902f8933f48f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/pca_one_marker_multiple_colors/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cfa24c9aa6cd0328ee715631e927c677e441aeabd7e24ffc101a07cbe55a70d
+size 48173
diff --git a/testbed/scverse__scanpy/scanpy/tests/_images/ranked_genes_dotplot_logfoldchange_vcenter/expected.png b/testbed/scverse__scanpy/scanpy/tests/_images/ranked_genes_dotplot_logfoldchange_vcenter/expected.png
new file mode 100644
index 0000000000000000000000000000000000000000..b36ec4d8de53f69b333308c2cc2391ab9a99ac1f
--- /dev/null
+++ b/testbed/scverse__scanpy/scanpy/tests/_images/ranked_genes_dotplot_logfoldchange_vcenter/expected.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf76a7b97609e05b533a92117dcafe17b36b64528d4e7f153afe600e25ec4322
+size 57075