| { |
| "repo": "Project-MONAI/MONAI", |
| "pull_number": 2801, |
| "url": "https://github.com/Project-MONAI/MONAI/pull/2801", |
| "instance_id": "Project-MONAI__MONAI-2801", |
| "issue_numbers": [], |
| "base_commit": "7f05d7873cfe9cb8aaeee4341e8585ca96ada1dd", |
| "patch": "diff --git a/monai/transforms/__init__.py b/monai/transforms/__init__.py\nindex 5f9ed84bcd..f409c0bd8c 100644\n--- a/monai/transforms/__init__.py\n+++ b/monai/transforms/__init__.py\n@@ -515,6 +515,7 @@\n map_binary_to_indices,\n map_classes_to_indices,\n map_spatial_axes,\n+ print_transform_backends,\n rand_choice,\n rescale_array,\n rescale_array_int_max,\ndiff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py\nindex 113fbadbb1..5d26ee0e63 100644\n--- a/monai/transforms/intensity/array.py\n+++ b/monai/transforms/intensity/array.py\n@@ -37,6 +37,7 @@\n ensure_tuple_size,\n fall_back_tuple,\n )\n+from monai.utils.enums import TransformBackends\n \n __all__ = [\n \"RandGaussianNoise\",\n@@ -81,7 +82,7 @@ class RandGaussianNoise(RandomizableTransform):\n std: Standard deviation (spread) of distribution.\n \"\"\"\n \n- backend = [\"torch\", \"numpy\"]\n+ backend = [TransformBackends.TORCH, TransformBackends.NUMPY]\n \n def __init__(self, prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: float = 0.1) -> None:\n RandomizableTransform.__init__(self, prob)\n@@ -852,7 +853,7 @@ class SavitzkyGolaySmooth(Transform):\n or ``'circular'``. Default: ``'zeros'``. 
See ``torch.nn.Conv1d()`` for more information.\n \"\"\"\n \n- backend = [\"numpy\"]\n+ backend = [TransformBackends.NUMPY]\n \n def __init__(self, window_length: int, order: int, axis: int = 1, mode: str = \"zeros\"):\n \ndiff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py\nindex d3780641ae..7ca21432c5 100644\n--- a/monai/transforms/intensity/dictionary.py\n+++ b/monai/transforms/intensity/dictionary.py\n@@ -45,6 +45,7 @@\n from monai.transforms.transform import MapTransform, RandomizableTransform\n from monai.transforms.utils import is_positive\n from monai.utils import convert_to_dst_type, ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple\n+from monai.utils.enums import TransformBackends\n \n __all__ = [\n \"RandGaussianNoised\",\n@@ -144,7 +145,7 @@ class RandGaussianNoised(RandomizableTransform, MapTransform):\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n \n- backend = [\"torch\", \"numpy\"]\n+ backend = [TransformBackends.TORCH, TransformBackends.NUMPY]\n \n def __init__(\n self,\ndiff --git a/monai/transforms/transform.py b/monai/transforms/transform.py\nindex aff468b2a5..ef49bc706c 100644\n--- a/monai/transforms/transform.py\n+++ b/monai/transforms/transform.py\n@@ -22,6 +22,7 @@\n from monai import transforms\n from monai.config import KeysCollection\n from monai.utils import MAX_SEED, ensure_tuple\n+from monai.utils.enums import TransformBackends\n \n __all__ = [\n \"ThreadUnsafe\",\n@@ -212,7 +213,7 @@ class Transform(ABC):\n :py:class:`monai.transforms.Compose`\n \"\"\"\n \n- backend: List[str] = []\n+ backend: List[TransformBackends] = []\n \"\"\"Transforms should add data types to this list if they are capable of performing a transform without\n modifying the input type. 
For example, [\\\"torch.Tensor\\\", \\\"np.ndarray\\\"] means that no copies of the data\n are required if the input is either \\\"torch.Tensor\\\" or \\\"np.ndarray\\\".\"\"\"\ndiff --git a/monai/transforms/utils.py b/monai/transforms/utils.py\nindex 5886c35974..e81cb7ca17 100644\n--- a/monai/transforms/utils.py\n+++ b/monai/transforms/utils.py\n@@ -13,16 +13,18 @@\n import random\n import warnings\n from contextlib import contextmanager\n+from inspect import getmembers, isclass\n from typing import Any, Callable, Hashable, Iterable, List, Optional, Sequence, Tuple, Union\n \n import numpy as np\n import torch\n \n+import monai\n import monai.transforms.transform\n from monai.config import DtypeLike, IndexSelection\n from monai.networks.layers import GaussianFilter\n from monai.transforms.compose import Compose, OneOf\n-from monai.transforms.transform import MapTransform\n+from monai.transforms.transform import MapTransform, Transform\n from monai.utils import (\n GridSampleMode,\n InterpolateMode,\n@@ -77,6 +79,7 @@\n \"zero_margins\",\n \"equalize_hist\",\n \"get_number_image_type_conversions\",\n+ \"print_transform_backends\",\n ]\n \n \n@@ -1149,3 +1152,59 @@ def _get_data(obj, key):\n if not isinstance(curr_data, prev_type) or curr_device != prev_device:\n num_conversions += 1\n return num_conversions\n+\n+\n+def print_transform_backends():\n+ \"\"\"Prints a list of backends of all MONAI transforms.\"\"\"\n+\n+ class Colours:\n+ red = \"91\"\n+ green = \"92\"\n+ yellow = \"93\"\n+\n+ def print_colour(t, colour):\n+ print(f\"\\033[{colour}m{t}\\033[00m\")\n+\n+ tr_total = 0\n+ tr_t_or_np = 0\n+ tr_t = 0\n+ tr_np = 0\n+ tr_uncategorised = 0\n+ unique_transforms = []\n+ for n, obj in getmembers(monai.transforms):\n+ # skip aliases\n+ if obj in unique_transforms:\n+ continue\n+ unique_transforms.append(obj)\n+\n+ if isclass(obj) and issubclass(obj, Transform):\n+ if n in [\n+ \"Transform\",\n+ \"InvertibleTransform\",\n+ \"Lambda\",\n+ \"LambdaD\",\n+ 
\"Compose\",\n+ \"RandomizableTransform\",\n+ \"OneOf\",\n+ \"BatchInverseTransform\",\n+ \"InverteD\",\n+ ]:\n+ continue\n+ tr_total += 1\n+ if obj.backend == [\"torch\", \"numpy\"]:\n+ tr_t_or_np += 1\n+ print_colour(f\"TorchOrNumpy: {n}\", Colours.green)\n+ elif obj.backend == [\"torch\"]:\n+ tr_t += 1\n+ print_colour(f\"Torch: {n}\", Colours.green)\n+ elif obj.backend == [\"numpy\"]:\n+ tr_np += 1\n+ print_colour(f\"Numpy: {n}\", Colours.yellow)\n+ else:\n+ tr_uncategorised += 1\n+ print_colour(f\"Uncategorised: {n}\", Colours.red)\n+ print(\"Total number of transforms:\", tr_total)\n+ print_colour(f\"Number transforms allowing both torch and numpy: {tr_t_or_np}\", Colours.green)\n+ print_colour(f\"Number of TorchTransform: {tr_t}\", Colours.green)\n+ print_colour(f\"Number of NumpyTransform: {tr_np}\", Colours.yellow)\n+ print_colour(f\"Number of uncategorised: {tr_uncategorised}\", Colours.red)\ndiff --git a/monai/utils/__init__.py b/monai/utils/__init__.py\nindex 16231ba17e..dd300fce34 100644\n--- a/monai/utils/__init__.py\n+++ b/monai/utils/__init__.py\n@@ -30,6 +30,7 @@\n NumpyPadMode,\n PytorchPadMode,\n SkipMode,\n+ TransformBackends,\n UpsampleMode,\n Weight,\n )\ndiff --git a/monai/utils/enums.py b/monai/utils/enums.py\nindex 014363e14f..847df9e2d3 100644\n--- a/monai/utils/enums.py\n+++ b/monai/utils/enums.py\n@@ -29,6 +29,7 @@\n \"InverseKeys\",\n \"CommonKeys\",\n \"ForwardMode\",\n+ \"TransformBackends\",\n ]\n \n \n@@ -233,3 +234,12 @@ class CommonKeys:\n LABEL = \"label\"\n PRED = \"pred\"\n LOSS = \"loss\"\n+\n+\n+class TransformBackends(Enum):\n+ \"\"\"\n+ Transform backends.\n+ \"\"\"\n+\n+ TORCH = \"torch\"\n+ NUMPY = \"numpy\"\n", |
| "test_patch": "diff --git a/tests/test_print_transform_backends.py b/tests/test_print_transform_backends.py\nnew file mode 100644\nindex 0000000000..09828f0a27\n--- /dev/null\n+++ b/tests/test_print_transform_backends.py\n@@ -0,0 +1,23 @@\n+# Copyright 2020 - 2021 MONAI Consortium\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+# http://www.apache.org/licenses/LICENSE-2.0\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import unittest\n+\n+from monai.transforms.utils import print_transform_backends\n+\n+\n+class TestPrintTransformBackends(unittest.TestCase):\n+ def test_get_number_of_conversions(self):\n+ print_transform_backends()\n+\n+\n+if __name__ == \"__main__\":\n+ unittest.main()\n", |
| "problem_info": { |
| "first_commit_time": 1629290958.0, |
| "pr_title": "print backends of all MONAI transforms", |
| "pr_body": "### Description\r\nPrint backends of all MONAI transforms.\r\n\r\n### Status\r\n**Ready**\r\n\r\n### Types of changes\r\n<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->\r\n- [x] Non-breaking change (fix or new feature that would not break existing functionality).\r\n- [x] Quick tests passed locally by running `./runtests.sh --quick --unittests`.\r\n- [x] In-line docstrings updated.\r\n- [x] Documentation updated, tested `make html` command in the `docs/` folder.\r\n", |
| "pr_timeline": [ |
| { |
| "time": 1629300741.0, |
| "comment": "/build" |
| }, |
| { |
| "time": 1629382100.0, |
| "comment": "/black" |
| } |
| ], |
| "issues": {} |
| }, |
| "created_at": "2021-08-18T12:50:09Z", |
| "readmes": { |
| "README.md": "<p align=\"center\">\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img src=\"assets/FEA-Bench-full.png\" style=\"height: 10em\" alt=\"fea-bench\" />\n </a>\n</p>\n\n<p align=\"center\">\n <em>A benchmark that aims to evaluate the capability of implementing new features in the code repositories.</em>\n</p>\n\n<p align=\"center\">\n <a href=\"https://arxiv.org/abs/2503.06680\">\n <img alt=\"paper\" src=\"https://img.shields.io/badge/ArXiv-%23B31B1B?style=for-the-badge&logo=arXiv\">\n </a>\n <a href=\"./LICENSE\">\n <img alt=\"License\" src=\"https://img.shields.io/github/license/SWE-bench/SWE-bench?style=for-the-badge\">\n </a>\n <a href=\"https://gmago-leway.github.io/fea-bench.github.io/\">\n <img alt=\"Leaderboard\" src=\"https://img.shields.io/badge/leaderboard-%F0%9F%8F%86-1?style=for-the-badge\">\n </a>\n <a href=\"https://huggingface.co/datasets/microsoft/FEA-Bench\">\n <img alt=\"dataset\" src=\"https://img.shields.io/badge/Dataset-HF-FFD21E.svg?style=for-the-badge&logo=huggingface&logoColor=FFD21E\">\n </a>\n</p>\n\n---\n\n# Evaluation\n\nThis repository is the official implementation of the paper \"FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation.\" It can be used for baseline evaluation using the prompts mentioned in the paper.\n\nThe repository includes several functionalities, primarily for obtaining the full dataset, running model inference aligned with the paper, and evaluating the results. The complete pipeline is as follows:\n\n## 1. Environment Setup\n\nYou can create a new Python environment and install all dependencies using:\n```bash\npip install -e .\n```\nIf you plan to use VLLM inference, ensure that the installed libraries match your hardware.\n\n## 2. Building the Full Evaluation Dataset\n\nDue to licensing and company policies, we cannot release the full dataset. 
Our published version ([https://huggingface.co/datasets/microsoft/FEA-Bench](https://huggingface.co/datasets/microsoft/FEA-Bench)) only includes essential attributes, and the remaining content needs to be scraped from GitHub.\n\nTo construct the full FEA-Bench dataset and save it in the `feabench-data` folder, run the following command. Note that you need to replace `GITHUB_TOKEN` with your own GitHub token, which should have read-only access to public repositories:\n```bash\nexport GITHUB_TOKEN=\"xxx\"\n\npython -m feabench.get_dataset \\\n --dataset microsoft/FEA-Bench \\\n --testbed feabench-data/testbed \\\n --lite_ids instances_lite.json \\\n --medium_file feabench-data/FEA-Bench-v1.0-medium.jsonl \\\n --standard_dataset_path feabench-data/FEA-Bench-v1.0-Standard \\\n --oracle_dataset_path feabench-data/FEA-Bench-v1.0-Oracle \\\n --lite_standard_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Standard \\\n --lite_oracle_dataset_path feabench-data/FEA-Bench-v1.0-Lite-Oracle\n```\n\n## 3. Running Model Inference\n\nOur repository only provides inference methods consistent with those in the paper. 
Agentless and other agent-based inferences can use the `FEA-Bench-v1.0-Lite-Standard` dataset constructed in the previous step, which is aligned with the format of SWE-Bench.\n\n### Example of VLLM Inference:\n```bash\nexport MAX_SEQ_LEN=128000\nexport MAX_GEN_LEN=4096\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=Qwen/Qwen2.5-Coder-3B-Instruct\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type vllm \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE\n```\n\n### Example of OpenAI API-style Inference:\n(DEEPSEEK_TOKENIZER is only required when using DeepSeek model inference)\n```bash\nexport DEEPSEEK_TOKENIZER_PATH=\"xxx\"\nexport OPENAI_API_KEY=\"xxx\"\nexport OPENAI_BASE_URL=\"https://api.deepseek.com\"\n\nDATASET_PATH=feabench-data/FEA-Bench-v1.0-Oracle\nMODEL_NAME=deepseek-chat\nRESULTS_ROOT_DIR=scripts/experiments/results_full\n\nPROMPT_MODE=natural-detailed\npython -m feabench.run_prediction \\\n --dataset_name_or_path $DATASET_PATH \\\n --model_type openai \\\n --model_name_or_path $MODEL_NAME \\\n --input_text $PROMPT_MODE \\\n --output_dir $RESULTS_ROOT_DIR/$PROMPT_MODE \\\n --num_proc 1\n```\n\nAfter running the inference, you should see the output `.jsonl` result files in the specified `output_dir`.\n\n## 4. Running Model Evaluation\n\nOur evaluation process is based on the code provided by SWE-Bench. 
We have provided a patch file `swe-bench.diff` to include the environment configurations for the task instances we are involved in.\n\nClone the SWE-Bench repository and apply the patch:\n```bash\nmkdir -p evaluator\ncd evaluator\ngit clone https://github.com/SWE-bench/SWE-bench.git\ncd SWE-bench\ngit checkout a0536ee6f9fd5ff88acf17a36a384bf3da3d93d6\ngit apply ../../swe-bench.diff\nconda create --name fea-eval python=3.11\nconda activate fea-eval\npip install -e .\n```\n\nTo verify that the FEA-Bench task instances can run correctly on your machine, you can build a gold result based on the dataset:\n```bash\npython -m feabench.get_gold_results \\\n --dataset_name_or_path feabench-data/FEA-Bench-v1.0-Standard \\\n --save_dir feabench-data/experiments/gold \\\n --file_name Gold__FEABench_v1.0__test.jsonl\n```\n\nThe command to run the evaluation script is as follows (using the gold result constructed above as an example):\n```bash\npython -m swebench.harness.run_evaluation \\\n --dataset_name ../../feabench-data/FEA-Bench-v1.0-Standard \\\n --predictions_path ../../feabench-data/experiments/gold/Gold__FEABench_v1.0__test.jsonl \\\n --max_workers 10 \\\n --cache_level instance \\\n --timeout 900 \\\n --run_id FEABench_v1_Gold\n```\nThe usage is identical to SWE-Bench. You can set the cache level `cache_level` based on your disk size. You should then obtain a result file similar to the following `.json` format:\n```json\n{\n \"total_instances\": 1401,\n \"submitted_instances\": 1401,\n \"completed_instances\": 1401,\n \"resolved_instances\": 1401,\n \"unresolved_instances\": 0,\n \"empty_patch_instances\": 0,\n \"error_instances\": 0,\n ...\n}\n```\n\nCongratulations! You have completed the usage of FEA-Bench. 
If you have any questions, please raise them in the issues.\n\n---\n\nFor more details, please refer to the [FEA-Bench Paper](https://arxiv.org/abs/2503.06680).\nIf you find our work helpful, we would be grateful if you could cite our work.\n```\n@misc{li2025feabenchbenchmarkevaluatingrepositorylevel,\n title={FEA-Bench: A Benchmark for Evaluating Repository-Level Code Generation for Feature Implementation}, \n author={Wei Li and Xin Zhang and Zhongxin Guo and Shaoguang Mao and Wen Luo and Guangyue Peng and Yangyu Huang and Houfeng Wang and Scarlett Li},\n year={2025},\n eprint={2503.06680},\n archivePrefix={arXiv},\n primaryClass={cs.SE},\n url={https://arxiv.org/abs/2503.06680}, \n}\n```\n\n\n\n## Contributing\n\nThis project welcomes contributions and suggestions. Most contributions require you to agree to a\nContributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us\nthe rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.\n\nWhen you submit a pull request, a CLA bot will automatically determine whether you need to provide\na CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions\nprovided by the bot. You will only need to do this once across all repos using our CLA.\n\nThis project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).\nFor more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or\ncontact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. 
Authorized use of Microsoft \ntrademarks or logos is subject to and must follow \n[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).\nUse of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.\nAny use of third-party trademarks or logos are subject to those third-party's policies.\n" |
| }, |
| "files": { |
| "monai/transforms/__init__.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .adaptors import FunctionSignature, adaptor, apply_alias, to_kwargs\nfrom .compose import Compose, OneOf\nfrom .croppad.array import (\n BorderPad,\n BoundingRect,\n CenterScaleCrop,\n CenterSpatialCrop,\n CropForeground,\n DivisiblePad,\n RandCropByLabelClasses,\n RandCropByPosNegLabel,\n RandScaleCrop,\n RandSpatialCrop,\n RandSpatialCropSamples,\n RandWeightedCrop,\n ResizeWithPadOrCrop,\n SpatialCrop,\n SpatialPad,\n)\nfrom .croppad.batch import PadListDataCollate\nfrom .croppad.dictionary import (\n BorderPadd,\n BorderPadD,\n BorderPadDict,\n BoundingRectd,\n BoundingRectD,\n BoundingRectDict,\n CenterScaleCropd,\n CenterScaleCropD,\n CenterScaleCropDict,\n CenterSpatialCropd,\n CenterSpatialCropD,\n CenterSpatialCropDict,\n CropForegroundd,\n CropForegroundD,\n CropForegroundDict,\n DivisiblePadd,\n DivisiblePadD,\n DivisiblePadDict,\n NumpyPadModeSequence,\n RandCropByLabelClassesd,\n RandCropByLabelClassesD,\n RandCropByLabelClassesDict,\n RandCropByPosNegLabeld,\n RandCropByPosNegLabelD,\n RandCropByPosNegLabelDict,\n RandScaleCropd,\n RandScaleCropD,\n RandScaleCropDict,\n RandSpatialCropd,\n RandSpatialCropD,\n RandSpatialCropDict,\n RandSpatialCropSamplesd,\n RandSpatialCropSamplesD,\n RandSpatialCropSamplesDict,\n RandWeightedCropd,\n RandWeightedCropD,\n RandWeightedCropDict,\n ResizeWithPadOrCropd,\n ResizeWithPadOrCropD,\n 
ResizeWithPadOrCropDict,\n SpatialCropd,\n SpatialCropD,\n SpatialCropDict,\n SpatialPadd,\n SpatialPadD,\n SpatialPadDict,\n)\nfrom .intensity.array import (\n AdjustContrast,\n DetectEnvelope,\n GaussianSharpen,\n GaussianSmooth,\n GibbsNoise,\n HistogramNormalize,\n KSpaceSpikeNoise,\n MaskIntensity,\n NormalizeIntensity,\n RandAdjustContrast,\n RandBiasField,\n RandCoarseDropout,\n RandGaussianNoise,\n RandGaussianSharpen,\n RandGaussianSmooth,\n RandGibbsNoise,\n RandHistogramShift,\n RandKSpaceSpikeNoise,\n RandRicianNoise,\n RandScaleIntensity,\n RandShiftIntensity,\n RandStdShiftIntensity,\n SavitzkyGolaySmooth,\n ScaleIntensity,\n ScaleIntensityRange,\n ScaleIntensityRangePercentiles,\n ShiftIntensity,\n StdShiftIntensity,\n ThresholdIntensity,\n)\nfrom .intensity.dictionary import (\n AdjustContrastd,\n AdjustContrastD,\n AdjustContrastDict,\n GaussianSharpend,\n GaussianSharpenD,\n GaussianSharpenDict,\n GaussianSmoothd,\n GaussianSmoothD,\n GaussianSmoothDict,\n GibbsNoised,\n GibbsNoiseD,\n GibbsNoiseDict,\n HistogramNormalized,\n HistogramNormalizeD,\n HistogramNormalizeDict,\n KSpaceSpikeNoised,\n KSpaceSpikeNoiseD,\n KSpaceSpikeNoiseDict,\n MaskIntensityd,\n MaskIntensityD,\n MaskIntensityDict,\n NormalizeIntensityd,\n NormalizeIntensityD,\n NormalizeIntensityDict,\n RandAdjustContrastd,\n RandAdjustContrastD,\n RandAdjustContrastDict,\n RandBiasFieldd,\n RandBiasFieldD,\n RandBiasFieldDict,\n RandCoarseDropoutd,\n RandCoarseDropoutD,\n RandCoarseDropoutDict,\n RandGaussianNoised,\n RandGaussianNoiseD,\n RandGaussianNoiseDict,\n RandGaussianSharpend,\n RandGaussianSharpenD,\n RandGaussianSharpenDict,\n RandGaussianSmoothd,\n RandGaussianSmoothD,\n RandGaussianSmoothDict,\n RandGibbsNoised,\n RandGibbsNoiseD,\n RandGibbsNoiseDict,\n RandHistogramShiftd,\n RandHistogramShiftD,\n RandHistogramShiftDict,\n RandKSpaceSpikeNoised,\n RandKSpaceSpikeNoiseD,\n RandKSpaceSpikeNoiseDict,\n RandRicianNoised,\n RandRicianNoiseD,\n RandRicianNoiseDict,\n 
RandScaleIntensityd,\n RandScaleIntensityD,\n RandScaleIntensityDict,\n RandShiftIntensityd,\n RandShiftIntensityD,\n RandShiftIntensityDict,\n RandStdShiftIntensityd,\n RandStdShiftIntensityD,\n RandStdShiftIntensityDict,\n ScaleIntensityd,\n ScaleIntensityD,\n ScaleIntensityDict,\n ScaleIntensityRanged,\n ScaleIntensityRangeD,\n ScaleIntensityRangeDict,\n ScaleIntensityRangePercentilesd,\n ScaleIntensityRangePercentilesD,\n ScaleIntensityRangePercentilesDict,\n ShiftIntensityd,\n ShiftIntensityD,\n ShiftIntensityDict,\n StdShiftIntensityd,\n StdShiftIntensityD,\n StdShiftIntensityDict,\n ThresholdIntensityd,\n ThresholdIntensityD,\n ThresholdIntensityDict,\n)\nfrom .inverse import InvertibleTransform\nfrom .inverse_batch_transform import BatchInverseTransform, Decollated\nfrom .io.array import SUPPORTED_READERS, LoadImage, SaveImage\nfrom .io.dictionary import LoadImaged, LoadImageD, LoadImageDict, SaveImaged, SaveImageD, SaveImageDict\nfrom .nvtx import (\n Mark,\n Markd,\n MarkD,\n MarkDict,\n RandMark,\n RandMarkd,\n RandMarkD,\n RandMarkDict,\n RandRange,\n RandRanged,\n RandRangeD,\n RandRangeDict,\n RandRangePop,\n RandRangePopd,\n RandRangePopD,\n RandRangePopDict,\n RandRangePush,\n RandRangePushd,\n RandRangePushD,\n RandRangePushDict,\n Range,\n Ranged,\n RangeD,\n RangeDict,\n RangePop,\n RangePopd,\n RangePopD,\n RangePopDict,\n RangePush,\n RangePushd,\n RangePushD,\n RangePushDict,\n)\nfrom .post.array import (\n Activations,\n AsDiscrete,\n FillHoles,\n KeepLargestConnectedComponent,\n LabelFilter,\n LabelToContour,\n MeanEnsemble,\n ProbNMS,\n VoteEnsemble,\n)\nfrom .post.dictionary import (\n ActivationsD,\n Activationsd,\n ActivationsDict,\n AsDiscreteD,\n AsDiscreted,\n AsDiscreteDict,\n Ensembled,\n FillHolesD,\n FillHolesd,\n FillHolesDict,\n InvertD,\n Invertd,\n InvertDict,\n KeepLargestConnectedComponentD,\n KeepLargestConnectedComponentd,\n KeepLargestConnectedComponentDict,\n LabelFilterD,\n LabelFilterd,\n LabelFilterDict,\n 
LabelToContourD,\n LabelToContourd,\n LabelToContourDict,\n MeanEnsembleD,\n MeanEnsembled,\n MeanEnsembleDict,\n ProbNMSD,\n ProbNMSd,\n ProbNMSDict,\n SaveClassificationD,\n SaveClassificationd,\n SaveClassificationDict,\n VoteEnsembleD,\n VoteEnsembled,\n VoteEnsembleDict,\n)\nfrom .spatial.array import (\n AddCoordinateChannels,\n Affine,\n AffineGrid,\n Flip,\n Orientation,\n Rand2DElastic,\n Rand3DElastic,\n RandAffine,\n RandAffineGrid,\n RandAxisFlip,\n RandDeformGrid,\n RandFlip,\n RandRotate,\n RandRotate90,\n RandZoom,\n Resample,\n Resize,\n Rotate,\n Rotate90,\n Spacing,\n Zoom,\n)\nfrom .spatial.dictionary import (\n AddCoordinateChannelsd,\n AddCoordinateChannelsD,\n AddCoordinateChannelsDict,\n Affined,\n AffineD,\n AffineDict,\n Flipd,\n FlipD,\n FlipDict,\n Orientationd,\n OrientationD,\n OrientationDict,\n Rand2DElasticd,\n Rand2DElasticD,\n Rand2DElasticDict,\n Rand3DElasticd,\n Rand3DElasticD,\n Rand3DElasticDict,\n RandAffined,\n RandAffineD,\n RandAffineDict,\n RandAxisFlipd,\n RandAxisFlipD,\n RandAxisFlipDict,\n RandFlipd,\n RandFlipD,\n RandFlipDict,\n RandRotate90d,\n RandRotate90D,\n RandRotate90Dict,\n RandRotated,\n RandRotateD,\n RandRotateDict,\n RandZoomd,\n RandZoomD,\n RandZoomDict,\n Resized,\n ResizeD,\n ResizeDict,\n Rotate90d,\n Rotate90D,\n Rotate90Dict,\n Rotated,\n RotateD,\n RotateDict,\n Spacingd,\n SpacingD,\n SpacingDict,\n Zoomd,\n ZoomD,\n ZoomDict,\n)\nfrom .transform import MapTransform, Randomizable, RandomizableTransform, ThreadUnsafe, Transform, apply_transform\nfrom .utility.array import (\n AddChannel,\n AddExtremePointsChannel,\n AsChannelFirst,\n AsChannelLast,\n CastToType,\n ClassesToIndices,\n ConvertToMultiChannelBasedOnBratsClasses,\n DataStats,\n EnsureChannelFirst,\n EnsureType,\n FgBgToIndices,\n Identity,\n IntensityStats,\n LabelToMask,\n Lambda,\n MapLabelValue,\n RandLambda,\n RemoveRepeatedChannel,\n RepeatChannel,\n SimulateDelay,\n SplitChannel,\n SqueezeDim,\n ToCupy,\n ToDevice,\n ToNumpy,\n 
ToPIL,\n TorchVision,\n ToTensor,\n Transpose,\n)\nfrom .utility.dictionary import (\n AddChanneld,\n AddChannelD,\n AddChannelDict,\n AddExtremePointsChanneld,\n AddExtremePointsChannelD,\n AddExtremePointsChannelDict,\n AsChannelFirstd,\n AsChannelFirstD,\n AsChannelFirstDict,\n AsChannelLastd,\n AsChannelLastD,\n AsChannelLastDict,\n CastToTyped,\n CastToTypeD,\n CastToTypeDict,\n ClassesToIndicesd,\n ClassesToIndicesD,\n ClassesToIndicesDict,\n ConcatItemsd,\n ConcatItemsD,\n ConcatItemsDict,\n ConvertToMultiChannelBasedOnBratsClassesd,\n ConvertToMultiChannelBasedOnBratsClassesD,\n ConvertToMultiChannelBasedOnBratsClassesDict,\n CopyItemsd,\n CopyItemsD,\n CopyItemsDict,\n DataStatsd,\n DataStatsD,\n DataStatsDict,\n DeleteItemsd,\n DeleteItemsD,\n DeleteItemsDict,\n EnsureChannelFirstd,\n EnsureChannelFirstD,\n EnsureChannelFirstDict,\n EnsureTyped,\n EnsureTypeD,\n EnsureTypeDict,\n FgBgToIndicesd,\n FgBgToIndicesD,\n FgBgToIndicesDict,\n Identityd,\n IdentityD,\n IdentityDict,\n IntensityStatsd,\n IntensityStatsD,\n IntensityStatsDict,\n LabelToMaskd,\n LabelToMaskD,\n LabelToMaskDict,\n Lambdad,\n LambdaD,\n LambdaDict,\n MapLabelValued,\n MapLabelValueD,\n MapLabelValueDict,\n RandLambdad,\n RandLambdaD,\n RandLambdaDict,\n RandTorchVisiond,\n RandTorchVisionD,\n RandTorchVisionDict,\n RemoveRepeatedChanneld,\n RemoveRepeatedChannelD,\n RemoveRepeatedChannelDict,\n RepeatChanneld,\n RepeatChannelD,\n RepeatChannelDict,\n SelectItemsd,\n SelectItemsD,\n SelectItemsDict,\n SimulateDelayd,\n SimulateDelayD,\n SimulateDelayDict,\n SplitChanneld,\n SplitChannelD,\n SplitChannelDict,\n SqueezeDimd,\n SqueezeDimD,\n SqueezeDimDict,\n ToCupyd,\n ToCupyD,\n ToCupyDict,\n ToDeviced,\n ToDeviceD,\n ToDeviceDict,\n ToNumpyd,\n ToNumpyD,\n ToNumpyDict,\n ToPILd,\n ToPILD,\n ToPILDict,\n TorchVisiond,\n TorchVisionD,\n TorchVisionDict,\n ToTensord,\n ToTensorD,\n ToTensorDict,\n Transposed,\n TransposeD,\n TransposeDict,\n)\nfrom .utils import (\n Fourier,\n 
allow_missing_keys_mode,\n compute_divisible_spatial_size,\n convert_inverse_interp_mode,\n copypaste_arrays,\n create_control_grid,\n create_grid,\n create_rotate,\n create_scale,\n create_shear,\n create_translate,\n equalize_hist,\n extreme_points_to_image,\n generate_label_classes_crop_centers,\n generate_pos_neg_label_crop_centers,\n generate_spatial_bounding_box,\n get_extreme_points,\n get_largest_connected_component_mask,\n get_number_image_type_conversions,\n img_bounds,\n in_bounds,\n is_empty,\n is_positive,\n map_binary_to_indices,\n map_classes_to_indices,\n map_spatial_axes,\n rand_choice,\n rescale_array,\n rescale_array_int_max,\n rescale_instance_array,\n resize_center,\n weighted_patch_samples,\n zero_margins,\n)\n", |
| "monai/transforms/intensity/array.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for intensity adjustment\nhttps://github.com/Project-MONAI/MONAI/wiki/MONAI_Design\n\"\"\"\n\nfrom collections.abc import Iterable\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Union\nfrom warnings import warn\n\nimport numpy as np\nimport torch\n\nfrom monai.config import DtypeLike\nfrom monai.config.type_definitions import NdarrayTensor\nfrom monai.data.utils import get_random_patch, get_valid_patch_size\nfrom monai.networks.layers import GaussianFilter, HilbertTransform, SavitzkyGolayFilter\nfrom monai.transforms.transform import RandomizableTransform, Transform\nfrom monai.transforms.utils import Fourier, equalize_hist, is_positive, rescale_array\nfrom monai.utils import (\n PT_BEFORE_1_7,\n InvalidPyTorchVersionError,\n convert_data_type,\n convert_to_dst_type,\n dtype_torch_to_numpy,\n ensure_tuple,\n ensure_tuple_rep,\n ensure_tuple_size,\n fall_back_tuple,\n)\n\n__all__ = [\n \"RandGaussianNoise\",\n \"RandRicianNoise\",\n \"ShiftIntensity\",\n \"RandShiftIntensity\",\n \"StdShiftIntensity\",\n \"RandStdShiftIntensity\",\n \"RandBiasField\",\n \"ScaleIntensity\",\n \"RandScaleIntensity\",\n \"NormalizeIntensity\",\n \"ThresholdIntensity\",\n \"ScaleIntensityRange\",\n \"AdjustContrast\",\n \"RandAdjustContrast\",\n \"ScaleIntensityRangePercentiles\",\n 
\"MaskIntensity\",\n \"DetectEnvelope\",\n \"SavitzkyGolaySmooth\",\n \"GaussianSmooth\",\n \"RandGaussianSmooth\",\n \"GaussianSharpen\",\n \"RandGaussianSharpen\",\n \"RandHistogramShift\",\n \"GibbsNoise\",\n \"RandGibbsNoise\",\n \"KSpaceSpikeNoise\",\n \"RandKSpaceSpikeNoise\",\n \"RandCoarseDropout\",\n \"HistogramNormalize\",\n]\n\n\nclass RandGaussianNoise(RandomizableTransform):\n \"\"\"\n Add Gaussian noise to image.\n\n Args:\n prob: Probability to add Gaussian noise.\n mean: Mean or “centre” of the distribution.\n std: Standard deviation (spread) of distribution.\n \"\"\"\n\n backend = [\"torch\", \"numpy\"]\n\n def __init__(self, prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: float = 0.1) -> None:\n RandomizableTransform.__init__(self, prob)\n self.mean = mean\n self.std = std\n self._noise: np.ndarray\n\n def randomize(self, im_shape: Sequence[int]) -> None:\n super().randomize(None)\n self._noise = self.R.normal(self.mean, self.R.uniform(0, self.std), size=im_shape)\n\n def __call__(self, img: NdarrayTensor) -> NdarrayTensor:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize(img.shape)\n if self._noise is None:\n raise RuntimeError(\"randomized factor should not be None.\")\n if not self._do_transform:\n return img\n noise, *_ = convert_to_dst_type(self._noise, img)\n return img + noise # type: ignore\n\n\nclass RandRicianNoise(RandomizableTransform):\n \"\"\"\n Add Rician noise to image.\n Rician noise in MRI is the result of performing a magnitude operation on complex\n data with Gaussian noise of the same variance in both channels, as described in `Noise in Magnitude Magnetic Resonance Images\n <https://doi.org/10.1002/cmr.a.20124>`_. This transform is adapted from\n `DIPY<https://github.com/dipy/dipy>`_. 
See also: `The rician distribution of noisy mri data\n <https://doi.org/10.1002/mrm.1910340618>`_.\n\n Args:\n prob: Probability to add Rician noise.\n mean: Mean or \"centre\" of the Gaussian distributions sampled to make up\n the Rician noise.\n std: Standard deviation (spread) of the Gaussian distributions sampled\n to make up the Rician noise.\n channel_wise: If True, treats each channel of the image separately.\n relative: If True, the spread of the sampled Gaussian distributions will\n be std times the standard deviation of the image or channel's intensity\n histogram.\n sample_std: If True, sample the spread of the Gaussian distributions\n uniformly from 0 to std.\n \"\"\"\n\n def __init__(\n self,\n prob: float = 0.1,\n mean: Union[Sequence[float], float] = 0.0,\n std: Union[Sequence[float], float] = 1.0,\n channel_wise: bool = False,\n relative: bool = False,\n sample_std: bool = True,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.prob = prob\n self.mean = mean\n self.std = std\n self.channel_wise = channel_wise\n self.relative = relative\n self.sample_std = sample_std\n self._noise1: np.ndarray\n self._noise2: np.ndarray\n\n def _add_noise(self, img: Union[torch.Tensor, np.ndarray], mean: float, std: float):\n im_shape = img.shape\n _std = self.R.uniform(0, std) if self.sample_std else std\n self._noise1 = self.R.normal(mean, _std, size=im_shape)\n self._noise2 = self.R.normal(mean, _std, size=im_shape)\n if self._noise1 is None or self._noise2 is None:\n raise RuntimeError(\"noise should not be None.\")\n dtype = dtype_torch_to_numpy(img.dtype) if isinstance(img, torch.Tensor) else img.dtype\n return np.sqrt((img + self._noise1.astype(dtype)) ** 2 + self._noise2.astype(dtype) ** 2)\n\n def __call__(self, img: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n super().randomize(None)\n if not self._do_transform:\n return img\n if self.channel_wise:\n _mean = 
ensure_tuple_rep(self.mean, len(img))\n _std = ensure_tuple_rep(self.std, len(img))\n for i, d in enumerate(img):\n img[i] = self._add_noise(d, mean=_mean[i], std=_std[i] * d.std() if self.relative else _std[i])\n else:\n if not isinstance(self.mean, (int, float)):\n raise RuntimeError(\"If channel_wise is False, mean must be a float or int number.\")\n if not isinstance(self.std, (int, float)):\n raise RuntimeError(\"If channel_wise is False, std must be a float or int number.\")\n std = self.std * img.std() if self.relative else self.std\n if not isinstance(std, (int, float)):\n raise RuntimeError(\"std must be a float or int number.\")\n img = self._add_noise(img, mean=self.mean, std=std)\n return img\n\n\nclass ShiftIntensity(Transform):\n \"\"\"\n Shift intensity uniformly for the entire image with specified `offset`.\n\n Args:\n offset: offset value to shift the intensity of image.\n \"\"\"\n\n def __init__(self, offset: float) -> None:\n self.offset = offset\n\n def __call__(self, img: np.ndarray, offset: Optional[float] = None) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n\n offset = self.offset if offset is None else offset\n return np.asarray((img + offset), dtype=img.dtype)\n\n\nclass RandShiftIntensity(RandomizableTransform):\n \"\"\"\n Randomly shift intensity with randomly picked offset.\n \"\"\"\n\n def __init__(self, offsets: Union[Tuple[float, float], float], prob: float = 0.1) -> None:\n \"\"\"\n Args:\n offsets: offset range to randomly shift.\n if single number, offset value is picked from (-offsets, offsets).\n prob: probability of shift.\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n if isinstance(offsets, (int, float)):\n self.offsets = (min(-offsets, offsets), max(-offsets, offsets))\n elif len(offsets) != 2:\n raise ValueError(\"offsets should be a number or pair of numbers.\")\n else:\n self.offsets = (min(offsets), max(offsets))\n self._offset = self.offsets[0]\n self._shfiter = 
ShiftIntensity(self._offset)\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])\n super().randomize(None)\n\n def __call__(self, img: np.ndarray, factor: Optional[float] = None) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n\n Args:\n img: input image to shift intensity.\n factor: a factor to multiply the random offset, then shift.\n can be some image specific value at runtime, like: max(img), etc.\n\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n return self._shfiter(img, self._offset if factor is None else self._offset * factor)\n\n\nclass StdShiftIntensity(Transform):\n \"\"\"\n Shift intensity for the image with a factor and the standard deviation of the image\n by: ``v = v + factor * std(v)``.\n This transform can focus on only non-zero values or the entire image,\n and can also calculate the std on each channel separately.\n\n Args:\n factor: factor shift by ``v = v + factor * std(v)``.\n nonzero: whether only count non-zero values.\n channel_wise: if True, calculate on each channel separately. 
Please ensure\n that the first dimension represents the channel of the image if True.\n dtype: output data type, defaults to float32.\n \"\"\"\n\n def __init__(\n self, factor: float, nonzero: bool = False, channel_wise: bool = False, dtype: DtypeLike = np.float32\n ) -> None:\n self.factor = factor\n self.nonzero = nonzero\n self.channel_wise = channel_wise\n self.dtype = dtype\n\n def _stdshift(self, img: np.ndarray) -> np.ndarray:\n slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=bool)\n if not np.any(slices):\n return img\n offset = self.factor * np.std(img[slices])\n img[slices] = img[slices] + offset\n return img\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n img = img.astype(self.dtype)\n if self.channel_wise:\n for i, d in enumerate(img):\n img[i] = self._stdshift(d)\n else:\n img = self._stdshift(img)\n return img\n\n\nclass RandStdShiftIntensity(RandomizableTransform):\n \"\"\"\n Shift intensity for the image with a factor and the standard deviation of the image\n by: ``v = v + factor * std(v)`` where the `factor` is randomly picked.\n \"\"\"\n\n def __init__(\n self,\n factors: Union[Tuple[float, float], float],\n prob: float = 0.1,\n nonzero: bool = False,\n channel_wise: bool = False,\n dtype: DtypeLike = np.float32,\n ) -> None:\n \"\"\"\n Args:\n factors: if tuple, the randomly picked range is (min(factors), max(factors)).\n If single number, the range is (-factors, factors).\n prob: probability of std shift.\n nonzero: whether only count non-zero values.\n channel_wise: if True, calculate on each channel separately.\n dtype: output data type, defaults to float32.\n\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n if isinstance(factors, (int, float)):\n self.factors = (min(-factors, factors), max(-factors, factors))\n elif len(factors) != 2:\n raise ValueError(\"factors should be a number or pair of numbers.\")\n else:\n self.factors = (min(factors), max(factors))\n 
self.factor = self.factors[0]\n self.nonzero = nonzero\n self.channel_wise = channel_wise\n self.dtype = dtype\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])\n super().randomize(None)\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n shifter = StdShiftIntensity(\n factor=self.factor, nonzero=self.nonzero, channel_wise=self.channel_wise, dtype=self.dtype\n )\n return shifter(img)\n\n\nclass ScaleIntensity(Transform):\n \"\"\"\n Scale the intensity of input image to the given value range (minv, maxv).\n If `minv` and `maxv` not provided, use `factor` to scale image by ``v = v * (1 + factor)``.\n \"\"\"\n\n def __init__(\n self, minv: Optional[float] = 0.0, maxv: Optional[float] = 1.0, factor: Optional[float] = None\n ) -> None:\n \"\"\"\n Args:\n minv: minimum value of output data.\n maxv: maximum value of output data.\n factor: factor scale by ``v = v * (1 + factor)``. In order to use\n this parameter, please set `minv` and `maxv` into None.\n \"\"\"\n self.minv = minv\n self.maxv = maxv\n self.factor = factor\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n\n Raises:\n ValueError: When ``self.minv=None`` or ``self.maxv=None`` and ``self.factor=None``. 
Incompatible values.\n\n \"\"\"\n if self.minv is not None and self.maxv is not None:\n return np.asarray(rescale_array(img, self.minv, self.maxv, img.dtype))\n if self.factor is not None:\n return np.asarray(img * (1 + self.factor), dtype=img.dtype)\n raise ValueError(\"Incompatible values: minv=None or maxv=None and factor=None.\")\n\n\nclass RandScaleIntensity(RandomizableTransform):\n \"\"\"\n Randomly scale the intensity of input image by ``v = v * (1 + factor)`` where the `factor`\n is randomly picked.\n \"\"\"\n\n def __init__(self, factors: Union[Tuple[float, float], float], prob: float = 0.1) -> None:\n \"\"\"\n Args:\n factors: factor range to randomly scale by ``v = v * (1 + factor)``.\n if single number, factor value is picked from (-factors, factors).\n prob: probability of scale.\n\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n if isinstance(factors, (int, float)):\n self.factors = (min(-factors, factors), max(-factors, factors))\n elif len(factors) != 2:\n raise ValueError(\"factors should be a number or pair of numbers.\")\n else:\n self.factors = (min(factors), max(factors))\n self.factor = self.factors[0]\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])\n super().randomize(None)\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor)\n return scaler(img)\n\n\nclass RandBiasField(RandomizableTransform):\n \"\"\"\n Random bias field augmentation for MR images.\n The bias field is considered as a linear combination of smoothly varying basis (polynomial)\n functions, as described in `Automated Model-Based Tissue Classification of MR Images of the Brain\n <https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=811270>`_.\n This implementation adapted from `NiftyNet\n 
<https://github.com/NifTK/NiftyNet>`_.\n Referred to `Longitudinal segmentation of age-related white matter hyperintensities\n <https://www.sciencedirect.com/science/article/pii/S1361841517300257?via%3Dihub>`_.\n\n Args:\n degree: degree of freedom of the polynomials. The value should be no less than 1.\n Defaults to 3.\n coeff_range: range of the random coefficients. Defaults to (0.0, 0.1).\n dtype: output data type, defaults to float32.\n prob: probability to do random bias field.\n\n \"\"\"\n\n def __init__(\n self,\n degree: int = 3,\n coeff_range: Tuple[float, float] = (0.0, 0.1),\n dtype: DtypeLike = np.float32,\n prob: float = 1.0,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n if degree < 1:\n raise ValueError(\"degree should be no less than 1.\")\n self.degree = degree\n self.coeff_range = coeff_range\n self.dtype = dtype\n\n self._coeff = [1.0]\n\n def _generate_random_field(self, spatial_shape: Sequence[int], degree: int, coeff: Sequence[float]):\n \"\"\"\n products of polynomials as bias field estimations\n \"\"\"\n rank = len(spatial_shape)\n coeff_mat = np.zeros((degree + 1,) * rank)\n coords = [np.linspace(-1.0, 1.0, dim, dtype=np.float32) for dim in spatial_shape]\n if rank == 2:\n coeff_mat[np.tril_indices(degree + 1)] = coeff\n return np.polynomial.legendre.leggrid2d(coords[0], coords[1], coeff_mat)\n if rank == 3:\n pts: List[List[int]] = [[0, 0, 0]]\n for i in range(degree + 1):\n for j in range(degree + 1 - i):\n for k in range(degree + 1 - i - j):\n pts.append([i, j, k])\n if len(pts) > 1:\n pts = pts[1:]\n np_pts = np.stack(pts)\n coeff_mat[np_pts[:, 0], np_pts[:, 1], np_pts[:, 2]] = coeff\n return np.polynomial.legendre.leggrid3d(coords[0], coords[1], coords[2], coeff_mat)\n raise NotImplementedError(\"only supports 2D or 3D fields\")\n\n def randomize(self, data: np.ndarray) -> None:\n super().randomize(None)\n n_coeff = int(np.prod([(self.degree + k) / k for k in range(1, len(data.shape[1:]) + 1)]))\n self._coeff = 
self.R.uniform(*self.coeff_range, n_coeff).tolist()\n\n def __call__(self, img: np.ndarray):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize(data=img)\n if not self._do_transform:\n return img\n num_channels, *spatial_shape = img.shape\n _bias_fields = np.stack(\n [\n self._generate_random_field(spatial_shape=spatial_shape, degree=self.degree, coeff=self._coeff)\n for _ in range(num_channels)\n ],\n axis=0,\n )\n return (img * np.exp(_bias_fields)).astype(self.dtype)\n\n\nclass NormalizeIntensity(Transform):\n \"\"\"\n Normalize input based on provided args, using calculated mean and std if not provided.\n This transform can normalize only non-zero values or entire image, and can also calculate\n mean and std on each channel separately.\n When `channel_wise` is True, the first dimension of `subtrahend` and `divisor` should\n be the number of image channels if they are not None.\n\n Args:\n subtrahend: the amount to subtract by (usually the mean).\n divisor: the amount to divide by (usually the standard deviation).\n nonzero: whether only normalize non-zero values.\n channel_wise: if using calculated mean and std, calculate on each channel separately\n or calculate on the entire image directly.\n dtype: output data type, defaults to float32.\n \"\"\"\n\n def __init__(\n self,\n subtrahend: Union[Sequence, np.ndarray, None] = None,\n divisor: Union[Sequence, np.ndarray, None] = None,\n nonzero: bool = False,\n channel_wise: bool = False,\n dtype: DtypeLike = np.float32,\n ) -> None:\n self.subtrahend = subtrahend\n self.divisor = divisor\n self.nonzero = nonzero\n self.channel_wise = channel_wise\n self.dtype = dtype\n\n def _normalize(self, img: np.ndarray, sub=None, div=None) -> np.ndarray:\n slices = (img != 0) if self.nonzero else np.ones(img.shape, dtype=bool)\n if not np.any(slices):\n return img\n\n _sub = sub if sub is not None else np.mean(img[slices])\n if isinstance(_sub, np.ndarray):\n _sub = _sub[slices]\n\n _div = div if div is not 
None else np.std(img[slices])\n if np.isscalar(_div):\n if _div == 0.0:\n _div = 1.0\n elif isinstance(_div, np.ndarray):\n _div = _div[slices]\n _div[_div == 0.0] = 1.0\n img[slices] = (img[slices] - _sub) / _div\n return img\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`, assuming `img` is a channel-first array if `self.channel_wise` is True,\n \"\"\"\n if self.channel_wise:\n if self.subtrahend is not None and len(self.subtrahend) != len(img):\n raise ValueError(f\"img has {len(img)} channels, but subtrahend has {len(self.subtrahend)} components.\")\n if self.divisor is not None and len(self.divisor) != len(img):\n raise ValueError(f\"img has {len(img)} channels, but divisor has {len(self.divisor)} components.\")\n\n for i, d in enumerate(img):\n img[i] = self._normalize(\n d,\n sub=self.subtrahend[i] if self.subtrahend is not None else None,\n div=self.divisor[i] if self.divisor is not None else None,\n )\n else:\n img = self._normalize(img, self.subtrahend, self.divisor)\n\n return img.astype(self.dtype)\n\n\nclass ThresholdIntensity(Transform):\n \"\"\"\n Filter the intensity values of whole image to below threshold or above threshold.\n And fill the remaining parts of the image to the `cval` value.\n\n Args:\n threshold: the threshold to filter intensity values.\n above: filter values above the threshold or below the threshold, default is True.\n cval: value to fill the remaining parts of the image, default is 0.\n \"\"\"\n\n def __init__(self, threshold: float, above: bool = True, cval: float = 0.0) -> None:\n if not isinstance(threshold, (int, float)):\n raise ValueError(\"threshold must be a float or int number.\")\n self.threshold = threshold\n self.above = above\n self.cval = cval\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n return np.asarray(\n np.where(img > self.threshold if self.above else img < self.threshold, img, self.cval), 
dtype=img.dtype\n )\n\n\nclass ScaleIntensityRange(Transform):\n \"\"\"\n Apply specific intensity scaling to the whole numpy array.\n Scaling from [a_min, a_max] to [b_min, b_max] with clip option.\n\n Args:\n a_min: intensity original range min.\n a_max: intensity original range max.\n b_min: intensity target range min.\n b_max: intensity target range max.\n clip: whether to perform clip after scaling.\n \"\"\"\n\n def __init__(self, a_min: float, a_max: float, b_min: float, b_max: float, clip: bool = False) -> None:\n self.a_min = a_min\n self.a_max = a_max\n self.b_min = b_min\n self.b_max = b_max\n self.clip = clip\n\n def __call__(self, img: np.ndarray):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n if self.a_max - self.a_min == 0.0:\n warn(\"Divide by zero (a_min == a_max)\", Warning)\n return img - self.a_min + self.b_min\n\n img = (img - self.a_min) / (self.a_max - self.a_min)\n img = img * (self.b_max - self.b_min) + self.b_min\n if self.clip:\n img = np.asarray(np.clip(img, self.b_min, self.b_max))\n return img\n\n\nclass AdjustContrast(Transform):\n \"\"\"\n Changes image intensity by gamma. Each pixel/voxel intensity is updated as::\n\n x = ((x - min) / intensity_range) ^ gamma * intensity_range + min\n\n Args:\n gamma: gamma value to adjust the contrast as function.\n \"\"\"\n\n def __init__(self, gamma: float) -> None:\n if not isinstance(gamma, (int, float)):\n raise ValueError(\"gamma must be a float or int number.\")\n self.gamma = gamma\n\n def __call__(self, img: np.ndarray):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n epsilon = 1e-7\n img_min = img.min()\n img_range = img.max() - img_min\n return np.power(((img - img_min) / float(img_range + epsilon)), self.gamma) * img_range + img_min\n\n\nclass RandAdjustContrast(RandomizableTransform):\n \"\"\"\n Randomly changes image intensity by gamma. 
Each pixel/voxel intensity is updated as::\n\n x = ((x - min) / intensity_range) ^ gamma * intensity_range + min\n\n Args:\n prob: Probability of adjustment.\n gamma: Range of gamma values.\n If single number, value is picked from (0.5, gamma), default is (0.5, 4.5).\n \"\"\"\n\n def __init__(self, prob: float = 0.1, gamma: Union[Sequence[float], float] = (0.5, 4.5)) -> None:\n RandomizableTransform.__init__(self, prob)\n\n if isinstance(gamma, (int, float)):\n if gamma <= 0.5:\n raise ValueError(\n \"if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)\"\n )\n self.gamma = (0.5, gamma)\n elif len(gamma) != 2:\n raise ValueError(\"gamma should be a number or pair of numbers.\")\n else:\n self.gamma = (min(gamma), max(gamma))\n\n self.gamma_value: float\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1])\n\n def __call__(self, img: np.ndarray):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n self.randomize()\n if self.gamma_value is None:\n raise ValueError(\"gamma_value is not set.\")\n if not self._do_transform:\n return img\n adjuster = AdjustContrast(self.gamma_value)\n return adjuster(img)\n\n\nclass ScaleIntensityRangePercentiles(Transform):\n \"\"\"\n Apply range scaling to a numpy array based on the intensity distribution of the input.\n\n By default this transform will scale from [lower_intensity_percentile, upper_intensity_percentile] to [b_min, b_max], where\n {lower,upper}_intensity_percentile are the intensity values at the corresponding percentiles of ``img``.\n\n The ``relative`` parameter can also be set to scale from [lower_intensity_percentile, upper_intensity_percentile] to the\n lower and upper percentiles of the output range [b_min, b_max]\n\n For example:\n\n .. 
code-block:: python\n :emphasize-lines: 11, 22\n\n image = np.array(\n [[[1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5]]])\n\n # Scale from lower and upper image intensity percentiles\n # to output range [b_min, b_max]\n scaler = ScaleIntensityRangePercentiles(10, 90, 0, 200, False, False)\n print(scaler(image))\n [[[0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.],\n [0., 50., 100., 150., 200.]]]\n\n # Scale from lower and upper image intensity percentiles\n # to lower and upper percentiles of the output range [b_min, b_max]\n rel_scaler = ScaleIntensityRangePercentiles(10, 90, 0, 200, False, True)\n print(rel_scaler(image))\n [[[20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.],\n [20., 60., 100., 140., 180.]]]\n\n\n Args:\n lower: lower intensity percentile.\n upper: upper intensity percentile.\n b_min: intensity target range min.\n b_max: intensity target range max.\n clip: whether to perform clip after scaling.\n relative: whether to scale to the corresponding percentiles of [b_min, b_max].\n \"\"\"\n\n def __init__(\n self, lower: float, upper: float, b_min: float, b_max: float, clip: bool = False, relative: bool = False\n ) -> None:\n if lower < 0.0 or lower > 100.0:\n raise ValueError(\"Percentiles must be in the range [0, 100]\")\n if upper < 0.0 or upper > 100.0:\n raise ValueError(\"Percentiles must be in the range [0, 100]\")\n self.lower = lower\n self.upper = upper\n self.b_min = b_min\n self.b_max = b_max\n self.clip = clip\n self.relative = relative\n\n def __call__(self, img: np.ndarray):\n \"\"\"\n Apply the transform to `img`.\n \"\"\"\n a_min = np.percentile(img, self.lower)\n a_max = np.percentile(img, self.upper)\n b_min = self.b_min\n b_max = self.b_max\n\n if 
self.relative:\n b_min = ((self.b_max - self.b_min) * (self.lower / 100.0)) + self.b_min\n b_max = ((self.b_max - self.b_min) * (self.upper / 100.0)) + self.b_min\n\n scalar = ScaleIntensityRange(a_min=a_min, a_max=a_max, b_min=b_min, b_max=b_max, clip=False)\n img = scalar(img)\n\n if self.clip:\n img = np.asarray(np.clip(img, self.b_min, self.b_max))\n\n return img\n\n\nclass MaskIntensity(Transform):\n \"\"\"\n Mask the intensity values of input image with the specified mask data.\n Mask data must have the same spatial size as the input image, and all\n the intensity values of input image corresponding to the selected values\n in the mask data will keep the original value, others will be set to `0`.\n\n Args:\n mask_data: if `mask_data` is single channel, apply to every channel\n of input image. if multiple channels, the number of channels must\n match the input data. the intensity values of input image corresponding\n to the selected values in the mask data will keep the original value,\n others will be set to `0`. if None, must specify the `mask_data` at runtime.\n select_fn: function to select valid values of the `mask_data`, default is\n to select `values > 0`.\n\n \"\"\"\n\n def __init__(self, mask_data: Optional[np.ndarray] = None, select_fn: Callable = is_positive) -> None:\n self.mask_data = mask_data\n self.select_fn = select_fn\n\n def __call__(self, img: np.ndarray, mask_data: Optional[np.ndarray] = None) -> np.ndarray:\n \"\"\"\n Args:\n mask_data: if mask data is single channel, apply to every channel\n of input image. if multiple channels, the channel number must\n match input data. 
mask_data will be converted to `bool` values\n by `mask_data > 0` before applying transform to input image.\n\n Raises:\n - ValueError: When both ``mask_data`` and ``self.mask_data`` are None.\n - ValueError: When ``mask_data`` and ``img`` channels differ and ``mask_data`` is not single channel.\n\n \"\"\"\n mask_data = self.mask_data if mask_data is None else mask_data\n if mask_data is None:\n raise ValueError(\"must provide the mask_data when initializing the transform or at runtime.\")\n\n mask_data = np.asarray(self.select_fn(mask_data))\n if mask_data.shape[0] != 1 and mask_data.shape[0] != img.shape[0]:\n raise ValueError(\n \"When mask_data is not single channel, mask_data channels must match img, \"\n f\"got img channels={img.shape[0]} mask_data channels={mask_data.shape[0]}.\"\n )\n\n return np.asarray(img * mask_data)\n\n\nclass SavitzkyGolaySmooth(Transform):\n \"\"\"\n Smooth the input data along the given axis using a Savitzky-Golay filter.\n\n Args:\n window_length: Length of the filter window, must be a positive odd integer.\n order: Order of the polynomial to fit to each window, must be less than ``window_length``.\n axis: Optional axis along which to apply the filter kernel. Default 1 (first spatial dimension).\n mode: Optional padding mode, passed to convolution class. ``'zeros'``, ``'reflect'``, ``'replicate'``\n or ``'circular'``. Default: ``'zeros'``. See ``torch.nn.Conv1d()`` for more information.\n \"\"\"\n\n backend = [\"numpy\"]\n\n def __init__(self, window_length: int, order: int, axis: int = 1, mode: str = \"zeros\"):\n\n if axis < 0:\n raise ValueError(\"axis must be zero or positive.\")\n\n self.window_length = window_length\n self.order = order\n self.axis = axis\n self.mode = mode\n self.img_t: torch.Tensor = torch.tensor(0.0)\n\n def __call__(self, img: NdarrayTensor) -> torch.Tensor:\n \"\"\"\n Args:\n img: array containing input data. 
Must be real and in shape [channels, spatial1, spatial2, ...].\n\n Returns:\n array containing smoothed result.\n\n \"\"\"\n self.img_t, *_ = convert_data_type(img, torch.Tensor)\n\n # add one to transform axis because a batch axis will be added at dimension 0\n savgol_filter = SavitzkyGolayFilter(self.window_length, self.order, self.axis + 1, self.mode)\n # convert to Tensor and add Batch axis expected by HilbertTransform\n out: torch.Tensor = savgol_filter(self.img_t.unsqueeze(0)).squeeze(0)\n return out\n\n\nclass DetectEnvelope(Transform):\n \"\"\"\n Find the envelope of the input data along the requested axis using a Hilbert transform.\n Requires PyTorch 1.7.0+ and the PyTorch FFT module (which is not included in NVIDIA PyTorch Release 20.10).\n\n Args:\n axis: Axis along which to detect the envelope. Default 1, i.e. the first spatial dimension.\n N: FFT size. Default img.shape[axis]. Input will be zero-padded or truncated to this size along dimension\n ``axis``.\n\n \"\"\"\n\n def __init__(self, axis: int = 1, n: Union[int, None] = None) -> None:\n\n if PT_BEFORE_1_7:\n raise InvalidPyTorchVersionError(\"1.7.0\", self.__class__.__name__)\n\n if axis < 0:\n raise ValueError(\"axis must be zero or positive.\")\n\n self.axis = axis\n self.n = n\n\n def __call__(self, img: np.ndarray):\n \"\"\"\n\n Args:\n img: numpy.ndarray containing input data. 
Must be real and in shape [channels, spatial1, spatial2, ...].\n\n Returns:\n np.ndarray containing envelope of data in img along the specified axis.\n\n \"\"\"\n # add one to transform axis because a batch axis will be added at dimension 0\n hilbert_transform = HilbertTransform(self.axis + 1, self.n)\n # convert to Tensor and add Batch axis expected by HilbertTransform\n input_data = torch.as_tensor(np.ascontiguousarray(img)).unsqueeze(0)\n return np.abs(hilbert_transform(input_data).squeeze(0).numpy())\n\n\nclass GaussianSmooth(Transform):\n \"\"\"\n Apply Gaussian smooth to the input data based on specified `sigma` parameter.\n A default value `sigma=1.0` is provided for reference.\n\n Args:\n sigma: if a list of values, must match the count of spatial dimensions of input data,\n and apply every value in the list to 1 spatial dimension. if only 1 value provided,\n use it for all spatial dimensions.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n\n \"\"\"\n\n def __init__(self, sigma: Union[Sequence[float], float] = 1.0, approx: str = \"erf\") -> None:\n self.sigma = sigma\n self.approx = approx\n\n def __call__(self, img: np.ndarray):\n gaussian_filter = GaussianFilter(img.ndim - 1, self.sigma, approx=self.approx)\n input_data = torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0)\n return gaussian_filter(input_data).squeeze(0).detach().numpy()\n\n\nclass RandGaussianSmooth(RandomizableTransform):\n \"\"\"\n Apply Gaussian smooth to the input data based on randomly selected `sigma` parameters.\n\n Args:\n sigma_x: randomly select sigma value for the first spatial dimension.\n sigma_y: randomly select sigma value for the second spatial dimension if have.\n sigma_z: randomly select sigma value for the third spatial dimension if have.\n prob: probability of Gaussian smooth.\n approx: discrete Gaussian kernel type, available 
options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n\n \"\"\"\n\n def __init__(\n self,\n sigma_x: Tuple[float, float] = (0.25, 1.5),\n sigma_y: Tuple[float, float] = (0.25, 1.5),\n sigma_z: Tuple[float, float] = (0.25, 1.5),\n prob: float = 0.1,\n approx: str = \"erf\",\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.sigma_x = sigma_x\n self.sigma_y = sigma_y\n self.sigma_z = sigma_z\n self.approx = approx\n\n self.x = self.sigma_x[0]\n self.y = self.sigma_y[0]\n self.z = self.sigma_z[0]\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1])\n self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1])\n self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1])\n\n def __call__(self, img: np.ndarray):\n self.randomize()\n if not self._do_transform:\n return img\n sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), dim=img.ndim - 1)\n return GaussianSmooth(sigma=sigma, approx=self.approx)(img)\n\n\nclass GaussianSharpen(Transform):\n \"\"\"\n Sharpen images using the Gaussian Blur filter.\n Referring to: http://scipy-lectures.org/advanced/image_processing/auto_examples/plot_sharpen.html.\n The algorithm is shown as below\n\n .. code-block:: python\n\n blurred_f = gaussian_filter(img, sigma1)\n filter_blurred_f = gaussian_filter(blurred_f, sigma2)\n img = blurred_f + alpha * (blurred_f - filter_blurred_f)\n\n A set of default values `sigma1=3.0`, `sigma2=1.0` and `alpha=30.0` is provide for reference.\n\n Args:\n sigma1: sigma parameter for the first gaussian kernel. if a list of values, must match the count\n of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension.\n if only 1 value provided, use it for all spatial dimensions.\n sigma2: sigma parameter for the second gaussian kernel. 
if a list of values, must match the count\n of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension.\n if only 1 value provided, use it for all spatial dimensions.\n alpha: weight parameter to compute the final result.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n\n \"\"\"\n\n def __init__(\n self,\n sigma1: Union[Sequence[float], float] = 3.0,\n sigma2: Union[Sequence[float], float] = 1.0,\n alpha: float = 30.0,\n approx: str = \"erf\",\n ) -> None:\n self.sigma1 = sigma1\n self.sigma2 = sigma2\n self.alpha = alpha\n self.approx = approx\n\n def __call__(self, img: np.ndarray):\n gaussian_filter1 = GaussianFilter(img.ndim - 1, self.sigma1, approx=self.approx)\n gaussian_filter2 = GaussianFilter(img.ndim - 1, self.sigma2, approx=self.approx)\n input_data = torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0)\n blurred_f = gaussian_filter1(input_data)\n filter_blurred_f = gaussian_filter2(blurred_f)\n return (blurred_f + self.alpha * (blurred_f - filter_blurred_f)).squeeze(0).detach().numpy()\n\n\nclass RandGaussianSharpen(RandomizableTransform):\n \"\"\"\n Sharpen images using the Gaussian Blur filter based on randomly selected `sigma1`, `sigma2` and `alpha`.\n The algorithm is :py:class:`monai.transforms.GaussianSharpen`.\n\n Args:\n sigma1_x: randomly select sigma value for the first spatial dimension of first gaussian kernel.\n sigma1_y: randomly select sigma value for the second spatial dimension(if have) of first gaussian kernel.\n sigma1_z: randomly select sigma value for the third spatial dimension(if have) of first gaussian kernel.\n sigma2_x: randomly select sigma value for the first spatial dimension of second gaussian kernel.\n if only 1 value `X` provided, it must be smaller than `sigma1_x` and randomly select from [X, sigma1_x].\n sigma2_y: randomly select sigma value for 
the second spatial dimension(if have) of second gaussian kernel.\n if only 1 value `Y` provided, it must be smaller than `sigma1_y` and randomly select from [Y, sigma1_y].\n sigma2_z: randomly select sigma value for the third spatial dimension(if have) of second gaussian kernel.\n if only 1 value `Z` provided, it must be smaller than `sigma1_z` and randomly select from [Z, sigma1_z].\n alpha: randomly select weight parameter to compute the final result.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n prob: probability of Gaussian sharpen.\n\n \"\"\"\n\n def __init__(\n self,\n sigma1_x: Tuple[float, float] = (0.5, 1.0),\n sigma1_y: Tuple[float, float] = (0.5, 1.0),\n sigma1_z: Tuple[float, float] = (0.5, 1.0),\n sigma2_x: Union[Tuple[float, float], float] = 0.5,\n sigma2_y: Union[Tuple[float, float], float] = 0.5,\n sigma2_z: Union[Tuple[float, float], float] = 0.5,\n alpha: Tuple[float, float] = (10.0, 30.0),\n approx: str = \"erf\",\n prob: float = 0.1,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.sigma1_x = sigma1_x\n self.sigma1_y = sigma1_y\n self.sigma1_z = sigma1_z\n self.sigma2_x = sigma2_x\n self.sigma2_y = sigma2_y\n self.sigma2_z = sigma2_z\n self.alpha = alpha\n self.approx = approx\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1])\n self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1])\n self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1])\n sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x\n sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y\n sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z\n self.x2 = self.R.uniform(low=sigma2_x[0], 
high=sigma2_x[1])\n self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1])\n self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1])\n self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1])\n\n def __call__(self, img: np.ndarray):\n self.randomize()\n if not self._do_transform:\n return img\n sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=img.ndim - 1)\n sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=img.ndim - 1)\n return GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(img)\n\n\nclass RandHistogramShift(RandomizableTransform):\n \"\"\"\n Apply random nonlinear transform to the image's intensity histogram.\n\n Args:\n num_control_points: number of control points governing the nonlinear intensity mapping.\n a smaller number of control points allows for larger intensity shifts. if two values provided, number of\n control points selecting from range (min_value, max_value).\n prob: probability of histogram shift.\n \"\"\"\n\n def __init__(self, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1) -> None:\n RandomizableTransform.__init__(self, prob)\n\n if isinstance(num_control_points, int):\n if num_control_points <= 2:\n raise ValueError(\"num_control_points should be greater than or equal to 3\")\n self.num_control_points = (num_control_points, num_control_points)\n else:\n if len(num_control_points) != 2:\n raise ValueError(\"num_control points should be a number or a pair of numbers\")\n if min(num_control_points) <= 2:\n raise ValueError(\"num_control_points should be greater than or equal to 3\")\n self.num_control_points = (min(num_control_points), max(num_control_points))\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1)\n self.reference_control_points = np.linspace(0, 1, num_control_point)\n self.floating_control_points = 
np.copy(self.reference_control_points)\n for i in range(1, num_control_point - 1):\n self.floating_control_points[i] = self.R.uniform(\n self.floating_control_points[i - 1], self.floating_control_points[i + 1]\n )\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n self.randomize()\n if not self._do_transform:\n return img\n img_min, img_max = img.min(), img.max()\n reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min\n floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min\n return np.asarray(\n np.interp(img, reference_control_points_scaled, floating_control_points_scaled), dtype=img.dtype\n )\n\n\nclass RandGibbsNoise(RandomizableTransform):\n \"\"\"\n Naturalistic image augmentation via Gibbs artifacts. The transform\n randomly applies Gibbs noise to 2D/3D MRI images. Gibbs artifacts\n are one of the common type of type artifacts appearing in MRI scans.\n\n The transform is applied to all the channels in the data.\n\n For general information on Gibbs artifacts, please refer to:\n https://pubs.rsna.org/doi/full/10.1148/rg.313105115\n https://pubs.rsna.org/doi/full/10.1148/radiographics.22.4.g02jl14949\n\n\n Args:\n prob (float): probability of applying the transform.\n alpha (float, Sequence(float)): Parametrizes the intensity of the Gibbs noise filter applied. Takes\n values in the interval [0,1] with alpha = 0 acting as the identity mapping.\n If a length-2 list is given as [a,b] then the value of alpha will be\n sampled uniformly from the interval [a,b]. 0 <= a <= b <= 1.\n as_tensor_output: if true return torch.Tensor, else return np.array. 
default: True.\n \"\"\"\n\n def __init__(self, prob: float = 0.1, alpha: Sequence[float] = (0.0, 1.0), as_tensor_output: bool = True) -> None:\n\n if len(alpha) != 2:\n raise ValueError(\"alpha length must be 2.\")\n if alpha[1] > 1 or alpha[0] < 0:\n raise ValueError(\"alpha must take values in the interval [0,1]\")\n if alpha[0] > alpha[1]:\n raise ValueError(\"When alpha = [a,b] we need a < b.\")\n\n self.alpha = alpha\n self.sampled_alpha = -1.0 # stores last alpha sampled by randomize()\n self.as_tensor_output = as_tensor_output\n\n RandomizableTransform.__init__(self, prob=prob)\n\n def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> Union[torch.Tensor, np.ndarray]:\n\n # randomize application and possibly alpha\n self._randomize(None)\n\n if self._do_transform:\n # apply transform\n transform = GibbsNoise(self.sampled_alpha, self.as_tensor_output)\n img = transform(img)\n else:\n if isinstance(img, np.ndarray) and self.as_tensor_output:\n img = torch.Tensor(img)\n elif isinstance(img, torch.Tensor) and not self.as_tensor_output:\n img = img.detach().cpu().numpy()\n return img\n\n def _randomize(self, _: Any) -> None:\n \"\"\"\n (1) Set random variable to apply the transform.\n (2) Get alpha from uniform distribution.\n \"\"\"\n super().randomize(None)\n self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1])\n\n\nclass GibbsNoise(Transform, Fourier):\n \"\"\"\n The transform applies Gibbs noise to 2D/3D MRI images. 
Gibbs artifacts\n are one of the common type of type artifacts appearing in MRI scans.\n\n The transform is applied to all the channels in the data.\n\n For general information on Gibbs artifacts, please refer to:\n\n `An Image-based Approach to Understanding the Physics of MR Artifacts\n <https://pubs.rsna.org/doi/full/10.1148/rg.313105115>`_.\n\n `The AAPM/RSNA Physics Tutorial for Residents\n <https://pubs.rsna.org/doi/full/10.1148/radiographics.22.4.g02jl14949>`_\n\n Args:\n alpha: Parametrizes the intensity of the Gibbs noise filter applied. Takes\n values in the interval [0,1] with alpha = 0 acting as the identity mapping.\n as_tensor_output: if true return torch.Tensor, else return np.array. Default: True.\n \"\"\"\n\n def __init__(self, alpha: float = 0.5, as_tensor_output: bool = True) -> None:\n\n if alpha > 1 or alpha < 0:\n raise ValueError(\"alpha must take values in the interval [0,1].\")\n self.alpha = alpha\n self.as_tensor_output = as_tensor_output\n\n def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> Union[torch.Tensor, np.ndarray]:\n n_dims = len(img.shape[1:])\n\n if isinstance(img, np.ndarray):\n img = torch.Tensor(img)\n # FT\n k = self.shift_fourier(img, n_dims)\n # build and apply mask\n k = self._apply_mask(k)\n # map back\n img = self.inv_shift_fourier(k, n_dims)\n\n return img if self.as_tensor_output else img.cpu().detach().numpy()\n\n def _apply_mask(self, k: torch.Tensor) -> torch.Tensor:\n \"\"\"Builds and applies a mask on the spatial dimensions.\n\n Args:\n k (np.ndarray): k-space version of the image.\n Returns:\n masked version of the k-space image.\n \"\"\"\n shape = k.shape[1:]\n\n # compute masking radius and center\n r = (1 - self.alpha) * np.max(shape) * np.sqrt(2) / 2.0\n center = (np.array(shape) - 1) / 2\n\n # gives list w/ len==self.dim. 
Each dim gives coordinate in that dimension\n coords = np.ogrid[tuple(slice(0, i) for i in shape)]\n\n # need to subtract center coord and then square for Euc distance\n coords_from_center_sq = [(coord - c) ** 2 for coord, c in zip(coords, center)]\n dist_from_center = np.sqrt(sum(coords_from_center_sq))\n mask = dist_from_center <= r\n\n # add channel dimension into mask\n mask = np.repeat(mask[None], k.shape[0], axis=0)\n\n # apply binary mask\n k_masked = k * torch.tensor(mask, device=k.device)\n return k_masked\n\n\nclass KSpaceSpikeNoise(Transform, Fourier):\n \"\"\"\n Apply localized spikes in `k`-space at the given locations and intensities.\n Spike (Herringbone) artifact is a type of data acquisition artifact which\n may occur during MRI scans.\n\n For general information on spike artifacts, please refer to:\n\n `AAPM/RSNA physics tutorial for residents: fundamental physics of MR imaging\n <https://pubmed.ncbi.nlm.nih.gov/16009826>`_.\n\n `Body MRI artifacts in clinical practice: A physicist's and radiologist's\n perspective <https://doi.org/10.1002/jmri.24288>`_.\n\n Args:\n loc: spatial location for the spikes. For\n images with 3D spatial dimensions, the user can provide (C, X, Y, Z)\n to fix which channel C is affected, or (X, Y, Z) to place the same\n spike in all channels. For 2D cases, the user can provide (C, X, Y)\n or (X, Y).\n k_intensity: value for the log-intensity of the\n `k`-space version of the image. If one location is passed to ``loc`` or the\n channel is not specified, then this argument should receive a float. If\n ``loc`` is given a sequence of locations, then this argument should\n receive a sequence of intensities. This value should be tested as it is\n data-dependent. 
The default values are the 2.5 the mean of the\n log-intensity for each channel.\n as_tensor_output: if ``True`` return torch.Tensor, else return np.array.\n Default: ``True``.\n\n Example:\n When working with 4D data, ``KSpaceSpikeNoise(loc = ((3,60,64,32), (64,60,32)), k_intensity = (13,14))``\n will place a spike at `[3, 60, 64, 32]` with `log-intensity = 13`, and\n one spike per channel located respectively at `[: , 64, 60, 32]`\n with `log-intensity = 14`.\n \"\"\"\n\n def __init__(\n self,\n loc: Union[Tuple, Sequence[Tuple]],\n k_intensity: Optional[Union[Sequence[float], float]] = None,\n as_tensor_output: bool = True,\n ):\n\n self.loc = ensure_tuple(loc)\n self.as_tensor_output = as_tensor_output\n self.k_intensity = k_intensity\n\n # assert one-to-one relationship between factors and locations\n if isinstance(k_intensity, Sequence):\n if not isinstance(loc[0], Sequence):\n raise ValueError(\n \"If a sequence is passed to k_intensity, then a sequence of locations must be passed to loc\"\n )\n if len(k_intensity) != len(loc):\n raise ValueError(\"There must be one intensity_factor value for each tuple of indices in loc.\")\n if isinstance(self.loc[0], Sequence) and k_intensity is not None and not isinstance(self.k_intensity, Sequence):\n raise ValueError(\"There must be one intensity_factor value for each tuple of indices in loc.\")\n\n def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> Union[torch.Tensor, np.ndarray]:\n \"\"\"\n Args:\n img: image with dimensions (C, H, W) or (C, H, W, D)\n \"\"\"\n # checking that tuples in loc are consistent with img size\n self._check_indices(img)\n\n if len(img.shape) < 3:\n raise RuntimeError(\"Image needs a channel direction.\")\n if isinstance(self.loc[0], int) and len(img.shape) == 4 and len(self.loc) == 2:\n raise RuntimeError(\"Input images of dimension 4 need location tuple to be length 3 or 4\")\n if isinstance(self.loc[0], Sequence) and len(img.shape) == 4 and min(map(lambda x: len(x), self.loc)) == 
2:\n raise RuntimeError(\"Input images of dimension 4 need location tuple to be length 3 or 4\")\n\n n_dims = len(img.shape[1:])\n\n if isinstance(img, np.ndarray):\n img = torch.Tensor(img)\n # FT\n k = self.shift_fourier(img, n_dims)\n log_abs = torch.log(torch.absolute(k) + 1e-10)\n phase = torch.angle(k)\n\n k_intensity = self.k_intensity\n # default log intensity\n if k_intensity is None:\n k_intensity = tuple(torch.mean(log_abs, dim=tuple(range(-n_dims, 0))) * 2.5)\n\n # highlight\n if isinstance(self.loc[0], Sequence):\n for idx, val in zip(self.loc, ensure_tuple(k_intensity)):\n self._set_spike(log_abs, idx, val)\n else:\n self._set_spike(log_abs, self.loc, k_intensity)\n # map back\n k = torch.exp(log_abs) * torch.exp(1j * phase)\n img = self.inv_shift_fourier(k, n_dims)\n\n return img if self.as_tensor_output else img.cpu().detach().numpy()\n\n def _check_indices(self, img) -> None:\n \"\"\"Helper method to check consistency of self.loc and input image.\n\n Raises assertion error if any index in loc is out of bounds.\"\"\"\n\n loc = list(self.loc)\n if not isinstance(loc[0], Sequence):\n loc = [loc]\n for i in range(len(loc)):\n if len(loc[i]) < len(img.shape):\n loc[i] = [0] + list(loc[i])\n\n for i in range(len(img.shape)):\n if img.shape[i] <= max(x[i] for x in loc):\n raise ValueError(\n f\"The index value at position {i} of one of the tuples in loc = {self.loc} is out of bounds for current image.\"\n )\n\n def _set_spike(self, k: torch.Tensor, idx: Tuple, val: Union[Sequence[float], float]):\n \"\"\"\n Helper function to introduce a given intensity at given location.\n\n Args:\n k: intensity array to alter.\n idx: index of location where to apply change.\n val: value of intensity to write in.\n \"\"\"\n if len(k.shape) == len(idx):\n k[idx] = val[idx[0]] if isinstance(val, Sequence) else val\n elif len(k.shape) == 4 and len(idx) == 3:\n k[:, idx[0], idx[1], idx[2]] = val # type: ignore\n elif len(k.shape) == 3 and len(idx) == 2:\n k[:, idx[0], 
idx[1]] = val # type: ignore\n\n\nclass RandKSpaceSpikeNoise(RandomizableTransform, Fourier):\n \"\"\"\n Naturalistic data augmentation via spike artifacts. The transform applies\n localized spikes in `k`-space, and it is the random version of\n :py:class:`monai.transforms.KSpaceSpikeNoise`.\n\n Spike (Herringbone) artifact is a type of data acquisition artifact which\n may occur during MRI scans. For general information on spike artifacts,\n please refer to:\n\n `AAPM/RSNA physics tutorial for residents: fundamental physics of MR imaging\n <https://pubmed.ncbi.nlm.nih.gov/16009826>`_.\n\n `Body MRI artifacts in clinical practice: A physicist's and radiologist's\n perspective <https://doi.org/10.1002/jmri.24288>`_.\n\n Args:\n prob: probability of applying the transform, either on all\n channels at once, or channel-wise if ``channel_wise = True``.\n intensity_range: pass a tuple\n (a, b) to sample the log-intensity from the interval (a, b)\n uniformly for all channels. Or pass sequence of intevals\n ((a0, b0), (a1, b1), ...) to sample for each respective channel.\n In the second case, the number of 2-tuples must match the number of\n channels.\n Default ranges is `(0.95x, 1.10x)` where `x` is the mean\n log-intensity for each channel.\n channel_wise: treat each channel independently. True by\n default.\n as_tensor_output: if True return torch.Tensor, else\n return np.array. 
default: True.\n\n Example:\n To apply `k`-space spikes randomly with probability 0.5, and\n log-intensity sampled from the interval [11, 12] for each channel\n independently, one uses\n ``RandKSpaceSpikeNoise(prob=0.5, intensity_range=(11, 12), channel_wise=True)``\n \"\"\"\n\n def __init__(\n self,\n prob: float = 0.1,\n intensity_range: Optional[Sequence[Union[Sequence[float], float]]] = None,\n channel_wise=True,\n as_tensor_output: bool = True,\n ):\n\n self.intensity_range = intensity_range\n self.channel_wise = channel_wise\n self.as_tensor_output = as_tensor_output\n self.sampled_k_intensity: List = []\n self.sampled_locs: List[Tuple] = []\n\n if intensity_range is not None and isinstance(intensity_range[0], Sequence) and not channel_wise:\n raise ValueError(\"When channel_wise = False, intensity_range should be a 2-tuple (low, high) or None.\")\n\n super().__init__(prob)\n\n def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> Union[torch.Tensor, np.ndarray]:\n \"\"\"\n Apply transform to `img`. 
Assumes data is in channel-first form.\n\n Args:\n img: image with dimensions (C, H, W) or (C, H, W, D)\n \"\"\"\n if (\n self.intensity_range is not None\n and isinstance(self.intensity_range[0], Sequence)\n and len(self.intensity_range) != img.shape[0]\n ):\n raise RuntimeError(\n \"If intensity_range is a sequence of sequences, then there must be one (low, high) tuple for each channel.\"\n )\n\n self.sampled_k_intensity = []\n self.sampled_locs = []\n\n if not isinstance(img, torch.Tensor):\n img = torch.Tensor(img)\n\n intensity_range = self._make_sequence(img)\n self._randomize(img, intensity_range)\n\n # build/appy transform only if there are spike locations\n if self.sampled_locs:\n transform = KSpaceSpikeNoise(self.sampled_locs, self.sampled_k_intensity, self.as_tensor_output)\n return transform(img)\n\n return img if self.as_tensor_output else img.detach().numpy()\n\n def _randomize(self, img: torch.Tensor, intensity_range: Sequence[Sequence[float]]) -> None:\n \"\"\"\n Helper method to sample both the location and intensity of the spikes.\n When not working channel wise (channel_wise=False) it use the random\n variable ``self._do_transform`` to decide whether to sample a location\n and intensity.\n\n When working channel wise, the method randomly samples a location and\n intensity for each channel depending on ``self._do_transform``.\n \"\"\"\n # randomizing per channel\n if self.channel_wise:\n for i, chan in enumerate(img):\n super().randomize(None)\n if self._do_transform:\n self.sampled_locs.append((i,) + tuple(self.R.randint(0, k) for k in chan.shape))\n self.sampled_k_intensity.append(self.R.uniform(intensity_range[i][0], intensity_range[i][1]))\n # working with all channels together\n else:\n super().randomize(None)\n if self._do_transform:\n spatial = tuple(self.R.randint(0, k) for k in img.shape[1:])\n self.sampled_locs = [(i,) + spatial for i in range(img.shape[0])]\n if isinstance(intensity_range[0], Sequence):\n self.sampled_k_intensity = 
[self.R.uniform(p[0], p[1]) for p in intensity_range]\n else:\n self.sampled_k_intensity = [self.R.uniform(intensity_range[0], intensity_range[1])] * len(img)\n\n def _make_sequence(self, x: torch.Tensor) -> Sequence[Sequence[float]]:\n \"\"\"\n Formats the sequence of intensities ranges to Sequence[Sequence[float]].\n \"\"\"\n if self.intensity_range is None:\n # set default range if one not provided\n return self._set_default_range(x)\n\n if not isinstance(self.intensity_range[0], Sequence):\n return (ensure_tuple(self.intensity_range),) * x.shape[0]\n return ensure_tuple(self.intensity_range)\n\n def _set_default_range(self, img: torch.Tensor) -> Sequence[Sequence[float]]:\n \"\"\"\n Sets default intensity ranges to be sampled.\n\n Args:\n img: image to transform.\n \"\"\"\n n_dims = len(img.shape[1:])\n\n k = self.shift_fourier(img, n_dims)\n log_abs = torch.log(torch.absolute(k) + 1e-10)\n shifted_means = torch.mean(log_abs, dim=tuple(range(-n_dims, 0))) * 2.5\n return tuple((i * 0.95, i * 1.1) for i in shifted_means)\n\n\nclass RandCoarseDropout(RandomizableTransform):\n \"\"\"\n Randomly coarse dropout regions in the image, then fill in the rectangular regions with specified value.\n Refer to: https://arxiv.org/abs/1708.04552 and:\n https://albumentations.ai/docs/api_reference/augmentations/transforms/\n #albumentations.augmentations.transforms.CoarseDropout.\n\n Args:\n holes: number of regions to dropout, if `max_holes` is not None, use this arg as the minimum number to\n randomly select the expected number of regions.\n spatial_size: spatial size of the regions to dropout, if `max_spatial_size` is not None, use this arg\n as the minimum spatial size to randomly select size for every region.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of input img size. 
For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n fill_value: target value to fill the dropout regions.\n max_holes: if not None, define the maximum number to randomly select the expected number of regions.\n max_spatial_size: if not None, define the maximum spatial size to randomly select size for every region.\n if some components of the `max_spatial_size` are non-positive values, the transform will use the\n corresponding components of input img size. For example, `max_spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n prob: probability of applying the transform.\n\n \"\"\"\n\n def __init__(\n self,\n holes: int,\n spatial_size: Union[Sequence[int], int],\n fill_value: Union[float, int] = 0,\n max_holes: Optional[int] = None,\n max_spatial_size: Optional[Union[Sequence[int], int]] = None,\n prob: float = 0.1,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n if holes < 1:\n raise ValueError(\"number of holes must be greater than 0.\")\n self.holes = holes\n self.spatial_size = spatial_size\n self.fill_value = fill_value\n self.max_holes = max_holes\n self.max_spatial_size = max_spatial_size\n self.hole_coords: List = []\n\n def randomize(self, img_size: Sequence[int]) -> None:\n super().randomize(None)\n size = fall_back_tuple(self.spatial_size, img_size)\n self.hole_coords = [] # clear previously computed coords\n num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1)\n for _ in range(num_holes):\n if self.max_spatial_size is not None:\n max_size = fall_back_tuple(self.max_spatial_size, img_size)\n size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size)))\n valid_size = get_valid_patch_size(img_size, size)\n self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R))\n\n def __call__(self, img: np.ndarray):\n 
self.randomize(img.shape[1:])\n if self._do_transform:\n for h in self.hole_coords:\n img[h] = self.fill_value\n\n return img\n\n\nclass HistogramNormalize(Transform):\n \"\"\"\n Apply the histogram normalization to input image.\n Refer to: https://github.com/facebookresearch/CovidPrognosis/blob/master/covidprognosis/data/transforms.py#L83.\n\n Args:\n num_bins: number of the bins to use in histogram, default to `256`. for more details:\n https://numpy.org/doc/stable/reference/generated/numpy.histogram.html.\n min: the min value to normalize input image, default to `0`.\n max: the max value to normalize input image, default to `255`.\n mask: if provided, must be ndarray of bools or 0s and 1s, and same shape as `image`.\n only points at which `mask==True` are used for the equalization.\n can also provide the mask along with img at runtime.\n dtype: data type of the output, default to `float32`.\n\n \"\"\"\n\n def __init__(\n self,\n num_bins: int = 256,\n min: int = 0,\n max: int = 255,\n mask: Optional[np.ndarray] = None,\n dtype: DtypeLike = np.float32,\n ) -> None:\n self.num_bins = num_bins\n self.min = min\n self.max = max\n self.mask = mask\n self.dtype = dtype\n\n def __call__(self, img: np.ndarray, mask: Optional[np.ndarray] = None) -> np.ndarray:\n return equalize_hist(\n img=img,\n mask=mask if mask is not None else self.mask,\n num_bins=self.num_bins,\n min=self.min,\n max=self.max,\n dtype=self.dtype,\n )\n", |
| "monai/transforms/intensity/dictionary.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of dictionary-based wrappers around the \"vanilla\" transforms for intensity adjustment\ndefined in :py:class:`monai.transforms.intensity.array`.\n\nClass names are ended with 'd' to denote dictionary-based transforms.\n\"\"\"\n\nfrom collections.abc import Iterable\nfrom typing import Any, Callable, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import DtypeLike, KeysCollection, NdarrayTensor\nfrom monai.data.utils import get_random_patch, get_valid_patch_size\nfrom monai.transforms.intensity.array import (\n AdjustContrast,\n GaussianSharpen,\n GaussianSmooth,\n GibbsNoise,\n HistogramNormalize,\n KSpaceSpikeNoise,\n MaskIntensity,\n NormalizeIntensity,\n RandBiasField,\n RandKSpaceSpikeNoise,\n RandRicianNoise,\n ScaleIntensity,\n ScaleIntensityRange,\n ScaleIntensityRangePercentiles,\n ShiftIntensity,\n StdShiftIntensity,\n ThresholdIntensity,\n)\nfrom monai.transforms.transform import MapTransform, RandomizableTransform\nfrom monai.transforms.utils import is_positive\nfrom monai.utils import convert_to_dst_type, ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple\n\n__all__ = [\n \"RandGaussianNoised\",\n \"RandRicianNoised\",\n \"ShiftIntensityd\",\n \"RandShiftIntensityd\",\n \"ScaleIntensityd\",\n \"RandScaleIntensityd\",\n 
\"StdShiftIntensityd\",\n \"RandStdShiftIntensityd\",\n \"RandBiasFieldd\",\n \"NormalizeIntensityd\",\n \"ThresholdIntensityd\",\n \"ScaleIntensityRanged\",\n \"AdjustContrastd\",\n \"RandAdjustContrastd\",\n \"ScaleIntensityRangePercentilesd\",\n \"MaskIntensityd\",\n \"GaussianSmoothd\",\n \"RandGaussianSmoothd\",\n \"GaussianSharpend\",\n \"RandGaussianSharpend\",\n \"GibbsNoised\",\n \"RandGibbsNoised\",\n \"KSpaceSpikeNoised\",\n \"RandKSpaceSpikeNoised\",\n \"RandHistogramShiftd\",\n \"RandCoarseDropoutd\",\n \"HistogramNormalized\",\n \"RandGaussianNoiseD\",\n \"RandGaussianNoiseDict\",\n \"ShiftIntensityD\",\n \"ShiftIntensityDict\",\n \"RandShiftIntensityD\",\n \"RandShiftIntensityDict\",\n \"ScaleIntensityD\",\n \"ScaleIntensityDict\",\n \"StdShiftIntensityD\",\n \"StdShiftIntensityDict\",\n \"RandScaleIntensityD\",\n \"RandScaleIntensityDict\",\n \"RandStdShiftIntensityD\",\n \"RandStdShiftIntensityDict\",\n \"RandBiasFieldD\",\n \"RandBiasFieldDict\",\n \"NormalizeIntensityD\",\n \"NormalizeIntensityDict\",\n \"ThresholdIntensityD\",\n \"ThresholdIntensityDict\",\n \"ScaleIntensityRangeD\",\n \"ScaleIntensityRangeDict\",\n \"AdjustContrastD\",\n \"AdjustContrastDict\",\n \"RandAdjustContrastD\",\n \"RandAdjustContrastDict\",\n \"ScaleIntensityRangePercentilesD\",\n \"ScaleIntensityRangePercentilesDict\",\n \"MaskIntensityD\",\n \"MaskIntensityDict\",\n \"GaussianSmoothD\",\n \"GaussianSmoothDict\",\n \"RandGaussianSmoothD\",\n \"RandGaussianSmoothDict\",\n \"GaussianSharpenD\",\n \"GaussianSharpenDict\",\n \"RandGaussianSharpenD\",\n \"RandGaussianSharpenDict\",\n \"GibbsNoiseD\",\n \"GibbsNoiseDict\",\n \"RandGibbsNoiseD\",\n \"RandGibbsNoiseDict\",\n \"KSpaceSpikeNoiseD\",\n \"KSpaceSpikeNoiseDict\",\n \"RandHistogramShiftD\",\n \"RandHistogramShiftDict\",\n \"RandRicianNoiseD\",\n \"RandRicianNoiseDict\",\n \"RandCoarseDropoutD\",\n \"RandCoarseDropoutDict\",\n \"HistogramNormalizeD\",\n \"HistogramNormalizeDict\",\n]\n\n\nclass 
RandGaussianNoised(RandomizableTransform, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandGaussianNoise`.\n Add Gaussian noise to image. This transform assumes all the expected fields have same shape, if want to add\n different noise for every field, please use this transform separately.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n prob: Probability to add Gaussian noise.\n mean: Mean or “centre” of the distribution.\n std: Standard deviation (spread) of distribution.\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n\n backend = [\"torch\", \"numpy\"]\n\n def __init__(\n self,\n keys: KeysCollection,\n prob: float = 0.1,\n mean: Union[Sequence[float], float] = 0.0,\n std: float = 0.1,\n allow_missing_keys: bool = False,\n ) -> None:\n MapTransform.__init__(self, keys, allow_missing_keys)\n RandomizableTransform.__init__(self, prob)\n self.mean = ensure_tuple_rep(mean, len(self.keys))\n self.std = std\n self._noise: List[np.ndarray] = []\n\n def randomize(self, im_shape: Sequence[int]) -> None:\n super().randomize(None)\n self._noise.clear()\n for m in self.mean:\n self._noise.append(self.R.normal(m, self.R.uniform(0, self.std), size=im_shape))\n\n def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]:\n d = dict(data)\n\n image_shape = d[self.keys[0]].shape # image shape from the first data key\n self.randomize(image_shape)\n if len(self._noise) != len(self.keys):\n raise RuntimeError(\"inconsistent noise items and keys.\")\n if not self._do_transform:\n return d\n for key, noise in self.key_iterator(d, self._noise):\n noise, *_ = convert_to_dst_type(noise, d[key])\n d[key] = d[key] + noise\n return d\n\n\nclass RandRicianNoised(RandomizableTransform, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandRicianNoise`.\n Add Rician noise to image. 
class ShiftIntensityd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.ShiftIntensity`.

    Shifts the intensity of every image under ``keys`` by a fixed ``offset``,
    optionally multiplied at runtime by a factor looked up in the per-key
    meta data dictionary.
    """

    def __init__(
        self,
        keys: KeysCollection,
        offset: float,
        factor_key: Optional[str] = None,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = "meta_dict",
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            offset: offset value to shift the intensity of image.
            factor_key: if not None, used as the key to look up a multiplier in the
                meta data dictionary of each `key` at runtime; the `offset` is then
                multiplied by that value before shifting. `IntensityStatsd` can
                pre-compute such statistics and store them in the meta data.
                may also be a sequence of strings, mapping to `keys`.
            meta_keys: explicit keys of the corresponding meta data dictionaries,
                used to look up the factor value if `factor_key` is not None.
                for data with key `image`, the metadata by default is in
                `image_meta_dict`; the meta data is a dictionary that contains
                filename, original_shape, etc. may be a sequence of strings mapping
                to `keys`; if None, constructed as `key_{meta_key_postfix}`.
            meta_key_postfix: if meta_keys is None, fetch the meta data with
                `key_{postfix}`, default postfix is `meta_dict`.
                used to look up the factor value if `factor_key` is not None.
            allow_missing_keys: don't raise exception if key is missing.
        """
        super().__init__(keys, allow_missing_keys)
        n_keys = len(self.keys)
        self.factor_key = ensure_tuple_rep(factor_key, n_keys)
        self.meta_keys = ensure_tuple(meta_keys) if meta_keys is not None else ensure_tuple_rep(None, n_keys)
        if len(self.meta_keys) != n_keys:
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, n_keys)
        self.shifter = ShiftIntensity(offset)

    def __call__(self, data) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        for key, factor_key, meta_key, meta_key_postfix in self.key_iterator(
            d, self.factor_key, self.meta_keys, self.meta_key_postfix
        ):
            # fall back to the conventional `<key>_<postfix>` meta dict key
            meta_key = meta_key or f"{key}_{meta_key_postfix}"
            factor = d[meta_key].get(factor_key) if meta_key in d else None
            if factor is None:
                # no runtime factor available: use the configured offset as-is
                d[key] = self.shifter(d[key], offset=None)
            else:
                d[key] = self.shifter(d[key], offset=self.shifter.offset * factor)
        return d
`keys`.\n if None, will try to construct meta_keys by `key_{meta_key_postfix}`.\n meta_key_postfix: if meta_keys is None, use `key_{postfix}` to to fetch the meta data according\n to the key data, default is `meta_dict`, the meta data is a dictionary object.\n used to extract the factor value is `factor_key` is not None.\n prob: probability of rotating.\n (Default 0.1, with 10% probability it returns a rotated array.)\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n MapTransform.__init__(self, keys, allow_missing_keys)\n RandomizableTransform.__init__(self, prob)\n\n if isinstance(offsets, (int, float)):\n self.offsets = (min(-offsets, offsets), max(-offsets, offsets))\n else:\n if len(offsets) != 2:\n raise ValueError(\"offsets should be a number or pair of numbers.\")\n self.offsets = (min(offsets), max(offsets))\n self._offset = self.offsets[0]\n self.factor_key = ensure_tuple_rep(factor_key, len(self.keys))\n self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)\n if len(self.keys) != len(self.meta_keys):\n raise ValueError(\"meta_keys should have the same length as keys.\")\n self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))\n self.shifter = ShiftIntensity(self._offset)\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])\n super().randomize(None)\n\n def __call__(self, data) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n self.randomize()\n if not self._do_transform:\n return d\n for key, factor_key, meta_key, meta_key_postfix in self.key_iterator(\n d, self.factor_key, self.meta_keys, self.meta_key_postfix\n ):\n meta_key = meta_key or f\"{key}_{meta_key_postfix}\"\n factor: Optional[float] = d[meta_key].get(factor_key) if meta_key in d else None\n offset = self._offset if factor is None else self._offset * factor\n d[key] = self.shifter(d[key], offset=offset)\n return 
class StdShiftIntensityd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.StdShiftIntensity`.
    Shifts each image by ``factor`` times its standard deviation:
    ``v = v + factor * std(v)``.
    """

    def __init__(
        self,
        keys: KeysCollection,
        factor: float,
        nonzero: bool = False,
        channel_wise: bool = False,
        dtype: DtypeLike = np.float32,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            factor: amount to shift by, applied as ``v = v + factor * std(v)``.
            nonzero: whether only non-zero values are counted when computing std.
            channel_wise: if True, calculate on each channel separately; the first
                dimension is then assumed to be the channel dimension.
            dtype: output data type, defaults to float32.
            allow_missing_keys: don't raise exception if key is missing.
        """
        super().__init__(keys, allow_missing_keys)
        self.shifter = StdShiftIntensity(factor, nonzero, channel_wise, dtype)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.shifter(out[k])
        return out
class ScaleIntensityd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.ScaleIntensity`.
    Scale the intensity of input image to the given value range (minv, maxv).
    If `minv` and `maxv` not provided, use `factor` to scale image by ``v = v * (1 + factor)``.
    """

    def __init__(
        self,
        keys: KeysCollection,
        minv: Optional[float] = 0.0,
        maxv: Optional[float] = 1.0,
        factor: Optional[float] = None,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            minv: minimum value of the output data range.
            maxv: maximum value of the output data range.
            factor: multiplicative factor, applied as ``v = v * (1 + factor)``.
                only used when both `minv` and `maxv` are set to None.
            allow_missing_keys: don't raise exception if key is missing.

        """
        super().__init__(keys, allow_missing_keys)
        self.scaler = ScaleIntensity(minv, maxv, factor)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.scaler(out[k])
        return out
class RandBiasFieldd(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandBiasField`.

    Randomly multiplies each image by a smooth polynomial bias field, with
    probability ``prob`` decided once per call for the whole dictionary.
    """

    def __init__(
        self,
        keys: KeysCollection,
        degree: int = 3,
        coeff_range: Tuple[float, float] = (0.0, 0.1),
        dtype: DtypeLike = np.float32,
        prob: float = 1.0,
        allow_missing_keys: bool = False,
    ) -> None:
        """
        Args:
            keys: keys of the corresponding items to be transformed.
                See also: :py:class:`monai.transforms.compose.MapTransform`
            degree: degree of freedom of the polynomials. The value should be no less than 1.
                Defaults to 3.
            coeff_range: range of the random coefficients. Defaults to (0.0, 0.1).
            dtype: output data type, defaults to float32.
            prob: probability to do random bias field.
            allow_missing_keys: don't raise exception if key is missing.

        """
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)

        # Fix: the dictionary-level `prob` (checked via self._do_transform in
        # __call__) already gates whether the transform fires. Previously `prob`
        # was also forwarded to the inner RandBiasField, which randomizes again,
        # so the effective application probability was prob * prob. Once this
        # wrapper decides to run, the inner transform must always apply.
        self.rand_bias_field = RandBiasField(degree=degree, coeff_range=coeff_range, dtype=dtype, prob=1.0)

    def randomize(self, data: Optional[Any] = None) -> None:
        # only decides self._do_transform; coefficient sampling happens inside
        # the wrapped RandBiasField.
        super().randomize(None)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        self.randomize()
        if not self._do_transform:
            return d
        for key in self.key_iterator(d):
            d[key] = self.rand_bias_field(d[key])
        return d
class ThresholdIntensityd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.ThresholdIntensity`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: monai.transforms.MapTransform
        threshold: the threshold used to filter intensity values.
        above: whether to keep values above the threshold (True) or below it
            (False), default is True.
        cval: fill value for the filtered-out parts of the image, default is 0.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        threshold: float,
        above: bool = True,
        cval: float = 0.0,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.filter = ThresholdIntensity(threshold, above, cval)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.filter(out[k])
        return out
class AdjustContrastd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.AdjustContrast`.
    Changes image intensity by gamma; each pixel/voxel intensity is updated as:

    `x = ((x - min) / intensity_range) ^ gamma * intensity_range + min`

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: monai.transforms.MapTransform
        gamma: gamma value used to adjust the contrast.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(self, keys: KeysCollection, gamma: float, allow_missing_keys: bool = False) -> None:
        super().__init__(keys, allow_missing_keys)
        self.adjuster = AdjustContrast(gamma)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.adjuster(out[k])
        return out
class ScaleIntensityRangePercentilesd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.ScaleIntensityRangePercentiles`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: monai.transforms.MapTransform
        lower: lower intensity percentile.
        upper: upper intensity percentile.
        b_min: intensity target range min.
        b_max: intensity target range max.
        clip: whether to perform clip after scaling.
        relative: whether to scale to the corresponding percentiles of [b_min, b_max]
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        lower: float,
        upper: float,
        b_min: float,
        b_max: float,
        clip: bool = False,
        relative: bool = False,
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.scaler = ScaleIntensityRangePercentiles(lower, upper, b_min, b_max, clip, relative)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.scaler(out[k])
        return out
class GaussianSmoothd(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSmooth`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        sigma: gaussian kernel std; if a list of values, it must match the count of
            spatial dimensions of the input data, one value per spatial dimension;
            a single value is used for all spatial dimensions.
        approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace".
            see also :py:meth:`monai.networks.layers.GaussianFilter`.
        allow_missing_keys: don't raise exception if key is missing.

    """

    def __init__(
        self,
        keys: KeysCollection,
        sigma: Union[Sequence[float], float],
        approx: str = "erf",
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.converter = GaussianSmooth(sigma, approx=approx)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.converter(out[k])
        return out
class GaussianSharpend(MapTransform):
    """
    Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSharpen`.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: :py:class:`monai.transforms.compose.MapTransform`
        sigma1: sigma parameter for the first gaussian kernel. a list of values must
            match the count of spatial dimensions of the input data, one value per
            spatial dimension; a single value is used for all spatial dimensions.
        sigma2: sigma parameter for the second gaussian kernel, same conventions
            as `sigma1`.
        alpha: weight parameter used to compute the final result.
        approx: discrete Gaussian kernel type, available options are "erf", "sampled", and "scalespace".
            see also :py:meth:`monai.networks.layers.GaussianFilter`.
        allow_missing_keys: don't raise exception if key is missing.

    """

    def __init__(
        self,
        keys: KeysCollection,
        sigma1: Union[Sequence[float], float] = 3.0,
        sigma2: Union[Sequence[float], float] = 1.0,
        alpha: float = 30.0,
        approx: str = "erf",
        allow_missing_keys: bool = False,
    ) -> None:
        super().__init__(keys, allow_missing_keys)
        self.converter = GaussianSharpen(sigma1, sigma2, alpha, approx=approx)

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        out = dict(data)
        for k in self.key_iterator(out):
            out[k] = self.converter(out[k])
        return out
smaller than `sigma1_y` and randomly select from [Y, sigma1_y].\n sigma2_z: randomly select sigma value for the third spatial dimension(if have) of second gaussian kernel.\n if only 1 value `Z` provided, it must be smaller than `sigma1_z` and randomly select from [Z, sigma1_z].\n alpha: randomly select weight parameter to compute the final result.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n prob: probability of Gaussian sharpen.\n allow_missing_keys: don't raise exception if key is missing.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n sigma1_x: Tuple[float, float] = (0.5, 1.0),\n sigma1_y: Tuple[float, float] = (0.5, 1.0),\n sigma1_z: Tuple[float, float] = (0.5, 1.0),\n sigma2_x: Union[Tuple[float, float], float] = 0.5,\n sigma2_y: Union[Tuple[float, float], float] = 0.5,\n sigma2_z: Union[Tuple[float, float], float] = 0.5,\n alpha: Tuple[float, float] = (10.0, 30.0),\n approx: str = \"erf\",\n prob: float = 0.1,\n allow_missing_keys: bool = False,\n ):\n MapTransform.__init__(self, keys, allow_missing_keys)\n RandomizableTransform.__init__(self, prob)\n self.sigma1_x = sigma1_x\n self.sigma1_y = sigma1_y\n self.sigma1_z = sigma1_z\n self.sigma2_x = sigma2_x\n self.sigma2_y = sigma2_y\n self.sigma2_z = sigma2_z\n self.alpha = alpha\n self.approx = approx\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1])\n self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1])\n self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1])\n sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x\n sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y\n sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) 
class RandHistogramShiftd(RandomizableTransform, MapTransform):
    """
    Dictionary-based version :py:class:`monai.transforms.RandHistogramShift`.
    Applies a random nonlinear, monotone remapping of each image's intensity
    histogram, driven by a small set of randomly perturbed control points.

    Args:
        keys: keys of the corresponding items to be transformed.
            See also: monai.transforms.MapTransform
        num_control_points: number of control points governing the nonlinear
            intensity mapping; fewer control points allow larger intensity shifts.
            if two values are provided, the count is sampled from the range
            (min_value, max_value) on each call.
        prob: probability of histogram shift.
        allow_missing_keys: don't raise exception if key is missing.
    """

    def __init__(
        self,
        keys: KeysCollection,
        num_control_points: Union[Tuple[int, int], int] = 10,
        prob: float = 0.1,
        allow_missing_keys: bool = False,
    ) -> None:
        MapTransform.__init__(self, keys, allow_missing_keys)
        RandomizableTransform.__init__(self, prob)
        if isinstance(num_control_points, int):
            lo = hi = num_control_points
        else:
            if len(num_control_points) != 2:
                raise ValueError("num_control points should be a number or a pair of numbers")
            lo, hi = min(num_control_points), max(num_control_points)
        # interpolation needs at least one movable interior control point
        if lo <= 2:
            raise ValueError("num_control_points should be greater than or equal to 3")
        self.num_control_points = (lo, hi)

    def randomize(self, data: Optional[Any] = None) -> None:
        super().randomize(None)
        n_points = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1)
        self.reference_control_points = np.linspace(0, 1, n_points)
        self.floating_control_points = np.copy(self.reference_control_points)
        # perturb every interior point uniformly between its two neighbours,
        # which keeps the mapping monotone
        for idx in range(1, n_points - 1):
            self.floating_control_points[idx] = self.R.uniform(
                self.floating_control_points[idx - 1], self.floating_control_points[idx + 1]
            )

    def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
        d = dict(data)
        self.randomize()
        if not self._do_transform:
            return d
        for key in self.key_iterator(d):
            img = d[key]
            img_min, img_max = img.min(), img.max()
            span = img_max - img_min
            # scale the unit-interval control points to this image's range
            xp = self.reference_control_points * span + img_min
            fp = self.floating_control_points * span + img_min
            d[key] = np.interp(img, xp, fp).astype(img.dtype)
        return d
default: True.\n allow_missing_keys: do not raise exception if key is missing.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n prob: float = 0.1,\n alpha: Sequence[float] = (0.0, 1.0),\n as_tensor_output: bool = True,\n allow_missing_keys: bool = False,\n ) -> None:\n\n MapTransform.__init__(self, keys, allow_missing_keys)\n RandomizableTransform.__init__(self, prob=prob)\n self.alpha = alpha\n self.sampled_alpha = -1.0 # stores last alpha sampled by randomize()\n self.as_tensor_output = as_tensor_output\n\n def __call__(\n self, data: Mapping[Hashable, Union[torch.Tensor, np.ndarray]]\n ) -> Dict[Hashable, Union[torch.Tensor, np.ndarray]]:\n\n d = dict(data)\n self._randomize(None)\n\n for i, key in enumerate(self.key_iterator(d)):\n if self._do_transform:\n if i == 0:\n transform = GibbsNoise(self.sampled_alpha, self.as_tensor_output)\n d[key] = transform(d[key])\n else:\n if isinstance(d[key], np.ndarray) and self.as_tensor_output:\n d[key] = torch.Tensor(d[key])\n elif isinstance(d[key], torch.Tensor) and not self.as_tensor_output:\n d[key] = self._to_numpy(d[key])\n return d\n\n def _randomize(self, _: Any) -> None:\n \"\"\"\n (1) Set random variable to apply the transform.\n (2) Get alpha from uniform distribution.\n \"\"\"\n super().randomize(None)\n self.sampled_alpha = self.R.uniform(self.alpha[0], self.alpha[1])\n\n def _to_numpy(self, d: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n if isinstance(d, torch.Tensor):\n d_numpy: np.ndarray = d.cpu().detach().numpy()\n return d_numpy\n\n\nclass GibbsNoised(MapTransform):\n \"\"\"\n Dictionary-based version of GibbsNoise.\n\n The transform applies Gibbs noise to 2D/3D MRI images. 
Gibbs artifacts\n are one of the common type of type artifacts appearing in MRI scans.\n\n For general information on Gibbs artifacts, please refer to:\n https://pubs.rsna.org/doi/full/10.1148/rg.313105115\n https://pubs.rsna.org/doi/full/10.1148/radiographics.22.4.g02jl14949\n\n Args:\n keys: 'image', 'label', or ['image', 'label'] depending on which data\n you need to transform.\n alpha (float): Parametrizes the intensity of the Gibbs noise filter applied. Takes\n values in the interval [0,1] with alpha = 0 acting as the identity mapping.\n as_tensor_output: if true return torch.Tensor, else return np.array. default: True.\n allow_missing_keys: do not raise exception if key is missing.\n \"\"\"\n\n def __init__(\n self, keys: KeysCollection, alpha: float = 0.5, as_tensor_output: bool = True, allow_missing_keys: bool = False\n ) -> None:\n\n MapTransform.__init__(self, keys, allow_missing_keys)\n self.transform = GibbsNoise(alpha, as_tensor_output)\n\n def __call__(\n self, data: Mapping[Hashable, Union[torch.Tensor, np.ndarray]]\n ) -> Dict[Hashable, Union[torch.Tensor, np.ndarray]]:\n\n d = dict(data)\n for key in self.key_iterator(d):\n d[key] = self.transform(d[key])\n return d\n\n\nclass KSpaceSpikeNoised(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.KSpaceSpikeNoise`.\n\n Applies localized spikes in `k`-space at the given locations and intensities.\n Spike (Herringbone) artifact is a type of data acquisition artifact which\n may occur during MRI scans.\n\n For general information on spike artifacts, please refer to:\n\n `AAPM/RSNA physics tutorial for residents: fundamental physics of MR imaging\n <https://pubmed.ncbi.nlm.nih.gov/16009826>`_.\n\n `Body MRI artifacts in clinical practice: A physicist's and radiologist's\n perspective <https://doi.org/10.1002/jmri.24288>`_.\n\n Args:\n keys: \"image\", \"label\", or [\"image\", \"label\"] depending\n on which data you need to transform.\n loc: spatial location for the 
spikes. For\n images with 3D spatial dimensions, the user can provide (C, X, Y, Z)\n to fix which channel C is affected, or (X, Y, Z) to place the same\n spike in all channels. For 2D cases, the user can provide (C, X, Y)\n or (X, Y).\n k_intensity: value for the log-intensity of the\n `k`-space version of the image. If one location is passed to ``loc`` or the\n channel is not specified, then this argument should receive a float. If\n ``loc`` is given a sequence of locations, then this argument should\n receive a sequence of intensities. This value should be tested as it is\n data-dependent. The default values are the 2.5 the mean of the\n log-intensity for each channel.\n as_tensor_output: if ``True`` return torch.Tensor, else return np.array.\n Default: ``True``.\n allow_missing_keys: do not raise exception if key is missing.\n\n Example:\n When working with 4D data,\n ``KSpaceSpikeNoised(\"image\", loc = ((3,60,64,32), (64,60,32)), k_intensity = (13,14))``\n will place a spike at `[3, 60, 64, 32]` with `log-intensity = 13`, and\n one spike per channel located respectively at `[: , 64, 60, 32]`\n with `log-intensity = 14`.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n loc: Union[Tuple, Sequence[Tuple]],\n k_intensity: Optional[Union[Sequence[float], float]] = None,\n as_tensor_output: bool = True,\n allow_missing_keys: bool = False,\n ) -> None:\n\n super().__init__(keys, allow_missing_keys)\n self.transform = KSpaceSpikeNoise(loc, k_intensity, as_tensor_output)\n\n def __call__(\n self, data: Mapping[Hashable, Union[torch.Tensor, np.ndarray]]\n ) -> Dict[Hashable, Union[torch.Tensor, np.ndarray]]:\n \"\"\"\n Args:\n data: Expects image/label to have dimensions (C, H, W) or\n (C, H, W, D), where C is the channel.\n \"\"\"\n d = dict(data)\n for key in self.key_iterator(d):\n d[key] = self.transform(d[key])\n return d\n\n\nclass RandKSpaceSpikeNoised(RandomizableTransform, MapTransform):\n \"\"\"\n Dictionary-based version of 
:py:class:`monai.transforms.RandKSpaceSpikeNoise`.\n\n Naturalistic data augmentation via spike artifacts. The transform applies\n localized spikes in `k`-space.\n\n For general information on spike artifacts, please refer to:\n\n `AAPM/RSNA physics tutorial for residents: fundamental physics of MR imaging\n <https://pubmed.ncbi.nlm.nih.gov/16009826>`_.\n\n `Body MRI artifacts in clinical practice: A physicist's and radiologist's\n perspective <https://doi.org/10.1002/jmri.24288>`_.\n\n Args:\n keys: \"image\", \"label\", or [\"image\", \"label\"] depending\n on which data you need to transform.\n global_prob: probability of applying transform to the dictionary.\n prob: probability to add spike artifact to each item in the\n dictionary provided it is realized that the noise will be applied\n to the dictionary.\n intensity_ranges: Dictionary with intensity\n ranges to sample for each key. Given a dictionary value of `(a, b)` the\n transform will sample the log-intensity from the interval `(a, b)` uniformly for all\n channels of the respective key. If a sequence of intevals `((a0, b0), (a1, b1), ...)`\n is given, then the transform will sample from each interval for each\n respective channel. In the second case, the number of 2-tuples must\n match the number of channels. Default ranges is `(0.95x, 1.10x)`\n where `x` is the mean log-intensity for each channel.\n channel_wise: treat each channel independently. True by\n default.\n common_sampling: If ``True`` same values for location and log-intensity\n will be sampled for the image and label.\n common_seed: Seed to be used in case ``common_sampling = True``.\n as_tensor_output: if ``True`` return torch.Tensor, else return\n np.array. 
Default: ``True``.\n allow_missing_keys: do not raise exception if key is missing.\n\n Example:\n To apply `k`-space spikes randomly on the image only, with probability\n 0.5, and log-intensity sampled from the interval [13, 15] for each\n channel independently, one uses\n ``RandKSpaceSpikeNoised(\"image\", prob=0.5, intensity_ranges={\"image\":(13,15)}, channel_wise=True)``.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n global_prob: float = 1.0,\n prob: float = 0.1,\n intensity_ranges: Optional[Mapping[Hashable, Sequence[Union[Sequence[float], float]]]] = None,\n channel_wise: bool = True,\n common_sampling: bool = False,\n common_seed: int = 42,\n as_tensor_output: bool = True,\n allow_missing_keys: bool = False,\n ):\n\n MapTransform.__init__(self, keys, allow_missing_keys)\n RandomizableTransform.__init__(self, global_prob)\n\n self.common_sampling = common_sampling\n self.common_seed = common_seed\n self.as_tensor_output = as_tensor_output\n # the spikes artifact is amplitude dependent so we instantiate one per key\n self.transforms = {}\n if isinstance(intensity_ranges, Mapping):\n for k in self.keys:\n self.transforms[k] = RandKSpaceSpikeNoise(\n prob, intensity_ranges[k], channel_wise, self.as_tensor_output\n )\n else:\n for k in self.keys:\n self.transforms[k] = RandKSpaceSpikeNoise(prob, None, channel_wise, self.as_tensor_output)\n\n def __call__(\n self, data: Mapping[Hashable, Union[torch.Tensor, np.ndarray]]\n ) -> Dict[Hashable, Union[torch.Tensor, np.ndarray]]:\n \"\"\"\n Args:\n data: Expects image/label to have dimensions (C, H, W) or\n (C, H, W, D), where C is the channel.\n \"\"\"\n d = dict(data)\n super().randomize(None)\n\n # In case the same spikes are desired for both image and label.\n if self.common_sampling:\n for k in self.keys:\n self.transforms[k].set_random_state(self.common_seed)\n\n for key, t in self.key_iterator(d, self.transforms):\n if self._do_transform:\n d[key] = self.transforms[t](d[key])\n else:\n if 
isinstance(d[key], np.ndarray) and self.as_tensor_output:\n d[key] = torch.Tensor(d[key])\n elif isinstance(d[key], torch.Tensor) and not self.as_tensor_output:\n d[key] = self._to_numpy(d[key])\n return d\n\n def set_rand_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None) -> None:\n \"\"\"\n Set the random state locally to control the randomness.\n User should use this method instead of ``set_random_state``.\n\n Args:\n seed: set the random state with an integer seed.\n state: set the random state with a `np.random.RandomState` object.\"\"\"\n\n self.set_random_state(seed, state)\n for key in self.keys:\n self.transforms[key].set_random_state(seed, state)\n\n def _to_numpy(self, d: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n if isinstance(d, torch.Tensor):\n d_numpy: np.ndarray = d.cpu().detach().numpy()\n return d_numpy\n\n\nclass RandCoarseDropoutd(RandomizableTransform, MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.RandCoarseDropout`.\n Expect all the data specified by `keys` have same spatial shape and will randomly dropout the same regions\n for every key, if want to dropout differently for every key, please use this transform separately.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n holes: number of regions to dropout, if `max_holes` is not None, use this arg as the minimum number to\n randomly select the expected number of regions.\n spatial_size: spatial size of the regions to dropout, if `max_spatial_size` is not None, use this arg\n as the minimum spatial size to randomly select size for every region.\n if some components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of input img size. 
For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n fill_value: target value to fill the dropout regions.\n max_holes: if not None, define the maximum number to randomly select the expected number of regions.\n max_spatial_size: if not None, define the maximum spatial size to randomly select size for every region.\n if some components of the `max_spatial_size` are non-positive values, the transform will use the\n corresponding components of input img size. For example, `max_spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n prob: probability of applying the transform.\n allow_missing_keys: don't raise exception if key is missing.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n holes: int,\n spatial_size: Union[Sequence[int], int],\n fill_value: Union[float, int] = 0,\n max_holes: Optional[int] = None,\n max_spatial_size: Optional[Union[Sequence[int], int]] = None,\n prob: float = 0.1,\n allow_missing_keys: bool = False,\n ):\n MapTransform.__init__(self, keys, allow_missing_keys)\n RandomizableTransform.__init__(self, prob)\n if holes < 1:\n raise ValueError(\"number of holes must be greater than 0.\")\n self.holes = holes\n self.spatial_size = spatial_size\n self.fill_value = fill_value\n self.max_holes = max_holes\n self.max_spatial_size = max_spatial_size\n self.hole_coords: List = []\n\n def randomize(self, img_size: Sequence[int]) -> None:\n super().randomize(None)\n size = fall_back_tuple(self.spatial_size, img_size)\n self.hole_coords = [] # clear previously computed coords\n num_holes = self.holes if self.max_holes is None else self.R.randint(self.holes, self.max_holes + 1)\n for _ in range(num_holes):\n if self.max_spatial_size is not None:\n max_size = fall_back_tuple(self.max_spatial_size, img_size)\n size = tuple(self.R.randint(low=size[i], high=max_size[i] + 1) for i in range(len(img_size)))\n valid_size = 
get_valid_patch_size(img_size, size)\n self.hole_coords.append((slice(None),) + get_random_patch(img_size, valid_size, self.R))\n\n def __call__(self, data):\n d = dict(data)\n # expect all the specified keys have same spatial shape\n self.randomize(d[self.keys[0]].shape[1:])\n if self._do_transform:\n for key in self.key_iterator(d):\n for h in self.hole_coords:\n d[key][h] = self.fill_value\n return d\n\n\nclass HistogramNormalized(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.HistogramNormalize`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n num_bins: number of the bins to use in histogram, default to `256`. for more details:\n https://numpy.org/doc/stable/reference/generated/numpy.histogram.html.\n min: the min value to normalize input image, default to `255`.\n max: the max value to normalize input image, default to `255`.\n mask: if provided, must be ndarray of bools or 0s and 1s, and same shape as `image`.\n only points at which `mask==True` are used for the equalization.\n can also provide the mask by `mask_key` at runtime.\n mask_key: if mask is None, will try to get the mask with `mask_key`.\n dtype: data type of the output, default to `float32`.\n allow_missing_keys: do not raise exception if key is missing.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n num_bins: int = 256,\n min: int = 0,\n max: int = 255,\n mask: Optional[np.ndarray] = None,\n mask_key: Optional[str] = None,\n dtype: DtypeLike = np.float32,\n allow_missing_keys: bool = False,\n ) -> None:\n super().__init__(keys, allow_missing_keys)\n self.transform = HistogramNormalize(num_bins=num_bins, min=min, max=max, mask=mask, dtype=dtype)\n self.mask_key = mask_key if mask is None else None\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.key_iterator(d):\n d[key] = 
self.transform(d[key], d[self.mask_key]) if self.mask_key is not None else self.transform(d[key])\n\n return d\n\n\nRandGaussianNoiseD = RandGaussianNoiseDict = RandGaussianNoised\nRandRicianNoiseD = RandRicianNoiseDict = RandRicianNoised\nShiftIntensityD = ShiftIntensityDict = ShiftIntensityd\nRandShiftIntensityD = RandShiftIntensityDict = RandShiftIntensityd\nStdShiftIntensityD = StdShiftIntensityDict = StdShiftIntensityd\nRandStdShiftIntensityD = RandStdShiftIntensityDict = RandStdShiftIntensityd\nRandBiasFieldD = RandBiasFieldDict = RandBiasFieldd\nScaleIntensityD = ScaleIntensityDict = ScaleIntensityd\nRandScaleIntensityD = RandScaleIntensityDict = RandScaleIntensityd\nNormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd\nThresholdIntensityD = ThresholdIntensityDict = ThresholdIntensityd\nScaleIntensityRangeD = ScaleIntensityRangeDict = ScaleIntensityRanged\nAdjustContrastD = AdjustContrastDict = AdjustContrastd\nRandAdjustContrastD = RandAdjustContrastDict = RandAdjustContrastd\nScaleIntensityRangePercentilesD = ScaleIntensityRangePercentilesDict = ScaleIntensityRangePercentilesd\nMaskIntensityD = MaskIntensityDict = MaskIntensityd\nGaussianSmoothD = GaussianSmoothDict = GaussianSmoothd\nRandGaussianSmoothD = RandGaussianSmoothDict = RandGaussianSmoothd\nGaussianSharpenD = GaussianSharpenDict = GaussianSharpend\nRandGaussianSharpenD = RandGaussianSharpenDict = RandGaussianSharpend\nRandHistogramShiftD = RandHistogramShiftDict = RandHistogramShiftd\nRandGibbsNoiseD = RandGibbsNoiseDict = RandGibbsNoised\nGibbsNoiseD = GibbsNoiseDict = GibbsNoised\nKSpaceSpikeNoiseD = KSpaceSpikeNoiseDict = KSpaceSpikeNoised\nRandKSpaceSpikeNoiseD = RandKSpaceSpikeNoiseDict = RandKSpaceSpikeNoised\nRandCoarseDropoutD = RandCoarseDropoutDict = RandCoarseDropoutd\nHistogramNormalizeD = HistogramNormalizeDict = HistogramNormalized\n", |
| "monai/transforms/transform.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of generic interfaces for MONAI transforms.\n\"\"\"\n\nimport logging\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, Dict, Generator, Hashable, Iterable, List, Optional, Tuple, TypeVar, Union\n\nimport numpy as np\nimport torch\n\nfrom monai import transforms\nfrom monai.config import KeysCollection\nfrom monai.utils import MAX_SEED, ensure_tuple\n\n__all__ = [\n \"ThreadUnsafe\",\n \"apply_transform\",\n \"Randomizable\",\n \"RandomizableTransform\",\n \"Transform\",\n \"MapTransform\",\n]\n\nReturnType = TypeVar(\"ReturnType\")\n\n\ndef _apply_transform(\n transform: Callable[..., ReturnType], parameters: Any, unpack_parameters: bool = False\n) -> ReturnType:\n \"\"\"\n Perform transformation `transform` with the provided parameters `parameters`.\n\n If `parameters` is a tuple and `unpack_items` is True, each parameter of `parameters` is unpacked\n as arguments to `transform`.\n Otherwise `parameters` is considered as single argument to `transform`.\n\n Args:\n transform (Callable[..., ReturnType]): a callable to be used to transform `data`.\n parameters (Any): parameters for the `transform`.\n unpack_parameters (bool, optional): whether to unpack parameters for `transform`. 
Defaults to False.\n\n Returns:\n ReturnType: The return type of `transform`.\n \"\"\"\n if isinstance(parameters, tuple) and unpack_parameters:\n return transform(*parameters)\n\n return transform(parameters)\n\n\ndef apply_transform(\n transform: Callable[..., ReturnType],\n data: Any,\n map_items: bool = True,\n unpack_items: bool = False,\n) -> Union[List[ReturnType], ReturnType]:\n \"\"\"\n Transform `data` with `transform`.\n\n If `data` is a list or tuple and `map_data` is True, each item of `data` will be transformed\n and this method returns a list of outcomes.\n otherwise transform will be applied once with `data` as the argument.\n\n Args:\n transform (Callable[..., ReturnType]): a callable to be used to transform `data`.\n data (Any): an object to be transformed.\n map_items (bool, optional): whether to apply transform to each item in `data`,\n if `data` is a list or tuple. Defaults to True.\n unpack_items (bool, optional): [description]. Defaults to False.\n\n Raises:\n Exception: When ``transform`` raises an exception.\n\n Returns:\n Union[List[ReturnType], ReturnType]: The return type of `transform` or a list thereof.\n \"\"\"\n try:\n if isinstance(data, (list, tuple)) and map_items:\n return [_apply_transform(transform, item, unpack_items) for item in data]\n return _apply_transform(transform, data, unpack_items)\n except Exception as e:\n\n if not isinstance(transform, transforms.compose.Compose):\n # log the input data information of exact transform in the transform chain\n datastats = transforms.utility.array.DataStats(data_shape=False, value_range=False)\n logger = logging.getLogger(datastats._logger_name)\n logger.info(f\"\\n=== Transform input info -- {type(transform).__name__} ===\")\n if isinstance(data, (list, tuple)):\n data = data[0]\n\n def _log_stats(data, prefix: Optional[str] = \"Data\"):\n if isinstance(data, (np.ndarray, torch.Tensor)):\n # log data type, shape, range for array\n datastats(img=data, data_shape=True, 
value_range=True, prefix=prefix) # type: ignore\n else:\n # log data type and value for other meta data\n datastats(img=data, data_value=True, prefix=prefix)\n\n if isinstance(data, dict):\n for k, v in data.items():\n _log_stats(data=v, prefix=k)\n else:\n _log_stats(data=data)\n raise RuntimeError(f\"applying transform {transform}\") from e\n\n\nclass ThreadUnsafe:\n \"\"\"\n A class to denote that the transform will mutate its member variables,\n when being applied. Transforms inheriting this class should be used\n cautiously in a multi-thread context.\n\n This type is typically used by :py:class:`monai.data.CacheDataset` and\n its extensions, where the transform cache is built with multiple threads.\n \"\"\"\n\n pass\n\n\nclass Randomizable(ABC, ThreadUnsafe):\n \"\"\"\n An interface for handling random state locally, currently based on a class\n variable `R`, which is an instance of `np.random.RandomState`. This\n provides the flexibility of component-specific determinism without\n affecting the global states. It is recommended to use this API with\n :py:class:`monai.data.DataLoader` for deterministic behaviour of the\n preprocessing pipelines. This API is not thread-safe. 
Additionally,\n deepcopying instance of this class often causes insufficient randomness as\n the random states will be duplicated.\n \"\"\"\n\n R: np.random.RandomState = np.random.RandomState()\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"Randomizable\":\n \"\"\"\n Set the random state locally, to control the randomness, the derived\n classes should use :py:attr:`self.R` instead of `np.random` to introduce random\n factors.\n\n Args:\n seed: set the random state with an integer seed.\n state: set the random state with a `np.random.RandomState` object.\n\n Raises:\n TypeError: When ``state`` is not an ``Optional[np.random.RandomState]``.\n\n Returns:\n a Randomizable instance.\n\n \"\"\"\n if seed is not None:\n _seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed\n _seed = _seed % MAX_SEED\n self.R = np.random.RandomState(_seed)\n return self\n\n if state is not None:\n if not isinstance(state, np.random.RandomState):\n raise TypeError(f\"state must be None or a np.random.RandomState but is {type(state).__name__}.\")\n self.R = state\n return self\n\n self.R = np.random.RandomState()\n return self\n\n def randomize(self, data: Any) -> None:\n \"\"\"\n Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.\n\n all :py:attr:`self.R` calls happen here so that we have a better chance to\n identify errors of sync the random state.\n\n This method can generate the random factors based on properties of the input data.\n\n Raises:\n NotImplementedError: When the subclass does not override this method.\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n\n\nclass Transform(ABC):\n \"\"\"\n An abstract class of a ``Transform``.\n A transform is callable that processes ``data``.\n\n It could be stateful and may modify ``data`` in place,\n the implementation should be aware of:\n\n 
#. thread safety when mutating its own states.\n When used from a multi-process context, transform's instance variables are read-only.\n thread-unsafe transforms should inherit :py:class:`monai.transforms.ThreadUnsafe`.\n #. ``data`` content unused by this transform may still be used in the\n subsequent transforms in a composed transform.\n #. storing too much information in ``data`` may cause some memory issue or IPC sync issue,\n especially in the multi-processing environment of PyTorch DataLoader.\n\n See Also\n\n :py:class:`monai.transforms.Compose`\n \"\"\"\n\n backend: List[str] = []\n \"\"\"Transforms should add data types to this list if they are capable of performing a transform without\n modifying the input type. For example, [\\\"torch.Tensor\\\", \\\"np.ndarray\\\"] means that no copies of the data\n are required if the input is either \\\"torch.Tensor\\\" or \\\"np.ndarray\\\".\"\"\"\n\n @abstractmethod\n def __call__(self, data: Any):\n \"\"\"\n ``data`` is an element which often comes from an iteration over an\n iterable, such as :py:class:`torch.utils.data.Dataset`. This method should\n return an updated version of ``data``.\n To simplify the input validations, most of the transforms assume that\n\n - ``data`` is a Numpy ndarray, PyTorch Tensor or string\n - the data shape can be:\n\n #. string data without shape, `LoadImage` transform expects file paths\n #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,\n except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and\n `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)\n #. 
most of the post-processing transforms expect\n ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``\n\n - the channel dimension is not omitted even if number of channels is one\n\n This method can optionally take additional arguments to help execute transformation operation.\n\n Raises:\n NotImplementedError: When the subclass does not override this method.\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n\n\nclass RandomizableTransform(Randomizable, Transform):\n \"\"\"\n An interface for handling random state locally, currently based on a class variable `R`,\n which is an instance of `np.random.RandomState`.\n This class introduces a randomized flag `_do_transform`, is mainly for randomized data augmentation transforms.\n For example:\n\n .. code-block:: python\n\n from monai.transforms import RandomizableTransform\n\n class RandShiftIntensity100(RandomizableTransform):\n def randomize(self):\n super().randomize(None)\n self._offset = self.R.uniform(low=0, high=100)\n\n def __call__(self, img):\n self.randomize()\n if not self._do_transform:\n return img\n return img + self._offset\n\n transform = RandShiftIntensity()\n transform.set_random_state(seed=0)\n print(transform(10))\n\n \"\"\"\n\n def __init__(self, prob: float = 1.0, do_transform: bool = True):\n self._do_transform = do_transform\n self.prob = min(max(prob, 0.0), 1.0)\n\n def randomize(self, data: Any) -> None:\n \"\"\"\n Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.\n\n all :py:attr:`self.R` calls happen here so that we have a better chance to\n identify errors of sync the random state.\n\n This method can generate the random factors based on properties of the input data.\n \"\"\"\n self._do_transform = self.R.rand() < self.prob\n\n\nclass MapTransform(Transform):\n \"\"\"\n A subclass of :py:class:`monai.transforms.Transform` with an assumption\n that the ``data`` 
input of ``self.__call__`` is a MutableMapping such as ``dict``.\n\n The ``keys`` parameter will be used to get and set the actual data\n item to transform. That is, the callable of this transform should\n follow the pattern:\n\n .. code-block:: python\n\n def __call__(self, data):\n for key in self.keys:\n if key in data:\n # update output data with some_transform_function(data[key]).\n else:\n # raise exception unless allow_missing_keys==True.\n return data\n\n Raises:\n ValueError: When ``keys`` is an empty iterable.\n TypeError: When ``keys`` type is not in ``Union[Hashable, Iterable[Hashable]]``.\n\n \"\"\"\n\n def __init__(self, keys: KeysCollection, allow_missing_keys: bool = False) -> None:\n self.keys: Tuple[Hashable, ...] = ensure_tuple(keys)\n self.allow_missing_keys = allow_missing_keys\n if not self.keys:\n raise ValueError(\"keys must be non empty.\")\n for key in self.keys:\n if not isinstance(key, Hashable):\n raise TypeError(f\"keys must be one of (Hashable, Iterable[Hashable]) but is {type(keys).__name__}.\")\n\n @abstractmethod\n def __call__(self, data):\n \"\"\"\n ``data`` often comes from an iteration over an iterable,\n such as :py:class:`torch.utils.data.Dataset`.\n\n To simplify the input validations, this method assumes:\n\n - ``data`` is a Python dictionary\n - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element\n of ``self.keys``, the data shape can be:\n\n #. string data without shape, `LoadImaged` transform expects file paths\n #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,\n except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and\n `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)\n #. 
most of the post-processing transforms expect\n ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``\n\n - the channel dimension is not omitted even if number of channels is one\n\n Raises:\n NotImplementedError: When the subclass does not override this method.\n\n returns:\n An updated dictionary version of ``data`` by applying the transform.\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n\n def key_iterator(\n self,\n data: Dict[Hashable, Any],\n *extra_iterables: Optional[Iterable],\n ) -> Generator:\n \"\"\"\n Iterate across keys and optionally extra iterables. If key is missing, exception is raised if\n `allow_missing_keys==False` (default). If `allow_missing_keys==True`, key is skipped.\n\n Args:\n data: data that the transform will be applied to\n extra_iterables: anything else to be iterated through\n \"\"\"\n # if no extra iterables given, create a dummy list of Nones\n ex_iters = extra_iterables or [[None] * len(self.keys)]\n\n # loop over keys and any extra iterables\n _ex_iters: List[Any]\n for key, *_ex_iters in zip(self.keys, *ex_iters):\n # all normal, yield (what we yield depends on whether extra iterables were given)\n if key in data:\n yield (key,) + tuple(_ex_iters) if extra_iterables else key\n elif not self.allow_missing_keys:\n raise KeyError(f\"Key was missing ({key}) and allow_missing_keys==False\")\n", |
| "monai/transforms/utils.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport random\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import Any, Callable, Hashable, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\n\nimport monai.transforms.transform\nfrom monai.config import DtypeLike, IndexSelection\nfrom monai.networks.layers import GaussianFilter\nfrom monai.transforms.compose import Compose, OneOf\nfrom monai.transforms.transform import MapTransform\nfrom monai.utils import (\n GridSampleMode,\n InterpolateMode,\n InverseKeys,\n ensure_tuple,\n ensure_tuple_rep,\n ensure_tuple_size,\n fall_back_tuple,\n issequenceiterable,\n min_version,\n optional_import,\n)\n\nmeasure, _ = optional_import(\"skimage.measure\", \"0.14.2\", min_version)\nndimage, _ = optional_import(\"scipy.ndimage\")\ncp, has_cp = optional_import(\"cupy\")\ncp_ndarray, _ = optional_import(\"cupy\", name=\"ndarray\")\nexposure, has_skimage = optional_import(\"skimage.exposure\")\n\n__all__ = [\n \"allow_missing_keys_mode\",\n \"compute_divisible_spatial_size\",\n \"convert_inverse_interp_mode\",\n \"copypaste_arrays\",\n \"create_control_grid\",\n \"create_grid\",\n \"create_rotate\",\n \"create_scale\",\n \"create_shear\",\n \"create_translate\",\n \"extreme_points_to_image\",\n \"fill_holes\",\n \"Fourier\",\n \"generate_label_classes_crop_centers\",\n 
\"generate_pos_neg_label_crop_centers\",\n \"generate_spatial_bounding_box\",\n \"get_extreme_points\",\n \"get_largest_connected_component_mask\",\n \"img_bounds\",\n \"in_bounds\",\n \"is_empty\",\n \"is_positive\",\n \"map_binary_to_indices\",\n \"map_classes_to_indices\",\n \"map_spatial_axes\",\n \"rand_choice\",\n \"rescale_array\",\n \"rescale_array_int_max\",\n \"rescale_instance_array\",\n \"resize_center\",\n \"weighted_patch_samples\",\n \"zero_margins\",\n \"equalize_hist\",\n \"get_number_image_type_conversions\",\n]\n\n\ndef rand_choice(prob: float = 0.5) -> bool:\n \"\"\"\n Returns True if a randomly chosen number is less than or equal to `prob`, by default this is a 50/50 chance.\n \"\"\"\n return bool(random.random() <= prob)\n\n\ndef img_bounds(img: np.ndarray):\n \"\"\"\n Returns the minimum and maximum indices of non-zero lines in axis 0 of `img`, followed by that for axis 1.\n \"\"\"\n ax0 = np.any(img, axis=0)\n ax1 = np.any(img, axis=1)\n return np.concatenate((np.where(ax0)[0][[0, -1]], np.where(ax1)[0][[0, -1]]))\n\n\ndef in_bounds(x: float, y: float, margin: float, maxx: float, maxy: float) -> bool:\n \"\"\"\n Returns True if (x,y) is within the rectangle (margin, margin, maxx-margin, maxy-margin).\n \"\"\"\n return bool(margin <= x < (maxx - margin) and margin <= y < (maxy - margin))\n\n\ndef is_empty(img: Union[np.ndarray, torch.Tensor]) -> bool:\n \"\"\"\n Returns True if `img` is empty, that is its maximum value is not greater than its minimum.\n \"\"\"\n return not (img.max() > img.min()) # use > instead of <= so that an image full of NaNs will result in True\n\n\ndef is_positive(img):\n \"\"\"\n Returns a boolean version of `img` where the positive values are converted into True, the other values are False.\n \"\"\"\n return img > 0\n\n\ndef zero_margins(img: np.ndarray, margin: int) -> bool:\n \"\"\"\n Returns True if the values within `margin` indices of the edges of `img` in dimensions 1 and 2 are 0.\n \"\"\"\n if np.any(img[:, :, 
:margin]) or np.any(img[:, :, -margin:]):\n return False\n\n return not np.any(img[:, :margin, :]) and not np.any(img[:, -margin:, :])\n\n\ndef rescale_array(arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: DtypeLike = np.float32):\n \"\"\"\n Rescale the values of numpy array `arr` to be from `minv` to `maxv`.\n \"\"\"\n if dtype is not None:\n arr = arr.astype(dtype)\n\n mina = np.min(arr)\n maxa = np.max(arr)\n\n if mina == maxa:\n return arr * minv\n\n norm = (arr - mina) / (maxa - mina) # normalize the array first\n return (norm * (maxv - minv)) + minv # rescale by minv and maxv, which is the normalized array by default\n\n\ndef rescale_instance_array(\n arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: DtypeLike = np.float32\n) -> np.ndarray:\n \"\"\"\n Rescale each array slice along the first dimension of `arr` independently.\n \"\"\"\n out: np.ndarray = np.zeros(arr.shape, dtype)\n for i in range(arr.shape[0]):\n out[i] = rescale_array(arr[i], minv, maxv, dtype)\n\n return out\n\n\ndef rescale_array_int_max(arr: np.ndarray, dtype: DtypeLike = np.uint16) -> np.ndarray:\n \"\"\"\n Rescale the array `arr` to be between the minimum and maximum values of the type `dtype`.\n \"\"\"\n info: np.iinfo = np.iinfo(dtype)\n return np.asarray(rescale_array(arr, info.min, info.max), dtype=dtype)\n\n\ndef copypaste_arrays(\n src_shape,\n dest_shape,\n srccenter: Sequence[int],\n destcenter: Sequence[int],\n dims: Sequence[Optional[int]],\n) -> Tuple[Tuple[slice, ...], Tuple[slice, ...]]:\n \"\"\"\n Calculate the slices to copy a sliced area of array in `src_shape` into array in `dest_shape`.\n\n The area has dimensions `dims` (use 0 or None to copy everything in that dimension),\n the source area is centered at `srccenter` index in `src` and copied into area centered at `destcenter` in `dest`.\n The dimensions of the copied area will be clipped to fit within the\n source and destination arrays so a smaller area may be copied than expected. 
Return value is the tuples of slice\n objects indexing the copied area in `src`, and those indexing the copy area in `dest`.\n\n Example\n\n .. code-block:: python\n\n src_shape = (6,6)\n src = np.random.randint(0,10,src_shape)\n dest = np.zeros_like(src)\n srcslices, destslices = copypaste_arrays(src_shape, dest.shape, (3, 2),(2, 1),(3, 4))\n dest[destslices] = src[srcslices]\n print(src)\n print(dest)\n\n >>> [[9 5 6 6 9 6]\n [4 3 5 6 1 2]\n [0 7 3 2 4 1]\n [3 0 0 1 5 1]\n [9 4 7 1 8 2]\n [6 6 5 8 6 7]]\n [[0 0 0 0 0 0]\n [7 3 2 4 0 0]\n [0 0 1 5 0 0]\n [4 7 1 8 0 0]\n [0 0 0 0 0 0]\n [0 0 0 0 0 0]]\n\n \"\"\"\n s_ndim = len(src_shape)\n d_ndim = len(dest_shape)\n srcslices = [slice(None)] * s_ndim\n destslices = [slice(None)] * d_ndim\n\n for i, ss, ds, sc, dc, dim in zip(range(s_ndim), src_shape, dest_shape, srccenter, destcenter, dims):\n if dim:\n # dimension before midpoint, clip to size fitting in both arrays\n d1 = np.clip(dim // 2, 0, min(sc, dc))\n # dimension after midpoint, clip to size fitting in both arrays\n d2 = np.clip(dim // 2 + 1, 0, min(ss - sc, ds - dc))\n\n srcslices[i] = slice(sc - d1, sc + d2)\n destslices[i] = slice(dc - d1, dc + d2)\n\n return tuple(srcslices), tuple(destslices)\n\n\ndef resize_center(img: np.ndarray, *resize_dims: Optional[int], fill_value: float = 0.0, inplace: bool = True):\n \"\"\"\n Resize `img` by cropping or expanding the image from the center. The `resize_dims` values are the output dimensions\n (or None to use original dimension of `img`). If a dimension is smaller than that of `img` then the result will be\n cropped and if larger padded with zeros, in both cases this is done relative to the center of `img`. 
The result is\n a new image with the specified dimensions and values from `img` copied into its center.\n \"\"\"\n\n resize_dims = fall_back_tuple(resize_dims, img.shape)\n\n half_img_shape = (np.asarray(img.shape) // 2).tolist()\n half_dest_shape = (np.asarray(resize_dims) // 2).tolist()\n srcslices, destslices = copypaste_arrays(img.shape, resize_dims, half_img_shape, half_dest_shape, resize_dims)\n\n if not inplace:\n dest = np.full(resize_dims, fill_value, img.dtype) # type: ignore\n dest[destslices] = img[srcslices]\n return dest\n return img[srcslices]\n\n\ndef map_binary_to_indices(\n label: np.ndarray,\n image: Optional[np.ndarray] = None,\n image_threshold: float = 0.0,\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Compute the foreground and background of input label data, return the indices after fattening.\n For example:\n ``label = np.array([[[0, 1, 1], [1, 0, 1], [1, 1, 0]]])``\n ``foreground indices = np.array([1, 2, 3, 5, 6, 7])`` and ``background indices = np.array([0, 4, 8])``\n\n Args:\n label: use the label data to get the foreground/background information.\n image: if image is not None, use ``label = 0 & image > image_threshold``\n to define background. 
so the output items will not map to all the voxels in the label.\n image_threshold: if enabled `image`, use ``image > image_threshold`` to\n determine the valid image content area and select background only in this area.\n\n \"\"\"\n # Prepare fg/bg indices\n if label.shape[0] > 1:\n label = label[1:] # for One-Hot format data, remove the background channel\n label_flat = np.any(label, axis=0).ravel() # in case label has multiple dimensions\n fg_indices = np.nonzero(label_flat)[0]\n if image is not None:\n img_flat = np.any(image > image_threshold, axis=0).ravel()\n bg_indices = np.nonzero(np.logical_and(img_flat, ~label_flat))[0]\n else:\n bg_indices = np.nonzero(~label_flat)[0]\n\n return fg_indices, bg_indices\n\n\ndef map_classes_to_indices(\n label: np.ndarray,\n num_classes: Optional[int] = None,\n image: Optional[np.ndarray] = None,\n image_threshold: float = 0.0,\n) -> List[np.ndarray]:\n \"\"\"\n Filter out indices of every class of the input label data, return the indices after fattening.\n It can handle both One-Hot format label and Argmax format label, must provide `num_classes` for\n Argmax label.\n\n For example:\n ``label = np.array([[[0, 1, 2], [2, 0, 1], [1, 2, 0]]])`` and `num_classes=3`, will return a list\n which contains the indices of the 3 classes:\n ``[np.array([0, 4, 8]), np.array([1, 5, 6]), np.array([2, 3, 7])]``\n\n Args:\n label: use the label data to get the indices of every class.\n num_classes: number of classes for argmax label, not necessary for One-Hot label.\n image: if image is not None, only return the indices of every class that are within the valid\n region of the image (``image > image_threshold``).\n image_threshold: if enabled `image`, use ``image > image_threshold`` to\n determine the valid image content area and select class indices only in this area.\n\n \"\"\"\n img_flat: Optional[np.ndarray] = None\n if image is not None:\n img_flat = np.any(image > image_threshold, axis=0).ravel()\n\n indices: List[np.ndarray] = []\n 
# assuming the first dimension is channel\n channels = len(label)\n\n num_classes_: int = channels\n if channels == 1:\n if num_classes is None:\n raise ValueError(\"if not One-Hot format label, must provide the num_classes.\")\n num_classes_ = num_classes\n\n for c in range(num_classes_):\n label_flat = np.any(label[c : c + 1] if channels > 1 else label == c, axis=0).ravel()\n label_flat = np.logical_and(img_flat, label_flat) if img_flat is not None else label_flat\n indices.append(np.nonzero(label_flat)[0])\n\n return indices\n\n\ndef weighted_patch_samples(\n spatial_size: Union[int, Sequence[int]],\n w: np.ndarray,\n n_samples: int = 1,\n r_state: Optional[np.random.RandomState] = None,\n) -> List:\n \"\"\"\n Computes `n_samples` of random patch sampling locations, given the sampling weight map `w` and patch `spatial_size`.\n\n Args:\n spatial_size: length of each spatial dimension of the patch.\n w: weight map, the weights must be non-negative. each element denotes a sampling weight of the spatial location.\n 0 indicates no sampling.\n The weight map shape is assumed ``(spatial_dim_0, spatial_dim_1, ..., spatial_dim_n)``.\n n_samples: number of patch samples\n r_state: a random state container\n\n Returns:\n a list of `n_samples` N-D integers representing the spatial sampling location of patches.\n\n \"\"\"\n if w is None:\n raise ValueError(\"w must be an ND array.\")\n if r_state is None:\n r_state = np.random.RandomState()\n img_size = np.asarray(w.shape, dtype=int)\n win_size = np.asarray(fall_back_tuple(spatial_size, img_size), dtype=int)\n\n s = tuple(slice(w // 2, m - w + w // 2) if m > w else slice(m // 2, m // 2 + 1) for w, m in zip(win_size, img_size))\n v = w[s] # weight map in the 'valid' mode\n v_size = v.shape\n v = v.ravel()\n if np.any(v < 0):\n v -= np.min(v) # shifting to non-negative\n v = v.cumsum()\n if not v[-1] or not np.isfinite(v[-1]) or v[-1] < 0: # uniform sampling\n idx = r_state.randint(0, len(v), size=n_samples)\n else:\n idx = 
v.searchsorted(r_state.random(n_samples) * v[-1], side=\"right\")\n # compensate 'valid' mode\n diff = np.minimum(win_size, img_size) // 2\n return [np.unravel_index(i, v_size) + diff for i in np.asarray(idx, dtype=int)]\n\n\ndef correct_crop_centers(\n centers: List[np.ndarray], spatial_size: Union[Sequence[int], int], label_spatial_shape: Sequence[int]\n) -> List[np.ndarray]:\n \"\"\"\n Utility to correct the crop center if the crop size is bigger than the image size.\n\n Args:\n ceters: pre-computed crop centers, will correct based on the valid region.\n spatial_size: spatial size of the ROIs to be sampled.\n label_spatial_shape: spatial shape of the original label data to compare with ROI.\n\n \"\"\"\n spatial_size = fall_back_tuple(spatial_size, default=label_spatial_shape)\n if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():\n raise ValueError(\"The size of the proposed random crop ROI is larger than the image size.\")\n\n # Select subregion to assure valid roi\n valid_start = np.floor_divide(spatial_size, 2)\n # add 1 for random\n valid_end = np.subtract(label_spatial_shape + np.array(1), spatial_size / np.array(2)).astype(np.uint16)\n # int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range\n # from being too high\n for i, valid_s in enumerate(valid_start):\n # need this because np.random.randint does not work with same start and end\n if valid_s == valid_end[i]:\n valid_end[i] += 1\n\n for i, c in enumerate(centers):\n center_i = c\n if c < valid_start[i]:\n center_i = valid_start[i]\n if c >= valid_end[i]:\n center_i = valid_end[i] - 1\n centers[i] = center_i\n\n return centers\n\n\ndef generate_pos_neg_label_crop_centers(\n spatial_size: Union[Sequence[int], int],\n num_samples: int,\n pos_ratio: float,\n label_spatial_shape: Sequence[int],\n fg_indices: np.ndarray,\n bg_indices: np.ndarray,\n rand_state: Optional[np.random.RandomState] = None,\n) -> List[List[np.ndarray]]:\n \"\"\"\n 
Generate valid sample locations based on the label with option for specifying foreground ratio\n Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]\n\n Args:\n spatial_size: spatial size of the ROIs to be sampled.\n num_samples: total sample centers to be generated.\n pos_ratio: ratio of total locations generated that have center being foreground.\n label_spatial_shape: spatial shape of the original label data to unravel selected centers.\n fg_indices: pre-computed foreground indices in 1 dimension.\n bg_indices: pre-computed background indices in 1 dimension.\n rand_state: numpy randomState object to align with other modules.\n\n Raises:\n ValueError: When the proposed roi is larger than the image.\n ValueError: When the foreground and background indices lengths are 0.\n\n \"\"\"\n if rand_state is None:\n rand_state = np.random.random.__self__ # type: ignore\n\n centers = []\n fg_indices, bg_indices = np.asarray(fg_indices), np.asarray(bg_indices)\n if fg_indices.size == 0 and bg_indices.size == 0:\n raise ValueError(\"No sampling location available.\")\n\n if fg_indices.size == 0 or bg_indices.size == 0:\n warnings.warn(\n f\"N foreground {len(fg_indices)}, N background {len(bg_indices)},\"\n \"unable to generate class balanced samples.\"\n )\n pos_ratio = 0 if fg_indices.size == 0 else 1\n\n for _ in range(num_samples):\n indices_to_use = fg_indices if rand_state.rand() < pos_ratio else bg_indices\n random_int = rand_state.randint(len(indices_to_use))\n center = np.unravel_index(indices_to_use[random_int], label_spatial_shape)\n # shift center to range of valid centers\n center_ori = list(center)\n centers.append(correct_crop_centers(center_ori, spatial_size, label_spatial_shape))\n\n return centers\n\n\ndef generate_label_classes_crop_centers(\n spatial_size: Union[Sequence[int], int],\n num_samples: int,\n label_spatial_shape: Sequence[int],\n indices: List[np.ndarray],\n ratios: Optional[List[Union[float, int]]] = 
None,\n rand_state: Optional[np.random.RandomState] = None,\n) -> List[List[np.ndarray]]:\n \"\"\"\n Generate valid sample locations based on the specified ratios of label classes.\n Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]\n\n Args:\n spatial_size: spatial size of the ROIs to be sampled.\n num_samples: total sample centers to be generated.\n label_spatial_shape: spatial shape of the original label data to unravel selected centers.\n indices: sequence of pre-computed foreground indices of every class in 1 dimension.\n ratios: ratios of every class in the label to generate crop centers, including background class.\n if None, every class will have the same ratio to generate crop centers.\n rand_state: numpy randomState object to align with other modules.\n\n \"\"\"\n if rand_state is None:\n rand_state = np.random.random.__self__ # type: ignore\n\n if num_samples < 1:\n raise ValueError(\"num_samples must be an int number and greater than 0.\")\n ratios_: List[Union[float, int]] = ([1] * len(indices)) if ratios is None else ratios\n if len(ratios_) != len(indices):\n raise ValueError(\"random crop radios must match the number of indices of classes.\")\n if any(i < 0 for i in ratios_):\n raise ValueError(\"ratios should not contain negative number.\")\n\n # ensure indices are numpy array\n indices = [np.asarray(i) for i in indices]\n for i, array in enumerate(indices):\n if len(array) == 0:\n warnings.warn(f\"no available indices of class {i} to crop, set the crop ratio of this class to zero.\")\n ratios_[i] = 0\n\n centers = []\n classes = rand_state.choice(len(ratios_), size=num_samples, p=np.asarray(ratios_) / np.sum(ratios_))\n for i in classes:\n # randomly select the indices of a class based on the ratios\n indices_to_use = indices[i]\n random_int = rand_state.randint(len(indices_to_use))\n center = np.unravel_index(indices_to_use[random_int], label_spatial_shape)\n # shift center to range of valid centers\n 
center_ori = list(center)\n centers.append(correct_crop_centers(center_ori, spatial_size, label_spatial_shape))\n\n return centers\n\n\ndef create_grid(\n spatial_size: Sequence[int],\n spacing: Optional[Sequence[float]] = None,\n homogeneous: bool = True,\n dtype: DtypeLike = float,\n):\n \"\"\"\n compute a `spatial_size` mesh.\n\n Args:\n spatial_size: spatial size of the grid.\n spacing: same len as ``spatial_size``, defaults to 1.0 (dense grid).\n homogeneous: whether to make homogeneous coordinates.\n dtype: output grid data type.\n \"\"\"\n spacing = spacing or tuple(1.0 for _ in spatial_size)\n ranges = [np.linspace(-(d - 1.0) / 2.0 * s, (d - 1.0) / 2.0 * s, int(d)) for d, s in zip(spatial_size, spacing)]\n coords = np.asarray(np.meshgrid(*ranges, indexing=\"ij\"), dtype=dtype)\n if not homogeneous:\n return coords\n return np.concatenate([coords, np.ones_like(coords[:1])])\n\n\ndef create_control_grid(\n spatial_shape: Sequence[int], spacing: Sequence[float], homogeneous: bool = True, dtype: DtypeLike = float\n):\n \"\"\"\n control grid with two additional point in each direction\n \"\"\"\n grid_shape = []\n for d, s in zip(spatial_shape, spacing):\n d = int(d)\n if d % 2 == 0:\n grid_shape.append(np.ceil((d - 1.0) / (2.0 * s) + 0.5) * 2.0 + 2.0)\n else:\n grid_shape.append(np.ceil((d - 1.0) / (2.0 * s)) * 2.0 + 3.0)\n return create_grid(grid_shape, spacing, homogeneous, dtype)\n\n\ndef create_rotate(spatial_dims: int, radians: Union[Sequence[float], float]) -> np.ndarray:\n \"\"\"\n create a 2D or 3D rotation matrix\n\n Args:\n spatial_dims: {``2``, ``3``} spatial rank\n radians: rotation radians\n when spatial_dims == 3, the `radians` sequence corresponds to\n rotation in the 1st, 2nd, and 3rd dim respectively.\n\n Raises:\n ValueError: When ``radians`` is empty.\n ValueError: When ``spatial_dims`` is not one of [2, 3].\n\n \"\"\"\n radians = ensure_tuple(radians)\n if spatial_dims == 2:\n if len(radians) >= 1:\n sin_, cos_ = np.sin(radians[0]), 
np.cos(radians[0])\n return np.array([[cos_, -sin_, 0.0], [sin_, cos_, 0.0], [0.0, 0.0, 1.0]])\n raise ValueError(\"radians must be non empty.\")\n\n if spatial_dims == 3:\n affine = None\n if len(radians) >= 1:\n sin_, cos_ = np.sin(radians[0]), np.cos(radians[0])\n affine = np.array(\n [[1.0, 0.0, 0.0, 0.0], [0.0, cos_, -sin_, 0.0], [0.0, sin_, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n if len(radians) >= 2:\n sin_, cos_ = np.sin(radians[1]), np.cos(radians[1])\n if affine is None:\n raise ValueError(\"Affine should be a matrix.\")\n affine = affine @ np.array(\n [[cos_, 0.0, sin_, 0.0], [0.0, 1.0, 0.0, 0.0], [-sin_, 0.0, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n if len(radians) >= 3:\n sin_, cos_ = np.sin(radians[2]), np.cos(radians[2])\n if affine is None:\n raise ValueError(\"Affine should be a matrix.\")\n affine = affine @ np.array(\n [[cos_, -sin_, 0.0, 0.0], [sin_, cos_, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n if affine is None:\n raise ValueError(\"radians must be non empty.\")\n return affine\n\n raise ValueError(f\"Unsupported spatial_dims: {spatial_dims}, available options are [2, 3].\")\n\n\ndef create_shear(spatial_dims: int, coefs: Union[Sequence[float], float]) -> np.ndarray:\n \"\"\"\n create a shearing matrix\n\n Args:\n spatial_dims: spatial rank\n coefs: shearing factors, a tuple of 2 floats for 2D, a tuple of 6 floats for 3D),\n take a 3D affine as example::\n\n [\n [1.0, coefs[0], coefs[1], 0.0],\n [coefs[2], 1.0, coefs[3], 0.0],\n [coefs[4], coefs[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n\n Raises:\n NotImplementedError: When ``spatial_dims`` is not one of [2, 3].\n\n \"\"\"\n if spatial_dims == 2:\n coefs = ensure_tuple_size(coefs, dim=2, pad_val=0.0)\n return np.array([[1, coefs[0], 0.0], [coefs[1], 1.0, 0.0], [0.0, 0.0, 1.0]])\n if spatial_dims == 3:\n coefs = ensure_tuple_size(coefs, dim=6, pad_val=0.0)\n return np.array(\n [\n [1.0, coefs[0], coefs[1], 0.0],\n [coefs[2], 1.0, coefs[3], 0.0],\n [coefs[4], coefs[5], 
1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n )\n raise NotImplementedError(\"Currently only spatial_dims in [2, 3] are supported.\")\n\n\ndef create_scale(spatial_dims: int, scaling_factor: Union[Sequence[float], float]):\n \"\"\"\n create a scaling matrix\n\n Args:\n spatial_dims: spatial rank\n scaling_factor: scaling factors for every spatial dim, defaults to 1.\n \"\"\"\n scaling_factor = ensure_tuple_size(scaling_factor, dim=spatial_dims, pad_val=1.0)\n return np.diag(scaling_factor[:spatial_dims] + (1.0,))\n\n\ndef create_translate(spatial_dims: int, shift: Union[Sequence[float], float]) -> np.ndarray:\n \"\"\"\n create a translation matrix\n\n Args:\n spatial_dims: spatial rank\n shift: translate pixel/voxel for every spatial dim, defaults to 0.\n \"\"\"\n shift = ensure_tuple(shift)\n affine = np.eye(spatial_dims + 1)\n for i, a in enumerate(shift[:spatial_dims]):\n affine[i, spatial_dims] = a\n return np.asarray(affine)\n\n\ndef generate_spatial_bounding_box(\n img: np.ndarray,\n select_fn: Callable = is_positive,\n channel_indices: Optional[IndexSelection] = None,\n margin: Union[Sequence[int], int] = 0,\n) -> Tuple[List[int], List[int]]:\n \"\"\"\n generate the spatial bounding box of foreground in the image with start-end positions.\n Users can define arbitrary function to select expected foreground from the whole image or specified channels.\n And it can also add margin to every dim of the bounding box.\n The output format of the coordinates is:\n\n [1st_spatial_dim_start, 2nd_spatial_dim_start, ..., Nth_spatial_dim_start],\n [1st_spatial_dim_end, 2nd_spatial_dim_end, ..., Nth_spatial_dim_end]\n\n The bounding boxes edges are aligned with the input image edges.\n This function returns [-1, -1, ...], [-1, -1, ...] 
if there's no positive intensity.\n\n Args:\n img: source image to generate bounding box from.\n select_fn: function to select expected foreground, default is to select values > 0.\n channel_indices: if defined, select foreground only on the specified channels\n of image. if None, select foreground on the whole image.\n margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.\n \"\"\"\n data = img[list(ensure_tuple(channel_indices))] if channel_indices is not None else img\n data = np.any(select_fn(data), axis=0)\n ndim = len(data.shape)\n margin = ensure_tuple_rep(margin, ndim)\n for m in margin:\n if m < 0:\n raise ValueError(\"margin value should not be negative number.\")\n\n box_start = [0] * ndim\n box_end = [0] * ndim\n\n for di, ax in enumerate(itertools.combinations(reversed(range(ndim)), ndim - 1)):\n dt = data.any(axis=ax)\n if not np.any(dt):\n # if no foreground, return all zero bounding box coords\n return [0] * ndim, [0] * ndim\n\n min_d = max(np.argmax(dt) - margin[di], 0)\n max_d = max(data.shape[di] - max(np.argmax(dt[::-1]) - margin[di], 0), min_d + 1)\n box_start[di], box_end[di] = min_d, max_d\n\n return box_start, box_end\n\n\ndef get_largest_connected_component_mask(img: torch.Tensor, connectivity: Optional[int] = None) -> torch.Tensor:\n \"\"\"\n Gets the largest connected component mask of an image.\n\n Args:\n img: Image to get largest connected component from. Shape is (spatial_dim1 [, spatial_dim2, ...])\n connectivity: Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.\n Accepted values are ranging from 1 to input.ndim. If ``None``, a full\n connectivity of ``input.ndim`` is used.\n \"\"\"\n img_arr = img.detach().cpu().numpy()\n largest_cc = np.zeros(shape=img_arr.shape, dtype=img_arr.dtype)\n img_arr = measure.label(img_arr, connectivity=connectivity)\n if img_arr.max() != 0:\n largest_cc[...] 
= img_arr == (np.argmax(np.bincount(img_arr.flat)[1:]) + 1)\n\n return torch.as_tensor(largest_cc, device=img.device)\n\n\ndef fill_holes(\n img_arr: np.ndarray, applied_labels: Optional[Iterable[int]] = None, connectivity: Optional[int] = None\n) -> np.ndarray:\n \"\"\"\n Fill the holes in the provided image.\n\n The label 0 will be treated as background and the enclosed holes will be set to the neighboring class label.\n What is considered to be an enclosed hole is defined by the connectivity.\n Holes on the edge are always considered to be open (not enclosed).\n\n Note:\n\n The performance of this method heavily depends on the number of labels.\n It is a bit faster if the list of `applied_labels` is provided.\n Limiting the number of `applied_labels` results in a big decrease in processing time.\n\n If the image is one-hot-encoded, then the `applied_labels` need to match the channel index.\n\n Args:\n img_arr: numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].\n applied_labels: Labels for which to fill holes. Defaults to None,\n that is filling holes for all labels.\n connectivity: Maximum number of orthogonal hops to\n consider a pixel/voxel as a neighbor. Accepted values are ranging from 1 to input.ndim.\n Defaults to a full connectivity of ``input.ndim``.\n\n Returns:\n numpy array of shape [C, spatial_dim1[, spatial_dim2, ...]].\n \"\"\"\n channel_axis = 0\n num_channels = img_arr.shape[channel_axis]\n is_one_hot = num_channels > 1\n spatial_dims = img_arr.ndim - 1\n structure = ndimage.generate_binary_structure(spatial_dims, connectivity or spatial_dims)\n\n # Get labels if not provided. 
Exclude background label.\n applied_labels = set(applied_labels or (range(num_channels) if is_one_hot else np.unique(img_arr)))\n background_label = 0\n applied_labels.discard(background_label)\n\n for label in applied_labels:\n tmp = np.zeros(img_arr.shape[1:], dtype=bool)\n ndimage.binary_dilation(\n tmp,\n structure=structure,\n iterations=-1,\n mask=np.logical_not(img_arr[label]) if is_one_hot else img_arr[0] != label,\n origin=0,\n border_value=1,\n output=tmp,\n )\n if is_one_hot:\n img_arr[label] = np.logical_not(tmp)\n else:\n img_arr[0, np.logical_not(tmp)] = label\n\n return img_arr\n\n\ndef get_extreme_points(\n img: np.ndarray, rand_state: Optional[np.random.RandomState] = None, background: int = 0, pert: float = 0.0\n) -> List[Tuple[int, ...]]:\n \"\"\"\n Generate extreme points from an image. These are used to generate initial segmentation\n for annotation models. An optional perturbation can be passed to simulate user clicks.\n\n Args:\n img:\n Image to generate extreme points from. 
Expected Shape is ``(spatial_dim1, [, spatial_dim2, ...])``.\n rand_state: `np.random.RandomState` object used to select random indices.\n background: Value to be consider as background, defaults to 0.\n pert: Random perturbation amount to add to the points, defaults to 0.0.\n\n Returns:\n A list of extreme points, its length is equal to 2 * spatial dimension of input image.\n The output format of the coordinates is:\n\n [1st_spatial_dim_min, 1st_spatial_dim_max, 2nd_spatial_dim_min, ..., Nth_spatial_dim_max]\n\n Raises:\n ValueError: When the input image does not have any foreground pixel.\n \"\"\"\n if rand_state is None:\n rand_state = np.random.random.__self__ # type: ignore\n indices = np.where(img != background)\n if np.size(indices[0]) == 0:\n raise ValueError(\"get_extreme_points: no foreground object in mask!\")\n\n def _get_point(val, dim):\n \"\"\"\n Select one of the indices within slice containing val.\n\n Args:\n val : value for comparison\n dim : dimension in which to look for value\n \"\"\"\n idx = rand_state.choice(np.where(indices[dim] == val)[0])\n pt = []\n for j in range(img.ndim):\n # add +- pert to each dimension\n val = int(indices[j][idx] + 2.0 * pert * (rand_state.rand() - 0.5))\n val = max(val, 0)\n val = min(val, img.shape[j] - 1)\n pt.append(val)\n return pt\n\n points = []\n for i in range(img.ndim):\n points.append(tuple(_get_point(np.min(indices[i][...]), i)))\n points.append(tuple(_get_point(np.max(indices[i][...]), i)))\n\n return points\n\n\ndef extreme_points_to_image(\n points: List[Tuple[int, ...]],\n label: np.ndarray,\n sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 0.0,\n rescale_min: float = -1.0,\n rescale_max: float = 1.0,\n):\n \"\"\"\n Please refer to :py:class:`monai.transforms.AddExtremePointsChannel` for the usage.\n\n Applies a gaussian filter to the extreme points image. 
Then the pixel values in points image are rescaled\n to range [rescale_min, rescale_max].\n\n Args:\n points: Extreme points of the object/organ.\n label: label image to get extreme points from. Shape must be\n (1, spatial_dim1, [, spatial_dim2, ...]). Doesn't support one-hot labels.\n sigma: if a list of values, must match the count of spatial dimensions of input data,\n and apply every value in the list to 1 spatial dimension. if only 1 value provided,\n use it for all spatial dimensions.\n rescale_min: minimum value of output data.\n rescale_max: maximum value of output data.\n \"\"\"\n # points to image\n points_image = torch.zeros(label.shape[1:], dtype=torch.float)\n for p in points:\n points_image[p] = 1.0\n\n # add channel and add batch\n points_image = points_image.unsqueeze(0).unsqueeze(0)\n gaussian_filter = GaussianFilter(label.ndim - 1, sigma=sigma)\n points_image = gaussian_filter(points_image).squeeze(0).detach().numpy()\n\n # rescale the points image to [rescale_min, rescale_max]\n min_intensity = np.min(points_image)\n max_intensity = np.max(points_image)\n points_image = (points_image - min_intensity) / (max_intensity - min_intensity)\n points_image = points_image * (rescale_max - rescale_min) + rescale_min\n return points_image\n\n\ndef map_spatial_axes(\n img_ndim: int,\n spatial_axes: Optional[Union[Sequence[int], int]] = None,\n channel_first: bool = True,\n) -> List[int]:\n \"\"\"\n Utility to map the spatial axes to real axes in channel first/last shape.\n For example:\n If `channel_first` is True, and `img` has 3 spatial dims, map spatial axes to real axes as below:\n None -> [1, 2, 3]\n [0, 1] -> [1, 2]\n [0, -1] -> [1, -1]\n If `channel_first` is False, and `img` has 3 spatial dims, map spatial axes to real axes as below:\n None -> [0, 1, 2]\n [0, 1] -> [0, 1]\n [0, -1] -> [0, -2]\n\n Args:\n img_ndim: dimension number of the target image.\n spatial_axes: spatial axes to be converted, default is None.\n The default `None` will convert to 
all the spatial axes of the image.\n If axis is negative it counts from the last to the first axis.\n If axis is a tuple of ints.\n channel_first: the image data is channel first or channel last, default to channel first.\n\n \"\"\"\n if spatial_axes is None:\n spatial_axes_ = list(range(1, img_ndim) if channel_first else range(img_ndim - 1))\n\n else:\n spatial_axes_ = []\n for a in ensure_tuple(spatial_axes):\n if channel_first:\n spatial_axes_.append(a if a < 0 else a + 1)\n else:\n spatial_axes_.append(a - 1 if a < 0 else a)\n\n return spatial_axes_\n\n\n@contextmanager\ndef allow_missing_keys_mode(transform: Union[MapTransform, Compose, Tuple[MapTransform], Tuple[Compose]]):\n \"\"\"Temporarily set all MapTransforms to not throw an error if keys are missing. After, revert to original states.\n\n Args:\n transform: either MapTransform or a Compose\n\n Example:\n\n .. code-block:: python\n\n data = {\"image\": np.arange(16, dtype=float).reshape(1, 4, 4)}\n t = SpatialPadd([\"image\", \"label\"], 10, allow_missing_keys=False)\n _ = t(data) # would raise exception\n with allow_missing_keys_mode(t):\n _ = t(data) # OK!\n \"\"\"\n # If given a sequence of transforms, Compose them to get a single list\n if issequenceiterable(transform):\n transform = Compose(transform)\n\n # Get list of MapTransforms\n transforms = []\n if isinstance(transform, MapTransform):\n transforms = [transform]\n elif isinstance(transform, Compose):\n # Only keep contained MapTransforms\n transforms = [t for t in transform.flatten().transforms if isinstance(t, MapTransform)]\n if len(transforms) == 0:\n raise TypeError(\n \"allow_missing_keys_mode expects either MapTransform(s) or Compose(s) containing MapTransform(s)\"\n )\n\n # Get the state of each `allow_missing_keys`\n orig_states = [t.allow_missing_keys for t in transforms]\n\n try:\n # Set all to True\n for t in transforms:\n t.allow_missing_keys = True\n yield\n finally:\n # Revert\n for t, o_s in zip(transforms, orig_states):\n 
t.allow_missing_keys = o_s\n\n\ndef convert_inverse_interp_mode(trans_info: List, mode: str = \"nearest\", align_corners: Optional[bool] = None):\n \"\"\"\n Change the interpolation mode when inverting spatial transforms, default to \"nearest\".\n This function modifies trans_info's `InverseKeys.EXTRA_INFO`.\n\n See also: :py:class:`monai.transform.inverse.InvertibleTransform`\n\n Args:\n trans_info: transforms inverse information list, contains context of every invertible transform.\n mode: target interpolation mode to convert, default to \"nearest\" as it's usually used to save the mode output.\n align_corners: target align corner value in PyTorch interpolation API, need to align with the `mode`.\n\n \"\"\"\n interp_modes = [i.value for i in InterpolateMode] + [i.value for i in GridSampleMode]\n\n # set to string for DataLoader collation\n align_corners_ = \"none\" if align_corners is None else align_corners\n\n for item in ensure_tuple(trans_info):\n if InverseKeys.EXTRA_INFO in item:\n orig_mode = item[InverseKeys.EXTRA_INFO].get(\"mode\", None)\n if orig_mode is not None:\n if orig_mode[0] in interp_modes:\n item[InverseKeys.EXTRA_INFO][\"mode\"] = [mode for _ in range(len(mode))]\n elif orig_mode in interp_modes:\n item[InverseKeys.EXTRA_INFO][\"mode\"] = mode\n if \"align_corners\" in item[InverseKeys.EXTRA_INFO]:\n if issequenceiterable(item[InverseKeys.EXTRA_INFO][\"align_corners\"]):\n item[InverseKeys.EXTRA_INFO][\"align_corners\"] = [align_corners_ for _ in range(len(mode))]\n else:\n item[InverseKeys.EXTRA_INFO][\"align_corners\"] = align_corners_\n return trans_info\n\n\ndef compute_divisible_spatial_size(spatial_shape: Sequence[int], k: Union[Sequence[int], int]):\n \"\"\"\n Compute the target spatial size which should be divisible by `k`.\n\n Args:\n spatial_shape: original spatial shape.\n k: the target k for each spatial dimension.\n if `k` is negative or 0, the original size is preserved.\n if `k` is an int, the same `k` be applied to all the 
input spatial dimensions.\n\n \"\"\"\n k = fall_back_tuple(k, (1,) * len(spatial_shape))\n new_size = []\n for k_d, dim in zip(k, spatial_shape):\n new_dim = int(np.ceil(dim / k_d) * k_d) if k_d > 0 else dim\n new_size.append(new_dim)\n\n return new_size\n\n\ndef equalize_hist(\n img: np.ndarray,\n mask: Optional[np.ndarray] = None,\n num_bins: int = 256,\n min: int = 0,\n max: int = 255,\n dtype: DtypeLike = np.float32,\n) -> np.ndarray:\n \"\"\"\n Utility to equalize input image based on the histogram.\n If `skimage` installed, will leverage `skimage.exposure.histogram`, otherwise, use\n `np.histogram` instead.\n\n Args:\n img: input image to equalize.\n mask: if provided, must be ndarray of bools or 0s and 1s, and same shape as `image`.\n only points at which `mask==True` are used for the equalization.\n num_bins: number of the bins to use in histogram, default to `256`. for more details:\n https://numpy.org/doc/stable/reference/generated/numpy.histogram.html.\n min: the min value to normalize input image, default to `0`.\n max: the max value to normalize input image, default to `255`.\n dtype: data type of the output, default to `float32`.\n\n \"\"\"\n orig_shape = img.shape\n hist_img = img[np.array(mask, dtype=bool)] if mask is not None else img\n if has_skimage:\n hist, bins = exposure.histogram(hist_img.flatten(), num_bins)\n else:\n hist, bins = np.histogram(hist_img.flatten(), num_bins)\n bins = (bins[:-1] + bins[1:]) / 2\n\n cum = hist.cumsum()\n # normalize the cumulative result\n cum = rescale_array(arr=cum, minv=min, maxv=max)\n\n # apply linear interpolation\n img = np.interp(img.flatten(), bins, cum)\n\n return img.reshape(orig_shape).astype(dtype)\n\n\nclass Fourier:\n \"\"\"\n Helper class storing Fourier mappings\n \"\"\"\n\n @staticmethod\n def shift_fourier(x: torch.Tensor, n_dims: int) -> torch.Tensor:\n \"\"\"\n Applies fourier transform and shifts the zero-frequency component to the\n center of the spectrum. 
Only the spatial dimensions get transformed.\n\n Args:\n x: Image to transform.\n n_dims: Number of spatial dimensions.\n Returns\n k: K-space data.\n \"\"\"\n k: torch.Tensor = torch.fft.fftshift(\n torch.fft.fftn(x, dim=tuple(range(-n_dims, 0))), dim=tuple(range(-n_dims, 0))\n )\n return k\n\n @staticmethod\n def inv_shift_fourier(k: torch.Tensor, n_dims: int) -> torch.Tensor:\n \"\"\"\n Applies inverse shift and fourier transform. Only the spatial\n dimensions are transformed.\n\n Args:\n k: K-space data.\n n_dims: Number of spatial dimensions.\n Returns:\n x: Tensor in image space.\n \"\"\"\n x: torch.Tensor = torch.fft.ifftn(\n torch.fft.ifftshift(k, dim=tuple(range(-n_dims, 0))), dim=tuple(range(-n_dims, 0))\n ).real\n return x\n\n\ndef get_number_image_type_conversions(transform: Compose, test_data: Any, key: Optional[Hashable] = None) -> int:\n \"\"\"\n Get the number of times that the data need to be converted (e.g., numpy to torch).\n Conversions between different devices are also counted (e.g., CPU to GPU).\n\n Args:\n transform: composed transforms to be tested\n test_data: data to be used to count the number of conversions\n key: if using dictionary transforms, this key will be used to check the number of conversions.\n \"\"\"\n\n def _get_data(obj, key):\n return obj if key is None else obj[key]\n\n # if the starting point is a string (e.g., input to LoadImage), start\n # at -1 since we don't want to count the string -> image conversion.\n num_conversions = 0 if not isinstance(_get_data(test_data, key), str) else -1\n\n tr = transform.flatten().transforms\n\n if isinstance(transform, OneOf) or any(isinstance(i, OneOf) for i in tr):\n raise RuntimeError(\"Not compatible with `OneOf`, as the applied transform is deterministically chosen.\")\n\n for _transform in tr:\n prev_data = _get_data(test_data, key)\n prev_type = type(prev_data)\n prev_device = prev_data.device if isinstance(prev_data, torch.Tensor) else None\n test_data = 
monai.transforms.transform.apply_transform(\n _transform, test_data, transform.map_items, transform.unpack_items\n )\n # every time the type or device changes, increment the counter\n curr_data = _get_data(test_data, key)\n curr_device = curr_data.device if isinstance(curr_data, torch.Tensor) else None\n if not isinstance(curr_data, prev_type) or curr_device != prev_device:\n num_conversions += 1\n return num_conversions\n", |
| "monai/utils/__init__.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# have to explicitly bring these in here to resolve circular import issues\nfrom .aliases import alias, resolve_name\nfrom .decorators import MethodReplacer, RestartGenerator\nfrom .deprecated import DeprecatedError, deprecated, deprecated_arg\nfrom .dist import evenly_divisible_all_gather, get_dist_device, string_list_all_gather\nfrom .enums import (\n Average,\n BlendMode,\n ChannelMatching,\n CommonKeys,\n ForwardMode,\n GridSampleMode,\n GridSamplePadMode,\n InterpolateMode,\n InverseKeys,\n LossReduction,\n Method,\n MetricReduction,\n NumpyPadMode,\n PytorchPadMode,\n SkipMode,\n UpsampleMode,\n Weight,\n)\nfrom .jupyter_utils import StatusMembers, ThreadContainer\nfrom .misc import (\n MAX_SEED,\n ImageMetaKey,\n copy_to_device,\n ensure_tuple,\n ensure_tuple_rep,\n ensure_tuple_size,\n fall_back_tuple,\n first,\n get_seed,\n has_option,\n is_scalar,\n is_scalar_tensor,\n issequenceiterable,\n list_to_dict,\n progress_bar,\n set_determinism,\n star_zip_with,\n zip_with,\n)\nfrom .module import (\n PT_BEFORE_1_7,\n InvalidPyTorchVersionError,\n OptionalImportError,\n damerau_levenshtein_distance,\n exact_version,\n export,\n get_full_type_name,\n get_package_version,\n get_torch_version_tuple,\n load_submodules,\n look_up_option,\n min_version,\n optional_import,\n version_leq,\n)\nfrom .profiling import PerfContext, torch_profiler_full, 
torch_profiler_time_cpu_gpu, torch_profiler_time_end_to_end\nfrom .state_cacher import StateCacher\nfrom .type_conversion import (\n convert_data_type,\n convert_to_dst_type,\n convert_to_numpy,\n convert_to_tensor,\n dtype_numpy_to_torch,\n dtype_torch_to_numpy,\n get_dtype,\n get_equivalent_dtype,\n)\n", |
| "monai/utils/enums.py": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom enum import Enum\n\n__all__ = [\n \"NumpyPadMode\",\n \"GridSampleMode\",\n \"InterpolateMode\",\n \"UpsampleMode\",\n \"BlendMode\",\n \"PytorchPadMode\",\n \"GridSamplePadMode\",\n \"Average\",\n \"MetricReduction\",\n \"LossReduction\",\n \"Weight\",\n \"ChannelMatching\",\n \"SkipMode\",\n \"Method\",\n \"InverseKeys\",\n \"CommonKeys\",\n \"ForwardMode\",\n]\n\n\nclass NumpyPadMode(Enum):\n \"\"\"\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n \"\"\"\n\n CONSTANT = \"constant\"\n EDGE = \"edge\"\n LINEAR_RAMP = \"linear_ramp\"\n MAXIMUM = \"maximum\"\n MEAN = \"mean\"\n MEDIAN = \"median\"\n MINIMUM = \"minimum\"\n REFLECT = \"reflect\"\n SYMMETRIC = \"symmetric\"\n WRAP = \"wrap\"\n EMPTY = \"empty\"\n\n\nclass GridSampleMode(Enum):\n \"\"\"\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n\n interpolation mode of `torch.nn.functional.grid_sample`\n\n Note:\n (documentation from `torch.nn.functional.grid_sample`)\n `mode='bicubic'` supports only 4-D input.\n When `mode='bilinear'` and the input is 5-D, the interpolation mode used internally will actually be trilinear.\n However, when the input is 4-D, the interpolation mode will legitimately be bilinear.\n \"\"\"\n\n NEAREST = \"nearest\"\n BILINEAR = \"bilinear\"\n BICUBIC = \"bicubic\"\n\n\nclass InterpolateMode(Enum):\n \"\"\"\n See also: 
https://pytorch.org/docs/stable/nn.functional.html#interpolate\n \"\"\"\n\n NEAREST = \"nearest\"\n LINEAR = \"linear\"\n BILINEAR = \"bilinear\"\n BICUBIC = \"bicubic\"\n TRILINEAR = \"trilinear\"\n AREA = \"area\"\n\n\nclass UpsampleMode(Enum):\n \"\"\"\n See also: :py:class:`monai.networks.blocks.UpSample`\n \"\"\"\n\n DECONV = \"deconv\"\n NONTRAINABLE = \"nontrainable\" # e.g. using torch.nn.Upsample\n PIXELSHUFFLE = \"pixelshuffle\"\n\n\nclass BlendMode(Enum):\n \"\"\"\n See also: :py:class:`monai.data.utils.compute_importance_map`\n \"\"\"\n\n CONSTANT = \"constant\"\n GAUSSIAN = \"gaussian\"\n\n\nclass PytorchPadMode(Enum):\n \"\"\"\n See also: https://pytorch.org/docs/stable/nn.functional.html#pad\n \"\"\"\n\n CONSTANT = \"constant\"\n REFLECT = \"reflect\"\n REPLICATE = \"replicate\"\n CIRCULAR = \"circular\"\n\n\nclass GridSamplePadMode(Enum):\n \"\"\"\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n\n ZEROS = \"zeros\"\n BORDER = \"border\"\n REFLECTION = \"reflection\"\n\n\nclass Average(Enum):\n \"\"\"\n See also: :py:class:`monai.metrics.rocauc.compute_roc_auc`\n \"\"\"\n\n MACRO = \"macro\"\n WEIGHTED = \"weighted\"\n MICRO = \"micro\"\n NONE = \"none\"\n\n\nclass MetricReduction(Enum):\n \"\"\"\n See also: :py:class:`monai.metrics.meandice.DiceMetric`\n \"\"\"\n\n NONE = \"none\"\n MEAN = \"mean\"\n SUM = \"sum\"\n MEAN_BATCH = \"mean_batch\"\n SUM_BATCH = \"sum_batch\"\n MEAN_CHANNEL = \"mean_channel\"\n SUM_CHANNEL = \"sum_channel\"\n\n\nclass LossReduction(Enum):\n \"\"\"\n See also:\n - :py:class:`monai.losses.dice.DiceLoss`\n - :py:class:`monai.losses.dice.GeneralizedDiceLoss`\n - :py:class:`monai.losses.focal_loss.FocalLoss`\n - :py:class:`monai.losses.tversky.TverskyLoss`\n \"\"\"\n\n NONE = \"none\"\n MEAN = \"mean\"\n SUM = \"sum\"\n\n\nclass Weight(Enum):\n \"\"\"\n See also: :py:class:`monai.losses.dice.GeneralizedDiceLoss`\n \"\"\"\n\n SQUARE = \"square\"\n SIMPLE = \"simple\"\n UNIFORM = 
\"uniform\"\n\n\nclass ChannelMatching(Enum):\n \"\"\"\n See also: :py:class:`monai.networks.nets.HighResBlock`\n \"\"\"\n\n PAD = \"pad\"\n PROJECT = \"project\"\n\n\nclass SkipMode(Enum):\n \"\"\"\n See also: :py:class:`monai.networks.layers.SkipConnection`\n \"\"\"\n\n CAT = \"cat\"\n ADD = \"add\"\n MUL = \"mul\"\n\n\nclass Method(Enum):\n \"\"\"\n See also: :py:class:`monai.transforms.croppad.array.SpatialPad`\n \"\"\"\n\n SYMMETRIC = \"symmetric\"\n END = \"end\"\n\n\nclass ForwardMode(Enum):\n \"\"\"\n See also: :py:class:`monai.transforms.engines.evaluator.Evaluator`\n \"\"\"\n\n TRAIN = \"train\"\n EVAL = \"eval\"\n\n\nclass InverseKeys:\n \"\"\"Extra meta data keys used for inverse transforms.\"\"\"\n\n CLASS_NAME = \"class\"\n ID = \"id\"\n ORIG_SIZE = \"orig_size\"\n EXTRA_INFO = \"extra_info\"\n DO_TRANSFORM = \"do_transforms\"\n KEY_SUFFIX = \"_transforms\"\n\n\nclass CommonKeys:\n \"\"\"\n A set of common keys for dictionary based supervised training process.\n `IMAGE` is the input image data.\n `LABEL` is the training or evaluation label of segmentation or classification task.\n `PRED` is the prediction data of model output.\n `LOSS` is the loss value of current iteration.\n `INFO` is some useful information during training or evaluation, like loss value, etc.\n\n \"\"\"\n\n IMAGE = \"image\"\n LABEL = \"label\"\n PRED = \"pred\"\n LOSS = \"loss\"\n" |
| }, |
| "non_py_patch": "", |
| "new_components": { |
| "monai/transforms/utils.py": [ |
| { |
| "type": "function", |
| "name": "print_transform_backends", |
| "lines": [ |
| 1157, |
| 1210 |
| ], |
| "signature": "def print_transform_backends():", |
| "doc": "Prints a list of backends of all MONAI transforms." |
| }, |
| { |
| "type": "class", |
| "name": "print_transform_backends.Colours", |
| "lines": [ |
| 1160, |
| 1163 |
| ], |
| "signature": "class Colours:", |
| "doc": "" |
| }, |
| { |
| "type": "function", |
| "name": "print_transform_backends.print_colour", |
| "lines": [ |
| 1165, |
| 1166 |
| ], |
| "signature": "def print_colour(t, colour):", |
| "doc": "" |
| } |
| ], |
| "monai/utils/enums.py": [ |
| { |
| "type": "class", |
| "name": "TransformBackends", |
| "lines": [ |
| 239, |
| 245 |
| ], |
| "signature": "class TransformBackends(Enum):", |
| "doc": "Transform backends." |
| } |
| ] |
| }, |
| "version": null, |
| "FAIL_TO_PASS": [ |
| "tests/test_print_transform_backends.py::TestPrintTransformBackends::test_get_number_of_conversions" |
| ], |
| "PASS_TO_PASS": [], |
| "environment_setup_commit": "e73257caa79309dcce1e93abf1632f4bfd75b11f" |
| } |